1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/reset.h" 30 #include "sysemu/hvf.h" 31 #include "sysemu/cpus.h" 32 #include "sysemu/xen.h" 33 #include "kvm_i386.h" 34 #include "sev_i386.h" 35 36 #include "qemu/error-report.h" 37 #include "qemu/module.h" 38 #include "qemu/option.h" 39 #include "qemu/config-file.h" 40 #include "qapi/error.h" 41 #include "qapi/qapi-visit-machine.h" 42 #include "qapi/qapi-visit-run-state.h" 43 #include "qapi/qmp/qdict.h" 44 #include "qapi/qmp/qerror.h" 45 #include "qapi/visitor.h" 46 #include "qom/qom-qobject.h" 47 #include "sysemu/arch_init.h" 48 #include "qapi/qapi-commands-machine-target.h" 49 50 #include "standard-headers/asm-x86/kvm_para.h" 51 52 #include "sysemu/sysemu.h" 53 #include "sysemu/tcg.h" 54 #include "hw/qdev-properties.h" 55 #include "hw/i386/topology.h" 56 #ifndef CONFIG_USER_ONLY 57 #include "exec/address-spaces.h" 58 #include "hw/i386/apic_internal.h" 59 #include "hw/boards.h" 60 #endif 61 62 #include "disas/capstone.h" 63 64 /* Helpers for building CPUID[2] descriptors: */ 65 66 struct CPUID2CacheDescriptorInfo { 67 enum CacheType type; 68 int level; 69 int size; 70 int line_size; 71 int associativity; 72 }; 73 74 /* 75 * Known CPUID 2 cache descriptors. 76 * From Intel SDM Volume 2A, CPUID instruction 77 */ 78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 80 .associativity = 4, .line_size = 32, }, 81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 82 .associativity = 4, .line_size = 32, }, 83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 84 .associativity = 4, .line_size = 64, }, 85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 86 .associativity = 2, .line_size = 32, }, 87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 88 .associativity = 4, .line_size = 32, }, 89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 90 .associativity = 4, .line_size = 64, }, 91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 92 .associativity = 6, .line_size = 64, }, 93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 94 .associativity = 2, .line_size = 64, }, 95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 96 .associativity = 8, .line_size = 64, }, 97 /* lines per sector is not supported cpuid2_cache_descriptor(), 98 * so descriptors 0x22, 0x23 are not included 99 */ 100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 101 .associativity = 16, .line_size = 64, }, 102 /* lines per sector is not supported cpuid2_cache_descriptor(), 103 * so descriptors 0x25, 0x20 are not included 104 */ 105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 106 .associativity = 8, .line_size = 64, }, 107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 108 .associativity = 8, .line_size = 64, }, 109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 110 .associativity = 4, .line_size = 32, }, 111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 112 .associativity = 4, .line_size = 32, }, 113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 114 .associativity = 4, .line_size = 32, }, 115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 116 .associativity = 4, .line_size = 32, }, 117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 118 .associativity = 4, .line_size = 32, }, 119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 120 .associativity = 4, .line_size = 64, }, 121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 122 .associativity = 8, .line_size = 64, }, 123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 124 .associativity = 12, .line_size = 64, }, 125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 127 .associativity = 12, .line_size = 64, }, 128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 129 .associativity = 16, .line_size = 64, }, 130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 131 .associativity = 12, .line_size = 64, }, 132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 133 .associativity = 16, .line_size = 64, }, 134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 135 .associativity = 24, .line_size = 64, }, 136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 137 .associativity = 8, .line_size = 64, }, 138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 139 .associativity = 4, .line_size = 64, }, 140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 141 .associativity = 4, .line_size = 64, }, 142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 143 .associativity = 4, .line_size = 64, }, 144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 145 .associativity = 4, .line_size = 64, }, 146 /* lines per sector is not supported cpuid2_cache_descriptor(), 147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 148 */ 149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 150 .associativity = 8, .line_size = 64, }, 151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 152 .associativity = 2, .line_size = 64, }, 153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 154 .associativity = 8, .line_size = 64, }, 155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 156 .associativity = 8, .line_size = 32, }, 157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 158 .associativity = 8, .line_size = 32, }, 159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 160 .associativity = 8, .line_size = 32, }, 161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 162 .associativity = 8, .line_size = 32, }, 163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 164 .associativity = 4, .line_size = 64, }, 165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 166 .associativity = 8, .line_size = 64, }, 167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 168 .associativity = 4, .line_size = 64, }, 169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 170 .associativity = 4, .line_size = 64, }, 171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 172 .associativity = 4, .line_size = 64, }, 173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 174 .associativity = 8, .line_size = 64, }, 175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 176 .associativity = 8, .line_size = 64, }, 177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 178 .associativity = 8, .line_size = 64, }, 179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 180 .associativity = 12, .line_size = 64, }, 181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 182 .associativity = 12, .line_size = 64, }, 183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 184 .associativity = 12, .line_size = 64, }, 185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 186 .associativity = 16, .line_size = 64, }, 187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 188 .associativity = 16, .line_size = 64, }, 189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 190 .associativity = 16, .line_size = 64, }, 191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 192 .associativity = 24, .line_size = 64, }, 193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 194 .associativity = 24, .line_size = 64, }, 195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 196 .associativity = 24, .line_size = 64, }, 197 }; 198 199 /* 200 * "CPUID leaf 2 does not report cache descriptor information, 201 * use CPUID leaf 4 to query cache parameters" 202 */ 203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 204 205 /* 206 * Return a CPUID 2 cache descriptor for a given cache. 207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 208 */ 209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 210 { 211 int i; 212 213 assert(cache->size > 0); 214 assert(cache->level > 0); 215 assert(cache->line_size > 0); 216 assert(cache->associativity > 0); 217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 219 if (d->level == cache->level && d->type == cache->type && 220 d->size == cache->size && d->line_size == cache->line_size && 221 d->associativity == cache->associativity) { 222 return i; 223 } 224 } 225 226 return CACHE_DESCRIPTOR_UNAVAILABLE; 227 } 228 229 /* CPUID Leaf 4 constants: */ 230 231 /* EAX: */ 232 #define CACHE_TYPE_D 1 233 #define CACHE_TYPE_I 2 234 #define CACHE_TYPE_UNIFIED 3 235 236 #define CACHE_LEVEL(l) (l << 5) 237 238 #define CACHE_SELF_INIT_LEVEL (1 << 8) 239 240 /* EDX: */ 241 #define CACHE_NO_INVD_SHARING (1 << 0) 242 #define CACHE_INCLUSIVE (1 << 1) 243 #define CACHE_COMPLEX_IDX (1 << 2) 244 245 /* Encode CacheType for CPUID[4].EAX */ 246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 249 0 /* Invalid value */) 250 251 252 /* Encode cache info for CPUID[4] */ 253 static void encode_cache_cpuid4(CPUCacheInfo *cache, 254 int num_apic_ids, int num_cores, 255 uint32_t *eax, uint32_t *ebx, 256 uint32_t *ecx, uint32_t *edx) 257 { 258 assert(cache->size == cache->line_size * cache->associativity * 259 cache->partitions * cache->sets); 260 261 assert(num_apic_ids > 0); 262 *eax = CACHE_TYPE(cache->type) | 263 CACHE_LEVEL(cache->level) | 264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | 265 ((num_cores - 1) << 26) | 266 ((num_apic_ids - 1) << 14); 267 268 assert(cache->line_size > 0); 269 assert(cache->partitions > 0); 270 assert(cache->associativity > 0); 271 /* We don't implement fully-associative caches */ 272 assert(cache->associativity < cache->sets); 273 *ebx = (cache->line_size - 1) | 274 ((cache->partitions - 1) << 12) | 275 ((cache->associativity - 1) << 22); 276 277 assert(cache->sets > 0); 278 *ecx = cache->sets - 1; 279 280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 281 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 283 } 284 285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 287 { 288 assert(cache->size % 1024 == 0); 289 assert(cache->lines_per_tag > 0); 290 assert(cache->associativity > 0); 291 assert(cache->line_size > 0); 292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 293 (cache->lines_per_tag << 8) | (cache->line_size); 294 } 295 296 #define ASSOC_FULL 0xFF 297 298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 300 a == 2 ? 0x2 : \ 301 a == 4 ? 0x4 : \ 302 a == 8 ? 0x6 : \ 303 a == 16 ? 0x8 : \ 304 a == 32 ? 0xA : \ 305 a == 48 ? 0xB : \ 306 a == 64 ? 0xC : \ 307 a == 96 ? 0xD : \ 308 a == 128 ? 0xE : \ 309 a == ASSOC_FULL ? 0xF : \ 310 0 /* invalid value */) 311 312 /* 313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 314 * @l3 can be NULL. 315 */ 316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 317 CPUCacheInfo *l3, 318 uint32_t *ecx, uint32_t *edx) 319 { 320 assert(l2->size % 1024 == 0); 321 assert(l2->associativity > 0); 322 assert(l2->lines_per_tag > 0); 323 assert(l2->line_size > 0); 324 *ecx = ((l2->size / 1024) << 16) | 325 (AMD_ENC_ASSOC(l2->associativity) << 12) | 326 (l2->lines_per_tag << 8) | (l2->line_size); 327 328 if (l3) { 329 assert(l3->size % (512 * 1024) == 0); 330 assert(l3->associativity > 0); 331 assert(l3->lines_per_tag > 0); 332 assert(l3->line_size > 0); 333 *edx = ((l3->size / (512 * 1024)) << 18) | 334 (AMD_ENC_ASSOC(l3->associativity) << 12) | 335 (l3->lines_per_tag << 8) | (l3->line_size); 336 } else { 337 *edx = 0; 338 } 339 } 340 341 /* Encode cache info for CPUID[8000001D] */ 342 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, 343 X86CPUTopoInfo *topo_info, 344 uint32_t *eax, uint32_t *ebx, 345 uint32_t *ecx, uint32_t *edx) 346 { 347 uint32_t l3_cores; 348 unsigned nodes = MAX(topo_info->nodes_per_pkg, 1); 349 350 assert(cache->size == cache->line_size * cache->associativity * 351 cache->partitions * cache->sets); 352 353 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 354 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); 355 356 /* L3 is shared among multiple cores */ 357 if (cache->level == 3) { 358 l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg * 359 topo_info->cores_per_die * 360 topo_info->threads_per_core), 361 nodes); 362 *eax |= (l3_cores - 1) << 14; 363 } else { 364 *eax |= ((topo_info->threads_per_core - 1) << 14); 365 } 366 367 assert(cache->line_size > 0); 368 assert(cache->partitions > 0); 369 assert(cache->associativity > 0); 370 /* We don't implement fully-associative caches */ 371 assert(cache->associativity < cache->sets); 372 *ebx = (cache->line_size - 1) | 373 ((cache->partitions - 1) << 12) | 374 ((cache->associativity - 1) << 22); 375 376 assert(cache->sets > 0); 377 *ecx = cache->sets - 1; 378 379 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 380 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 381 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 382 } 383 384 /* Encode cache info for CPUID[8000001E] */ 385 static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu, 386 uint32_t *eax, uint32_t *ebx, 387 uint32_t *ecx, uint32_t *edx) 388 { 389 X86CPUTopoIDs topo_ids = {0}; 390 unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1); 391 int shift; 392 393 x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids); 394 395 *eax = cpu->apic_id; 396 /* 397 * CPUID_Fn8000001E_EBX 398 * 31:16 Reserved 399 * 15:8 Threads per core (The number of threads per core is 400 * Threads per core + 1) 401 * 7:0 Core id (see bit decoding below) 402 * SMT: 403 * 4:3 node id 404 * 2 Core complex id 405 * 1:0 Core id 406 * Non SMT: 407 * 5:4 node id 408 * 3 Core complex id 409 * 1:0 Core id 410 */ 411 *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) | 412 (topo_ids.core_id); 413 /* 414 * CPUID_Fn8000001E_ECX 415 * 31:11 Reserved 416 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) 417 * 7:0 Node id (see bit decoding below) 418 * 2 Socket id 419 * 1:0 Node id 420 */ 421 if (nodes <= 4) { 422 *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id; 423 } else { 424 /* 425 * Node id fix up. Actual hardware supports up to 4 nodes. But with 426 * more than 32 cores, we may end up with more than 4 nodes. 427 * Node id is a combination of socket id and node id. Only requirement 428 * here is that this number should be unique accross the system. 429 * Shift the socket id to accommodate more nodes. We dont expect both 430 * socket id and node id to be big number at the same time. This is not 431 * an ideal config but we need to to support it. Max nodes we can have 432 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need 433 * 5 bits for nodes. Find the left most set bit to represent the total 434 * number of nodes. find_last_bit returns last set bit(0 based). Left 435 * shift(+1) the socket id to represent all the nodes. 436 */ 437 nodes -= 1; 438 shift = find_last_bit(&nodes, 8); 439 *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) | 440 topo_ids.node_id; 441 } 442 *edx = 0; 443 } 444 445 /* 446 * Definitions of the hardcoded cache entries we expose: 447 * These are legacy cache values. If there is a need to change any 448 * of these values please use builtin_x86_defs 449 */ 450 451 /* L1 data cache: */ 452 static CPUCacheInfo legacy_l1d_cache = { 453 .type = DATA_CACHE, 454 .level = 1, 455 .size = 32 * KiB, 456 .self_init = 1, 457 .line_size = 64, 458 .associativity = 8, 459 .sets = 64, 460 .partitions = 1, 461 .no_invd_sharing = true, 462 }; 463 464 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 465 static CPUCacheInfo legacy_l1d_cache_amd = { 466 .type = DATA_CACHE, 467 .level = 1, 468 .size = 64 * KiB, 469 .self_init = 1, 470 .line_size = 64, 471 .associativity = 2, 472 .sets = 512, 473 .partitions = 1, 474 .lines_per_tag = 1, 475 .no_invd_sharing = true, 476 }; 477 478 /* L1 instruction cache: */ 479 static CPUCacheInfo legacy_l1i_cache = { 480 .type = INSTRUCTION_CACHE, 481 .level = 1, 482 .size = 32 * KiB, 483 .self_init = 1, 484 .line_size = 64, 485 .associativity = 8, 486 .sets = 64, 487 .partitions = 1, 488 .no_invd_sharing = true, 489 }; 490 491 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 492 static CPUCacheInfo legacy_l1i_cache_amd = { 493 .type = INSTRUCTION_CACHE, 494 .level = 1, 495 .size = 64 * KiB, 496 .self_init = 1, 497 .line_size = 64, 498 .associativity = 2, 499 .sets = 512, 500 .partitions = 1, 501 .lines_per_tag = 1, 502 .no_invd_sharing = true, 503 }; 504 505 /* Level 2 unified cache: */ 506 static CPUCacheInfo legacy_l2_cache = { 507 .type = UNIFIED_CACHE, 508 .level = 2, 509 .size = 4 * MiB, 510 .self_init = 1, 511 .line_size = 64, 512 .associativity = 16, 513 .sets = 4096, 514 .partitions = 1, 515 .no_invd_sharing = true, 516 }; 517 518 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 519 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 520 .type = UNIFIED_CACHE, 521 .level = 2, 522 .size = 2 * MiB, 523 .line_size = 64, 524 .associativity = 8, 525 }; 526 527 528 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 529 static CPUCacheInfo legacy_l2_cache_amd = { 530 .type = UNIFIED_CACHE, 531 .level = 2, 532 .size = 512 * KiB, 533 .line_size = 64, 534 .lines_per_tag = 1, 535 .associativity = 16, 536 .sets = 512, 537 .partitions = 1, 538 }; 539 540 /* Level 3 unified cache: */ 541 static CPUCacheInfo legacy_l3_cache = { 542 .type = UNIFIED_CACHE, 543 .level = 3, 544 .size = 16 * MiB, 545 .line_size = 64, 546 .associativity = 16, 547 .sets = 16384, 548 .partitions = 1, 549 .lines_per_tag = 1, 550 .self_init = true, 551 .inclusive = true, 552 .complex_indexing = true, 553 }; 554 555 /* TLB definitions: */ 556 557 #define L1_DTLB_2M_ASSOC 1 558 #define L1_DTLB_2M_ENTRIES 255 559 #define L1_DTLB_4K_ASSOC 1 560 #define L1_DTLB_4K_ENTRIES 255 561 562 #define L1_ITLB_2M_ASSOC 1 563 #define L1_ITLB_2M_ENTRIES 255 564 #define L1_ITLB_4K_ASSOC 1 565 #define L1_ITLB_4K_ENTRIES 255 566 567 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 568 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 569 #define L2_DTLB_4K_ASSOC 4 570 #define L2_DTLB_4K_ENTRIES 512 571 572 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 573 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 574 #define L2_ITLB_4K_ASSOC 4 575 #define L2_ITLB_4K_ENTRIES 512 576 577 /* CPUID Leaf 0x14 constants: */ 578 #define INTEL_PT_MAX_SUBLEAF 0x1 579 /* 580 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 581 * MSR can be accessed; 582 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 583 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 584 * of Intel PT MSRs across warm reset; 585 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 586 */ 587 #define INTEL_PT_MINIMAL_EBX 0xf 588 /* 589 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 590 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 591 * accessed; 592 * bit[01]: ToPA tables can hold any number of output entries, up to the 593 * maximum allowed by the MaskOrTableOffset field of 594 * IA32_RTIT_OUTPUT_MASK_PTRS; 595 * bit[02]: Support Single-Range Output scheme; 596 */ 597 #define INTEL_PT_MINIMAL_ECX 0x7 598 /* generated packets which contain IP payloads have LIP values */ 599 #define INTEL_PT_IP_LIP (1 << 31) 600 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 601 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 602 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 603 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 604 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 605 606 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 607 uint32_t vendor2, uint32_t vendor3) 608 { 609 int i; 610 for (i = 0; i < 4; i++) { 611 dst[i] = vendor1 >> (8 * i); 612 dst[i + 4] = vendor2 >> (8 * i); 613 dst[i + 8] = vendor3 >> (8 * i); 614 } 615 dst[CPUID_VENDOR_SZ] = '\0'; 616 } 617 618 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 619 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 620 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 621 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 622 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 623 CPUID_PSE36 | CPUID_FXSR) 624 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 625 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 626 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 627 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 628 CPUID_PAE | CPUID_SEP | CPUID_APIC) 629 630 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 631 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 632 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 633 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 634 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 635 /* partly implemented: 636 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ 637 /* missing: 638 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 639 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 640 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 641 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 642 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 643 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 644 CPUID_EXT_RDRAND) 645 /* missing: 646 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 647 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 648 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 649 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 650 CPUID_EXT_F16C */ 651 652 #ifdef TARGET_X86_64 653 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 654 #else 655 #define TCG_EXT2_X86_64_FEATURES 0 656 #endif 657 658 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 659 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 660 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 661 TCG_EXT2_X86_64_FEATURES) 662 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 663 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 664 #define TCG_EXT4_FEATURES 0 665 #define TCG_SVM_FEATURES CPUID_SVM_NPT 666 #define TCG_KVM_FEATURES 0 667 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 668 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 669 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 670 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 671 CPUID_7_0_EBX_ERMS) 672 /* missing: 673 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 674 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 675 CPUID_7_0_EBX_RDSEED */ 676 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 677 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 678 CPUID_7_0_ECX_LA57) 679 #define TCG_7_0_EDX_FEATURES 0 680 #define TCG_7_1_EAX_FEATURES 0 681 #define TCG_APM_FEATURES 0 682 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 683 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 684 /* missing: 685 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 686 687 typedef enum FeatureWordType { 688 CPUID_FEATURE_WORD, 689 MSR_FEATURE_WORD, 690 } FeatureWordType; 691 692 typedef struct FeatureWordInfo { 693 FeatureWordType type; 694 /* feature flags names are taken from "Intel Processor Identification and 695 * the CPUID Instruction" and AMD's "CPUID Specification". 696 * In cases of disagreement between feature naming conventions, 697 * aliases may be added. 698 */ 699 const char *feat_names[64]; 700 union { 701 /* If type==CPUID_FEATURE_WORD */ 702 struct { 703 uint32_t eax; /* Input EAX for CPUID */ 704 bool needs_ecx; /* CPUID instruction uses ECX as input */ 705 uint32_t ecx; /* Input ECX value for CPUID */ 706 int reg; /* output register (R_* constant) */ 707 } cpuid; 708 /* If type==MSR_FEATURE_WORD */ 709 struct { 710 uint32_t index; 711 } msr; 712 }; 713 uint64_t tcg_features; /* Feature flags supported by TCG */ 714 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */ 715 uint64_t migratable_flags; /* Feature flags known to be migratable */ 716 /* Features that shouldn't be auto-enabled by "-cpu host" */ 717 uint64_t no_autoenable_flags; 718 } FeatureWordInfo; 719 720 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 721 [FEAT_1_EDX] = { 722 .type = CPUID_FEATURE_WORD, 723 .feat_names = { 724 "fpu", "vme", "de", "pse", 725 "tsc", "msr", "pae", "mce", 726 "cx8", "apic", NULL, "sep", 727 "mtrr", "pge", "mca", "cmov", 728 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 729 NULL, "ds" /* Intel dts */, "acpi", "mmx", 730 "fxsr", "sse", "sse2", "ss", 731 "ht" /* Intel htt */, "tm", "ia64", "pbe", 732 }, 733 .cpuid = {.eax = 1, .reg = R_EDX, }, 734 .tcg_features = TCG_FEATURES, 735 }, 736 [FEAT_1_ECX] = { 737 .type = CPUID_FEATURE_WORD, 738 .feat_names = { 739 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 740 "ds-cpl", "vmx", "smx", "est", 741 "tm2", "ssse3", "cid", NULL, 742 "fma", "cx16", "xtpr", "pdcm", 743 NULL, "pcid", "dca", "sse4.1", 744 "sse4.2", "x2apic", "movbe", "popcnt", 745 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 746 "avx", "f16c", "rdrand", "hypervisor", 747 }, 748 .cpuid = { .eax = 1, .reg = R_ECX, }, 749 .tcg_features = TCG_EXT_FEATURES, 750 }, 751 /* Feature names that are already defined on feature_name[] but 752 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 753 * names on feat_names below. They are copied automatically 754 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 755 */ 756 [FEAT_8000_0001_EDX] = { 757 .type = CPUID_FEATURE_WORD, 758 .feat_names = { 759 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 760 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 761 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 762 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 763 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 764 "nx", NULL, "mmxext", NULL /* mmx */, 765 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 766 NULL, "lm", "3dnowext", "3dnow", 767 }, 768 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 769 .tcg_features = TCG_EXT2_FEATURES, 770 }, 771 [FEAT_8000_0001_ECX] = { 772 .type = CPUID_FEATURE_WORD, 773 .feat_names = { 774 "lahf-lm", "cmp-legacy", "svm", "extapic", 775 "cr8legacy", "abm", "sse4a", "misalignsse", 776 "3dnowprefetch", "osvw", "ibs", "xop", 777 "skinit", "wdt", NULL, "lwp", 778 "fma4", "tce", NULL, "nodeid-msr", 779 NULL, "tbm", "topoext", "perfctr-core", 780 "perfctr-nb", NULL, NULL, NULL, 781 NULL, NULL, NULL, NULL, 782 }, 783 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 784 .tcg_features = TCG_EXT3_FEATURES, 785 /* 786 * TOPOEXT is always allowed but can't be enabled blindly by 787 * "-cpu host", as it requires consistent cache topology info 788 * to be provided so it doesn't confuse guests. 789 */ 790 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 791 }, 792 [FEAT_C000_0001_EDX] = { 793 .type = CPUID_FEATURE_WORD, 794 .feat_names = { 795 NULL, NULL, "xstore", "xstore-en", 796 NULL, NULL, "xcrypt", "xcrypt-en", 797 "ace2", "ace2-en", "phe", "phe-en", 798 "pmm", "pmm-en", NULL, NULL, 799 NULL, NULL, NULL, NULL, 800 NULL, NULL, NULL, NULL, 801 NULL, NULL, NULL, NULL, 802 NULL, NULL, NULL, NULL, 803 }, 804 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 805 .tcg_features = TCG_EXT4_FEATURES, 806 }, 807 [FEAT_KVM] = { 808 .type = CPUID_FEATURE_WORD, 809 .feat_names = { 810 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 811 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 812 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 813 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL, 814 NULL, NULL, NULL, NULL, 815 NULL, NULL, NULL, NULL, 816 "kvmclock-stable-bit", NULL, NULL, NULL, 817 NULL, NULL, NULL, NULL, 818 }, 819 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 820 .tcg_features = TCG_KVM_FEATURES, 821 }, 822 [FEAT_KVM_HINTS] = { 823 .type = CPUID_FEATURE_WORD, 824 .feat_names = { 825 "kvm-hint-dedicated", NULL, NULL, NULL, 826 NULL, NULL, NULL, NULL, 827 NULL, NULL, NULL, NULL, 828 NULL, NULL, NULL, NULL, 829 NULL, NULL, NULL, NULL, 830 NULL, NULL, NULL, NULL, 831 NULL, NULL, NULL, NULL, 832 NULL, NULL, NULL, NULL, 833 }, 834 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 835 .tcg_features = TCG_KVM_FEATURES, 836 /* 837 * KVM hints aren't auto-enabled by -cpu host, they need to be 838 * explicitly enabled in the command-line. 839 */ 840 .no_autoenable_flags = ~0U, 841 }, 842 /* 843 * .feat_names are commented out for Hyper-V enlightenments because we 844 * don't want to have two different ways for enabling them on QEMU command 845 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require 846 * enabling several feature bits simultaneously, exposing these bits 847 * individually may just confuse guests. 848 */ 849 [FEAT_HYPERV_EAX] = { 850 .type = CPUID_FEATURE_WORD, 851 .feat_names = { 852 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 853 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 854 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 855 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 856 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 857 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 858 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 859 NULL, NULL, 860 NULL, NULL, NULL, NULL, 861 NULL, NULL, NULL, NULL, 862 NULL, NULL, NULL, NULL, 863 NULL, NULL, NULL, NULL, 864 }, 865 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 866 }, 867 [FEAT_HYPERV_EBX] = { 868 .type = CPUID_FEATURE_WORD, 869 .feat_names = { 870 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 871 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 872 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 873 NULL /* hv_create_port */, NULL /* hv_connect_port */, 874 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 875 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 876 NULL, NULL, 877 NULL, NULL, NULL, NULL, 878 NULL, NULL, NULL, NULL, 879 NULL, NULL, NULL, NULL, 880 NULL, NULL, NULL, NULL, 881 }, 882 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 883 }, 884 [FEAT_HYPERV_EDX] = { 885 .type = CPUID_FEATURE_WORD, 886 .feat_names = { 887 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 888 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 889 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 890 NULL, NULL, 891 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 892 NULL, NULL, NULL, NULL, 893 NULL, NULL, NULL, NULL, 894 NULL, NULL, NULL, NULL, 895 NULL, NULL, NULL, NULL, 896 NULL, NULL, NULL, NULL, 897 }, 898 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 899 }, 900 [FEAT_HV_RECOMM_EAX] = { 901 .type = CPUID_FEATURE_WORD, 902 .feat_names = { 903 NULL /* hv_recommend_pv_as_switch */, 904 NULL /* hv_recommend_pv_tlbflush_local */, 905 NULL /* hv_recommend_pv_tlbflush_remote */, 906 NULL /* hv_recommend_msr_apic_access */, 907 NULL /* hv_recommend_msr_reset */, 908 NULL /* hv_recommend_relaxed_timing */, 909 NULL /* hv_recommend_dma_remapping */, 910 NULL /* hv_recommend_int_remapping */, 911 NULL /* hv_recommend_x2apic_msrs */, 912 NULL /* hv_recommend_autoeoi_deprecation */, 913 NULL /* hv_recommend_pv_ipi */, 914 NULL /* hv_recommend_ex_hypercalls */, 915 NULL /* hv_hypervisor_is_nested */, 916 NULL /* hv_recommend_int_mbec */, 917 NULL /* hv_recommend_evmcs */, 918 NULL, 919 NULL, NULL, NULL, NULL, 920 NULL, NULL, NULL, NULL, 921 NULL, NULL, NULL, NULL, 922 NULL, NULL, NULL, NULL, 923 }, 924 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 925 }, 926 [FEAT_HV_NESTED_EAX] = { 927 .type = CPUID_FEATURE_WORD, 928 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 929 }, 930 [FEAT_SVM] = { 931 .type = CPUID_FEATURE_WORD, 932 .feat_names = { 933 "npt", "lbrv", "svm-lock", "nrip-save", 934 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 935 NULL, NULL, "pause-filter", NULL, 936 "pfthreshold", NULL, NULL, NULL, 937 NULL, NULL, NULL, NULL, 938 NULL, NULL, NULL, NULL, 939 NULL, NULL, NULL, NULL, 940 NULL, NULL, NULL, NULL, 941 }, 942 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 943 .tcg_features = TCG_SVM_FEATURES, 944 }, 945 [FEAT_7_0_EBX] = { 946 .type = CPUID_FEATURE_WORD, 947 .feat_names = { 948 "fsgsbase", "tsc-adjust", NULL, "bmi1", 949 "hle", "avx2", NULL, "smep", 950 "bmi2", "erms", "invpcid", "rtm", 951 NULL, NULL, "mpx", NULL, 952 "avx512f", "avx512dq", "rdseed", "adx", 953 "smap", "avx512ifma", "pcommit", "clflushopt", 954 "clwb", "intel-pt", "avx512pf", "avx512er", 955 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 956 }, 957 .cpuid = { 958 .eax = 7, 959 .needs_ecx = true, .ecx = 0, 960 .reg = R_EBX, 961 }, 962 .tcg_features = TCG_7_0_EBX_FEATURES, 963 }, 964 [FEAT_7_0_ECX] = { 965 .type = CPUID_FEATURE_WORD, 966 .feat_names = { 967 NULL, "avx512vbmi", "umip", "pku", 968 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL, 969 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 970 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 971 "la57", NULL, NULL, NULL, 972 NULL, NULL, "rdpid", NULL, 973 NULL, "cldemote", NULL, "movdiri", 974 "movdir64b", NULL, NULL, NULL, 975 }, 976 .cpuid = { 977 .eax = 7, 978 .needs_ecx = true, .ecx = 0, 979 .reg = R_ECX, 980 }, 981 .tcg_features = TCG_7_0_ECX_FEATURES, 982 }, 983 [FEAT_7_0_EDX] = { 984 .type = CPUID_FEATURE_WORD, 985 .feat_names = { 986 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 987 "fsrm", NULL, NULL, NULL, 988 "avx512-vp2intersect", NULL, "md-clear", NULL, 989 NULL, NULL, "serialize", NULL, 990 "tsx-ldtrk", NULL, NULL /* pconfig */, NULL, 991 NULL, NULL, NULL, NULL, 992 NULL, NULL, "spec-ctrl", "stibp", 993 NULL, "arch-capabilities", "core-capability", "ssbd", 994 }, 995 .cpuid = { 996 .eax = 7, 997 .needs_ecx = true, .ecx = 0, 998 .reg = R_EDX, 999 }, 1000 .tcg_features = TCG_7_0_EDX_FEATURES, 1001 }, 1002 [FEAT_7_1_EAX] = { 1003 .type = CPUID_FEATURE_WORD, 1004 .feat_names = { 1005 NULL, NULL, NULL, NULL, 1006 NULL, "avx512-bf16", NULL, NULL, 1007 NULL, NULL, NULL, NULL, 1008 NULL, NULL, NULL, NULL, 1009 NULL, NULL, NULL, NULL, 1010 NULL, NULL, NULL, NULL, 1011 NULL, NULL, NULL, NULL, 1012 NULL, NULL, NULL, NULL, 1013 }, 1014 .cpuid = { 1015 .eax = 7, 1016 .needs_ecx = true, .ecx = 1, 1017 .reg = R_EAX, 1018 }, 1019 .tcg_features = TCG_7_1_EAX_FEATURES, 1020 }, 1021 [FEAT_8000_0007_EDX] = { 1022 .type = CPUID_FEATURE_WORD, 1023 .feat_names = { 1024 NULL, NULL, NULL, NULL, 1025 NULL, NULL, NULL, NULL, 1026 "invtsc", NULL, NULL, NULL, 1027 NULL, NULL, NULL, NULL, 1028 NULL, NULL, NULL, NULL, 1029 NULL, NULL, NULL, NULL, 1030 NULL, NULL, NULL, NULL, 1031 NULL, NULL, NULL, NULL, 1032 }, 1033 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1034 .tcg_features = TCG_APM_FEATURES, 1035 .unmigratable_flags = CPUID_APM_INVTSC, 1036 }, 1037 [FEAT_8000_0008_EBX] = { 1038 .type = CPUID_FEATURE_WORD, 1039 .feat_names = { 1040 "clzero", NULL, "xsaveerptr", NULL, 1041 NULL, NULL, NULL, NULL, 1042 NULL, "wbnoinvd", NULL, NULL, 1043 "ibpb", NULL, NULL, "amd-stibp", 1044 NULL, NULL, NULL, NULL, 1045 NULL, NULL, NULL, NULL, 1046 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1047 NULL, NULL, NULL, NULL, 1048 }, 1049 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1050 .tcg_features = 0, 1051 .unmigratable_flags = 0, 1052 }, 1053 [FEAT_XSAVE] = { 1054 .type = CPUID_FEATURE_WORD, 1055 .feat_names = { 1056 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1057 NULL, NULL, NULL, NULL, 1058 NULL, NULL, NULL, NULL, 1059 NULL, NULL, NULL, NULL, 1060 NULL, NULL, NULL, NULL, 1061 NULL, NULL, NULL, NULL, 1062 NULL, NULL, NULL, NULL, 1063 NULL, NULL, NULL, NULL, 1064 }, 1065 .cpuid = { 1066 .eax = 0xd, 1067 .needs_ecx = true, .ecx = 1, 1068 .reg = R_EAX, 1069 }, 1070 .tcg_features = TCG_XSAVE_FEATURES, 1071 }, 1072 [FEAT_6_EAX] = { 1073 .type = CPUID_FEATURE_WORD, 1074 .feat_names = { 1075 NULL, NULL, "arat", NULL, 1076 NULL, NULL, NULL, NULL, 1077 NULL, NULL, NULL, NULL, 1078 NULL, NULL, NULL, NULL, 1079 NULL, NULL, NULL, NULL, 1080 NULL, NULL, NULL, NULL, 1081 NULL, NULL, NULL, NULL, 1082 NULL, NULL, NULL, NULL, 1083 }, 1084 .cpuid = { .eax = 6, .reg = R_EAX, }, 1085 .tcg_features = TCG_6_EAX_FEATURES, 1086 }, 1087 [FEAT_XSAVE_COMP_LO] = { 1088 .type = CPUID_FEATURE_WORD, 1089 .cpuid = { 1090 .eax = 0xD, 1091 .needs_ecx = true, .ecx = 0, 1092 .reg = R_EAX, 1093 }, 1094 .tcg_features = ~0U, 1095 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1096 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1097 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1098 XSTATE_PKRU_MASK, 1099 }, 1100 [FEAT_XSAVE_COMP_HI] = { 1101 .type = CPUID_FEATURE_WORD, 1102 .cpuid = { 1103 .eax = 0xD, 1104 .needs_ecx = true, .ecx = 0, 1105 .reg = R_EDX, 1106 }, 1107 .tcg_features = ~0U, 1108 }, 1109 /*Below are MSR exposed features*/ 1110 [FEAT_ARCH_CAPABILITIES] = { 1111 .type = MSR_FEATURE_WORD, 1112 .feat_names = { 1113 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1114 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", 1115 "taa-no", NULL, NULL, NULL, 1116 NULL, NULL, NULL, NULL, 1117 NULL, NULL, NULL, NULL, 1118 NULL, NULL, NULL, NULL, 1119 NULL, NULL, NULL, NULL, 1120 NULL, NULL, NULL, NULL, 1121 }, 1122 .msr = { 1123 .index = MSR_IA32_ARCH_CAPABILITIES, 1124 }, 1125 }, 1126 [FEAT_CORE_CAPABILITY] = { 1127 .type = MSR_FEATURE_WORD, 1128 .feat_names = { 1129 NULL, NULL, NULL, NULL, 1130 NULL, "split-lock-detect", NULL, NULL, 1131 NULL, NULL, NULL, NULL, 1132 NULL, NULL, NULL, NULL, 1133 NULL, NULL, NULL, NULL, 1134 NULL, NULL, NULL, NULL, 1135 NULL, NULL, NULL, NULL, 1136 NULL, NULL, NULL, NULL, 1137 }, 1138 .msr = { 1139 .index = MSR_IA32_CORE_CAPABILITY, 1140 }, 1141 }, 1142 [FEAT_PERF_CAPABILITIES] = { 1143 .type = MSR_FEATURE_WORD, 1144 .feat_names = { 1145 NULL, NULL, NULL, NULL, 1146 NULL, NULL, NULL, NULL, 1147 NULL, NULL, NULL, NULL, 1148 NULL, "full-width-write", NULL, NULL, 1149 NULL, NULL, NULL, NULL, 1150 NULL, NULL, NULL, NULL, 1151 NULL, NULL, NULL, NULL, 1152 NULL, NULL, NULL, NULL, 1153 }, 1154 .msr = { 1155 .index = MSR_IA32_PERF_CAPABILITIES, 1156 }, 1157 }, 1158 1159 [FEAT_VMX_PROCBASED_CTLS] = { 1160 .type = MSR_FEATURE_WORD, 1161 .feat_names = { 1162 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset", 1163 NULL, NULL, NULL, "vmx-hlt-exit", 1164 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit", 1165 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit", 1166 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit", 1167 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit", 1168 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf", 1169 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls", 1170 }, 1171 .msr = { 1172 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1173 } 1174 }, 1175 1176 [FEAT_VMX_SECONDARY_CTLS] = { 1177 .type = MSR_FEATURE_WORD, 1178 .feat_names = { 1179 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit", 1180 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest", 1181 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit", 1182 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit", 1183 "vmx-rdseed-exit", "vmx-pml", NULL, NULL, 1184 "vmx-xsaves", NULL, NULL, NULL, 1185 NULL, NULL, NULL, NULL, 1186 NULL, NULL, NULL, NULL, 1187 }, 1188 .msr = { 1189 .index = MSR_IA32_VMX_PROCBASED_CTLS2, 1190 } 1191 }, 1192 1193 [FEAT_VMX_PINBASED_CTLS] = { 1194 .type = MSR_FEATURE_WORD, 1195 .feat_names = { 1196 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit", 1197 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr", 1198 NULL, NULL, NULL, NULL, 1199 NULL, NULL, NULL, NULL, 1200 NULL, NULL, NULL, NULL, 1201 NULL, NULL, NULL, NULL, 1202 NULL, NULL, NULL, NULL, 1203 NULL, NULL, NULL, NULL, 1204 }, 1205 .msr = { 1206 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1207 } 1208 }, 1209 1210 [FEAT_VMX_EXIT_CTLS] = { 1211 .type = MSR_FEATURE_WORD, 1212 /* 1213 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from 1214 * the LM CPUID bit. 1215 */ 1216 .feat_names = { 1217 NULL, NULL, "vmx-exit-nosave-debugctl", NULL, 1218 NULL, NULL, NULL, NULL, 1219 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL, 1220 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr", 1221 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat", 1222 "vmx-exit-save-efer", "vmx-exit-load-efer", 1223 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", 1224 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, 1225 NULL, NULL, NULL, NULL, 1226 }, 1227 .msr = { 1228 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, 1229 } 1230 }, 1231 1232 [FEAT_VMX_ENTRY_CTLS] = { 1233 .type = MSR_FEATURE_WORD, 1234 .feat_names = { 1235 NULL, NULL, "vmx-entry-noload-debugctl", NULL, 1236 NULL, NULL, NULL, NULL, 1237 NULL, "vmx-entry-ia32e-mode", NULL, NULL, 1238 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", 1239 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, 1240 NULL, NULL, NULL, NULL, 1241 NULL, NULL, NULL, NULL, 1242 NULL, NULL, NULL, NULL, 1243 }, 1244 .msr = { 1245 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1246 } 1247 }, 1248 1249 [FEAT_VMX_MISC] = { 1250 .type = MSR_FEATURE_WORD, 1251 .feat_names = { 1252 NULL, NULL, NULL, NULL, 1253 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown", 1254 "vmx-activity-wait-sipi", NULL, NULL, NULL, 1255 NULL, NULL, NULL, NULL, 1256 NULL, NULL, NULL, NULL, 1257 NULL, NULL, NULL, NULL, 1258 NULL, NULL, NULL, NULL, 1259 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL, 1260 }, 1261 .msr = { 1262 .index = MSR_IA32_VMX_MISC, 1263 } 1264 }, 1265 1266 [FEAT_VMX_EPT_VPID_CAPS] = { 1267 .type = MSR_FEATURE_WORD, 1268 .feat_names = { 1269 "vmx-ept-execonly", NULL, NULL, NULL, 1270 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5", 1271 NULL, NULL, NULL, NULL, 1272 NULL, NULL, NULL, NULL, 1273 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL, 1274 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL, 1275 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL, 1276 NULL, NULL, NULL, NULL, 1277 "vmx-invvpid", NULL, NULL, NULL, 1278 NULL, NULL, NULL, NULL, 1279 "vmx-invvpid-single-addr", "vmx-invept-single-context", 1280 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals", 1281 NULL, NULL, NULL, NULL, 1282 NULL, NULL, NULL, NULL, 1283 NULL, NULL, NULL, NULL, 1284 NULL, NULL, NULL, NULL, 1285 NULL, NULL, NULL, NULL, 1286 }, 1287 .msr = { 1288 .index = MSR_IA32_VMX_EPT_VPID_CAP, 1289 } 1290 }, 1291 1292 [FEAT_VMX_BASIC] = { 1293 .type = MSR_FEATURE_WORD, 1294 .feat_names = { 1295 [54] = "vmx-ins-outs", 1296 [55] = "vmx-true-ctls", 1297 }, 1298 .msr = { 1299 .index = MSR_IA32_VMX_BASIC, 1300 }, 1301 /* Just to be safe - we don't support setting the MSEG version field. */ 1302 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR, 1303 }, 1304 1305 [FEAT_VMX_VMFUNC] = { 1306 .type = MSR_FEATURE_WORD, 1307 .feat_names = { 1308 [0] = "vmx-eptp-switching", 1309 }, 1310 .msr = { 1311 .index = MSR_IA32_VMX_VMFUNC, 1312 } 1313 }, 1314 1315 }; 1316 1317 typedef struct FeatureMask { 1318 FeatureWord index; 1319 uint64_t mask; 1320 } FeatureMask; 1321 1322 typedef struct FeatureDep { 1323 FeatureMask from, to; 1324 } FeatureDep; 1325 1326 static FeatureDep feature_dependencies[] = { 1327 { 1328 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES }, 1329 .to = { FEAT_ARCH_CAPABILITIES, ~0ull }, 1330 }, 1331 { 1332 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY }, 1333 .to = { FEAT_CORE_CAPABILITY, ~0ull }, 1334 }, 1335 { 1336 .from = { FEAT_1_ECX, CPUID_EXT_PDCM }, 1337 .to = { FEAT_PERF_CAPABILITIES, ~0ull }, 1338 }, 1339 { 1340 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1341 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull }, 1342 }, 1343 { 1344 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1345 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull }, 1346 }, 1347 { 1348 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1349 .to = { FEAT_VMX_EXIT_CTLS, ~0ull }, 1350 }, 1351 { 1352 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1353 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull }, 1354 }, 1355 { 1356 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1357 .to = { FEAT_VMX_MISC, ~0ull }, 1358 }, 1359 { 1360 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1361 .to = { FEAT_VMX_BASIC, ~0ull }, 1362 }, 1363 { 1364 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM }, 1365 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE }, 1366 }, 1367 { 1368 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS }, 1369 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull }, 1370 }, 1371 { 1372 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES }, 1373 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES }, 1374 }, 1375 { 1376 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND }, 1377 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING }, 1378 }, 1379 { 1380 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID }, 1381 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID }, 1382 }, 1383 { 1384 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED }, 1385 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING }, 1386 }, 1387 { 1388 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP }, 1389 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP }, 1390 }, 1391 { 1392 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1393 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull }, 1394 }, 1395 { 1396 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1397 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST }, 1398 }, 1399 { 1400 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID }, 1401 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 }, 1402 }, 1403 { 1404 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC }, 1405 .to = { FEAT_VMX_VMFUNC, ~0ull }, 1406 }, 1407 { 1408 .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM }, 1409 .to = { FEAT_SVM, ~0ull }, 1410 }, 1411 }; 1412 1413 typedef struct X86RegisterInfo32 { 1414 /* Name of register */ 1415 const char *name; 1416 /* QAPI enum value register */ 1417 X86CPURegister32 qapi_enum; 1418 } X86RegisterInfo32; 1419 1420 #define REGISTER(reg) \ 1421 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } 1422 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { 1423 REGISTER(EAX), 1424 REGISTER(ECX), 1425 REGISTER(EDX), 1426 REGISTER(EBX), 1427 REGISTER(ESP), 1428 REGISTER(EBP), 1429 REGISTER(ESI), 1430 REGISTER(EDI), 1431 }; 1432 #undef REGISTER 1433 1434 typedef struct ExtSaveArea { 1435 uint32_t feature, bits; 1436 uint32_t offset, size; 1437 } ExtSaveArea; 1438 1439 static const ExtSaveArea x86_ext_save_areas[] = { 1440 [XSTATE_FP_BIT] = { 1441 /* x87 FP state component is always enabled if XSAVE is supported */ 1442 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1443 /* x87 state is in the legacy region of the XSAVE area */ 1444 .offset = 0, 1445 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1446 }, 1447 [XSTATE_SSE_BIT] = { 1448 /* SSE state component is always enabled if XSAVE is supported */ 1449 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1450 /* SSE state is in the legacy region of the XSAVE area */ 1451 .offset = 0, 1452 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1453 }, 1454 [XSTATE_YMM_BIT] = 1455 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, 1456 .offset = offsetof(X86XSaveArea, avx_state), 1457 .size = sizeof(XSaveAVX) }, 1458 [XSTATE_BNDREGS_BIT] = 1459 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1460 .offset = offsetof(X86XSaveArea, bndreg_state), 1461 .size = sizeof(XSaveBNDREG) }, 1462 [XSTATE_BNDCSR_BIT] = 1463 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1464 .offset = offsetof(X86XSaveArea, bndcsr_state), 1465 .size = sizeof(XSaveBNDCSR) }, 1466 [XSTATE_OPMASK_BIT] = 1467 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1468 .offset = offsetof(X86XSaveArea, opmask_state), 1469 .size = sizeof(XSaveOpmask) }, 1470 [XSTATE_ZMM_Hi256_BIT] = 1471 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1472 .offset = offsetof(X86XSaveArea, zmm_hi256_state), 1473 .size = sizeof(XSaveZMM_Hi256) }, 1474 [XSTATE_Hi16_ZMM_BIT] = 1475 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1476 .offset = offsetof(X86XSaveArea, hi16_zmm_state), 1477 .size = sizeof(XSaveHi16_ZMM) }, 1478 [XSTATE_PKRU_BIT] = 1479 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, 1480 .offset = offsetof(X86XSaveArea, pkru_state), 1481 .size = sizeof(XSavePKRU) }, 1482 }; 1483 1484 static uint32_t xsave_area_size(uint64_t mask) 1485 { 1486 int i; 1487 uint64_t ret = 0; 1488 1489 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 1490 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 1491 if ((mask >> i) & 1) { 1492 ret = MAX(ret, esa->offset + esa->size); 1493 } 1494 } 1495 return ret; 1496 } 1497 1498 static inline bool accel_uses_host_cpuid(void) 1499 { 1500 return kvm_enabled() || hvf_enabled(); 1501 } 1502 1503 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) 1504 { 1505 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | 1506 cpu->env.features[FEAT_XSAVE_COMP_LO]; 1507 } 1508 1509 const char *get_register_name_32(unsigned int reg) 1510 { 1511 if (reg >= CPU_NB_REGS32) { 1512 return NULL; 1513 } 1514 return x86_reg_info_32[reg].name; 1515 } 1516 1517 /* 1518 * Returns the set of feature flags that are supported and migratable by 1519 * QEMU, for a given FeatureWord. 1520 */ 1521 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) 1522 { 1523 FeatureWordInfo *wi = &feature_word_info[w]; 1524 uint64_t r = 0; 1525 int i; 1526 1527 for (i = 0; i < 64; i++) { 1528 uint64_t f = 1ULL << i; 1529 1530 /* If the feature name is known, it is implicitly considered migratable, 1531 * unless it is explicitly set in unmigratable_flags */ 1532 if ((wi->migratable_flags & f) || 1533 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 1534 r |= f; 1535 } 1536 } 1537 return r; 1538 } 1539 1540 void host_cpuid(uint32_t function, uint32_t count, 1541 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 1542 { 1543 uint32_t vec[4]; 1544 1545 #ifdef __x86_64__ 1546 asm volatile("cpuid" 1547 : "=a"(vec[0]), "=b"(vec[1]), 1548 "=c"(vec[2]), "=d"(vec[3]) 1549 : "0"(function), "c"(count) : "cc"); 1550 #elif defined(__i386__) 1551 asm volatile("pusha \n\t" 1552 "cpuid \n\t" 1553 "mov %%eax, 0(%2) \n\t" 1554 "mov %%ebx, 4(%2) \n\t" 1555 "mov %%ecx, 8(%2) \n\t" 1556 "mov %%edx, 12(%2) \n\t" 1557 "popa" 1558 : : "a"(function), "c"(count), "S"(vec) 1559 : "memory", "cc"); 1560 #else 1561 abort(); 1562 #endif 1563 1564 if (eax) 1565 *eax = vec[0]; 1566 if (ebx) 1567 *ebx = vec[1]; 1568 if (ecx) 1569 *ecx = vec[2]; 1570 if (edx) 1571 *edx = vec[3]; 1572 } 1573 1574 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) 1575 { 1576 uint32_t eax, ebx, ecx, edx; 1577 1578 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 1579 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); 1580 1581 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 1582 if (family) { 1583 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 1584 } 1585 if (model) { 1586 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 1587 } 1588 if (stepping) { 1589 *stepping = eax & 0x0F; 1590 } 1591 } 1592 1593 /* CPU class name definitions: */ 1594 1595 /* Return type name for a given CPU model name 1596 * Caller is responsible for freeing the returned string. 1597 */ 1598 static char *x86_cpu_type_name(const char *model_name) 1599 { 1600 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); 1601 } 1602 1603 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model) 1604 { 1605 g_autofree char *typename = x86_cpu_type_name(cpu_model); 1606 return object_class_by_name(typename); 1607 } 1608 1609 static char *x86_cpu_class_get_model_name(X86CPUClass *cc) 1610 { 1611 const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); 1612 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); 1613 return g_strndup(class_name, 1614 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); 1615 } 1616 1617 typedef struct PropValue { 1618 const char *prop, *value; 1619 } PropValue; 1620 1621 typedef struct X86CPUVersionDefinition { 1622 X86CPUVersion version; 1623 const char *alias; 1624 const char *note; 1625 PropValue *props; 1626 } X86CPUVersionDefinition; 1627 1628 /* Base definition for a CPU model */ 1629 typedef struct X86CPUDefinition { 1630 const char *name; 1631 uint32_t level; 1632 uint32_t xlevel; 1633 /* vendor is zero-terminated, 12 character ASCII string */ 1634 char vendor[CPUID_VENDOR_SZ + 1]; 1635 int family; 1636 int model; 1637 int stepping; 1638 FeatureWordArray features; 1639 const char *model_id; 1640 CPUCaches *cache_info; 1641 1642 /* Use AMD EPYC encoding for apic id */ 1643 bool use_epyc_apic_id_encoding; 1644 1645 /* 1646 * Definitions for alternative versions of CPU model. 1647 * List is terminated by item with version == 0. 1648 * If NULL, version 1 will be registered automatically. 1649 */ 1650 const X86CPUVersionDefinition *versions; 1651 } X86CPUDefinition; 1652 1653 /* Reference to a specific CPU model version */ 1654 struct X86CPUModel { 1655 /* Base CPU definition */ 1656 X86CPUDefinition *cpudef; 1657 /* CPU model version */ 1658 X86CPUVersion version; 1659 const char *note; 1660 /* 1661 * If true, this is an alias CPU model. 1662 * This matters only for "-cpu help" and query-cpu-definitions 1663 */ 1664 bool is_alias; 1665 }; 1666 1667 /* Get full model name for CPU version */ 1668 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef, 1669 X86CPUVersion version) 1670 { 1671 assert(version > 0); 1672 return g_strdup_printf("%s-v%d", cpudef->name, (int)version); 1673 } 1674 1675 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def) 1676 { 1677 /* When X86CPUDefinition::versions is NULL, we register only v1 */ 1678 static const X86CPUVersionDefinition default_version_list[] = { 1679 { 1 }, 1680 { /* end of list */ } 1681 }; 1682 1683 return def->versions ?: default_version_list; 1684 } 1685 1686 bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type) 1687 { 1688 X86CPUClass *xcc = X86_CPU_CLASS(object_class_by_name(cpu_type)); 1689 1690 assert(xcc); 1691 if (xcc->model && xcc->model->cpudef) { 1692 return xcc->model->cpudef->use_epyc_apic_id_encoding; 1693 } else { 1694 return false; 1695 } 1696 } 1697 1698 static CPUCaches epyc_cache_info = { 1699 .l1d_cache = &(CPUCacheInfo) { 1700 .type = DATA_CACHE, 1701 .level = 1, 1702 .size = 32 * KiB, 1703 .line_size = 64, 1704 .associativity = 8, 1705 .partitions = 1, 1706 .sets = 64, 1707 .lines_per_tag = 1, 1708 .self_init = 1, 1709 .no_invd_sharing = true, 1710 }, 1711 .l1i_cache = &(CPUCacheInfo) { 1712 .type = INSTRUCTION_CACHE, 1713 .level = 1, 1714 .size = 64 * KiB, 1715 .line_size = 64, 1716 .associativity = 4, 1717 .partitions = 1, 1718 .sets = 256, 1719 .lines_per_tag = 1, 1720 .self_init = 1, 1721 .no_invd_sharing = true, 1722 }, 1723 .l2_cache = &(CPUCacheInfo) { 1724 .type = UNIFIED_CACHE, 1725 .level = 2, 1726 .size = 512 * KiB, 1727 .line_size = 64, 1728 .associativity = 8, 1729 .partitions = 1, 1730 .sets = 1024, 1731 .lines_per_tag = 1, 1732 }, 1733 .l3_cache = &(CPUCacheInfo) { 1734 .type = UNIFIED_CACHE, 1735 .level = 3, 1736 .size = 8 * MiB, 1737 .line_size = 64, 1738 .associativity = 16, 1739 .partitions = 1, 1740 .sets = 8192, 1741 .lines_per_tag = 1, 1742 .self_init = true, 1743 .inclusive = true, 1744 .complex_indexing = true, 1745 }, 1746 }; 1747 1748 static CPUCaches epyc_rome_cache_info = { 1749 .l1d_cache = &(CPUCacheInfo) { 1750 .type = DATA_CACHE, 1751 .level = 1, 1752 .size = 32 * KiB, 1753 .line_size = 64, 1754 .associativity = 8, 1755 .partitions = 1, 1756 .sets = 64, 1757 .lines_per_tag = 1, 1758 .self_init = 1, 1759 .no_invd_sharing = true, 1760 }, 1761 .l1i_cache = &(CPUCacheInfo) { 1762 .type = INSTRUCTION_CACHE, 1763 .level = 1, 1764 .size = 32 * KiB, 1765 .line_size = 64, 1766 .associativity = 8, 1767 .partitions = 1, 1768 .sets = 64, 1769 .lines_per_tag = 1, 1770 .self_init = 1, 1771 .no_invd_sharing = true, 1772 }, 1773 .l2_cache = &(CPUCacheInfo) { 1774 .type = UNIFIED_CACHE, 1775 .level = 2, 1776 .size = 512 * KiB, 1777 .line_size = 64, 1778 .associativity = 8, 1779 .partitions = 1, 1780 .sets = 1024, 1781 .lines_per_tag = 1, 1782 }, 1783 .l3_cache = &(CPUCacheInfo) { 1784 .type = UNIFIED_CACHE, 1785 .level = 3, 1786 .size = 16 * MiB, 1787 .line_size = 64, 1788 .associativity = 16, 1789 .partitions = 1, 1790 .sets = 16384, 1791 .lines_per_tag = 1, 1792 .self_init = true, 1793 .inclusive = true, 1794 .complex_indexing = true, 1795 }, 1796 }; 1797 1798 /* The following VMX features are not supported by KVM and are left out in the 1799 * CPU definitions: 1800 * 1801 * Dual-monitor support (all processors) 1802 * Entry to SMM 1803 * Deactivate dual-monitor treatment 1804 * Number of CR3-target values 1805 * Shutdown activity state 1806 * Wait-for-SIPI activity state 1807 * PAUSE-loop exiting (Westmere and newer) 1808 * EPT-violation #VE (Broadwell and newer) 1809 * Inject event with insn length=0 (Skylake and newer) 1810 * Conceal non-root operation from PT 1811 * Conceal VM exits from PT 1812 * Conceal VM entries from PT 1813 * Enable ENCLS exiting 1814 * Mode-based execute control (XS/XU) 1815 s TSC scaling (Skylake Server and newer) 1816 * GPA translation for PT (IceLake and newer) 1817 * User wait and pause 1818 * ENCLV exiting 1819 * Load IA32_RTIT_CTL 1820 * Clear IA32_RTIT_CTL 1821 * Advanced VM-exit information for EPT violations 1822 * Sub-page write permissions 1823 * PT in VMX operation 1824 */ 1825 1826 static X86CPUDefinition builtin_x86_defs[] = { 1827 { 1828 .name = "qemu64", 1829 .level = 0xd, 1830 .vendor = CPUID_VENDOR_AMD, 1831 .family = 6, 1832 .model = 6, 1833 .stepping = 3, 1834 .features[FEAT_1_EDX] = 1835 PPRO_FEATURES | 1836 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1837 CPUID_PSE36, 1838 .features[FEAT_1_ECX] = 1839 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1840 .features[FEAT_8000_0001_EDX] = 1841 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1842 .features[FEAT_8000_0001_ECX] = 1843 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1844 .xlevel = 0x8000000A, 1845 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1846 }, 1847 { 1848 .name = "phenom", 1849 .level = 5, 1850 .vendor = CPUID_VENDOR_AMD, 1851 .family = 16, 1852 .model = 2, 1853 .stepping = 3, 1854 /* Missing: CPUID_HT */ 1855 .features[FEAT_1_EDX] = 1856 PPRO_FEATURES | 1857 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1858 CPUID_PSE36 | CPUID_VME, 1859 .features[FEAT_1_ECX] = 1860 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1861 CPUID_EXT_POPCNT, 1862 .features[FEAT_8000_0001_EDX] = 1863 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1864 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1865 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1866 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1867 CPUID_EXT3_CR8LEG, 1868 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1869 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1870 .features[FEAT_8000_0001_ECX] = 1871 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1872 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1873 /* Missing: CPUID_SVM_LBRV */ 1874 .features[FEAT_SVM] = 1875 CPUID_SVM_NPT, 1876 .xlevel = 0x8000001A, 1877 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1878 }, 1879 { 1880 .name = "core2duo", 1881 .level = 10, 1882 .vendor = CPUID_VENDOR_INTEL, 1883 .family = 6, 1884 .model = 15, 1885 .stepping = 11, 1886 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1887 .features[FEAT_1_EDX] = 1888 PPRO_FEATURES | 1889 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1890 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1891 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1892 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1893 .features[FEAT_1_ECX] = 1894 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1895 CPUID_EXT_CX16, 1896 .features[FEAT_8000_0001_EDX] = 1897 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1898 .features[FEAT_8000_0001_ECX] = 1899 CPUID_EXT3_LAHF_LM, 1900 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1901 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1902 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1903 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1904 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1905 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1906 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1907 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1908 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1909 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1910 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1911 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1912 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1913 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1914 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1915 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1916 .features[FEAT_VMX_SECONDARY_CTLS] = 1917 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1918 .xlevel = 0x80000008, 1919 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1920 }, 1921 { 1922 .name = "kvm64", 1923 .level = 0xd, 1924 .vendor = CPUID_VENDOR_INTEL, 1925 .family = 15, 1926 .model = 6, 1927 .stepping = 1, 1928 /* Missing: CPUID_HT */ 1929 .features[FEAT_1_EDX] = 1930 PPRO_FEATURES | CPUID_VME | 1931 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1932 CPUID_PSE36, 1933 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1934 .features[FEAT_1_ECX] = 1935 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1936 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1937 .features[FEAT_8000_0001_EDX] = 1938 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1939 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1940 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1941 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1942 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1943 .features[FEAT_8000_0001_ECX] = 1944 0, 1945 /* VMX features from Cedar Mill/Prescott */ 1946 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1947 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1948 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1949 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1950 VMX_PIN_BASED_NMI_EXITING, 1951 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1952 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1953 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1954 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1955 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1956 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1957 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1958 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1959 .xlevel = 0x80000008, 1960 .model_id = "Common KVM processor" 1961 }, 1962 { 1963 .name = "qemu32", 1964 .level = 4, 1965 .vendor = CPUID_VENDOR_INTEL, 1966 .family = 6, 1967 .model = 6, 1968 .stepping = 3, 1969 .features[FEAT_1_EDX] = 1970 PPRO_FEATURES, 1971 .features[FEAT_1_ECX] = 1972 CPUID_EXT_SSE3, 1973 .xlevel = 0x80000004, 1974 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1975 }, 1976 { 1977 .name = "kvm32", 1978 .level = 5, 1979 .vendor = CPUID_VENDOR_INTEL, 1980 .family = 15, 1981 .model = 6, 1982 .stepping = 1, 1983 .features[FEAT_1_EDX] = 1984 PPRO_FEATURES | CPUID_VME | 1985 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1986 .features[FEAT_1_ECX] = 1987 CPUID_EXT_SSE3, 1988 .features[FEAT_8000_0001_ECX] = 1989 0, 1990 /* VMX features from Yonah */ 1991 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1992 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1993 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1994 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1995 VMX_PIN_BASED_NMI_EXITING, 1996 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1997 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1998 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1999 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2000 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2001 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2002 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2003 .xlevel = 0x80000008, 2004 .model_id = "Common 32-bit KVM processor" 2005 }, 2006 { 2007 .name = "coreduo", 2008 .level = 10, 2009 .vendor = CPUID_VENDOR_INTEL, 2010 .family = 6, 2011 .model = 14, 2012 .stepping = 8, 2013 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2014 .features[FEAT_1_EDX] = 2015 PPRO_FEATURES | CPUID_VME | 2016 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2017 CPUID_SS, 2018 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2019 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2020 .features[FEAT_1_ECX] = 2021 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2022 .features[FEAT_8000_0001_EDX] = 2023 CPUID_EXT2_NX, 2024 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2025 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2026 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2027 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2028 VMX_PIN_BASED_NMI_EXITING, 2029 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2030 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2031 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2032 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2033 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2034 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2035 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2036 .xlevel = 0x80000008, 2037 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2038 }, 2039 { 2040 .name = "486", 2041 .level = 1, 2042 .vendor = CPUID_VENDOR_INTEL, 2043 .family = 4, 2044 .model = 8, 2045 .stepping = 0, 2046 .features[FEAT_1_EDX] = 2047 I486_FEATURES, 2048 .xlevel = 0, 2049 .model_id = "", 2050 }, 2051 { 2052 .name = "pentium", 2053 .level = 1, 2054 .vendor = CPUID_VENDOR_INTEL, 2055 .family = 5, 2056 .model = 4, 2057 .stepping = 3, 2058 .features[FEAT_1_EDX] = 2059 PENTIUM_FEATURES, 2060 .xlevel = 0, 2061 .model_id = "", 2062 }, 2063 { 2064 .name = "pentium2", 2065 .level = 2, 2066 .vendor = CPUID_VENDOR_INTEL, 2067 .family = 6, 2068 .model = 5, 2069 .stepping = 2, 2070 .features[FEAT_1_EDX] = 2071 PENTIUM2_FEATURES, 2072 .xlevel = 0, 2073 .model_id = "", 2074 }, 2075 { 2076 .name = "pentium3", 2077 .level = 3, 2078 .vendor = CPUID_VENDOR_INTEL, 2079 .family = 6, 2080 .model = 7, 2081 .stepping = 3, 2082 .features[FEAT_1_EDX] = 2083 PENTIUM3_FEATURES, 2084 .xlevel = 0, 2085 .model_id = "", 2086 }, 2087 { 2088 .name = "athlon", 2089 .level = 2, 2090 .vendor = CPUID_VENDOR_AMD, 2091 .family = 6, 2092 .model = 2, 2093 .stepping = 3, 2094 .features[FEAT_1_EDX] = 2095 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2096 CPUID_MCA, 2097 .features[FEAT_8000_0001_EDX] = 2098 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2099 .xlevel = 0x80000008, 2100 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2101 }, 2102 { 2103 .name = "n270", 2104 .level = 10, 2105 .vendor = CPUID_VENDOR_INTEL, 2106 .family = 6, 2107 .model = 28, 2108 .stepping = 2, 2109 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2110 .features[FEAT_1_EDX] = 2111 PPRO_FEATURES | 2112 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2113 CPUID_ACPI | CPUID_SS, 2114 /* Some CPUs got no CPUID_SEP */ 2115 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2116 * CPUID_EXT_XTPR */ 2117 .features[FEAT_1_ECX] = 2118 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2119 CPUID_EXT_MOVBE, 2120 .features[FEAT_8000_0001_EDX] = 2121 CPUID_EXT2_NX, 2122 .features[FEAT_8000_0001_ECX] = 2123 CPUID_EXT3_LAHF_LM, 2124 .xlevel = 0x80000008, 2125 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2126 }, 2127 { 2128 .name = "Conroe", 2129 .level = 10, 2130 .vendor = CPUID_VENDOR_INTEL, 2131 .family = 6, 2132 .model = 15, 2133 .stepping = 3, 2134 .features[FEAT_1_EDX] = 2135 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2136 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2137 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2138 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2139 CPUID_DE | CPUID_FP87, 2140 .features[FEAT_1_ECX] = 2141 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2142 .features[FEAT_8000_0001_EDX] = 2143 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2144 .features[FEAT_8000_0001_ECX] = 2145 CPUID_EXT3_LAHF_LM, 2146 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2147 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2148 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2149 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2150 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2151 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2152 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2153 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2154 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2155 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2156 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2157 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2158 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2159 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2160 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2161 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2162 .features[FEAT_VMX_SECONDARY_CTLS] = 2163 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2164 .xlevel = 0x80000008, 2165 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2166 }, 2167 { 2168 .name = "Penryn", 2169 .level = 10, 2170 .vendor = CPUID_VENDOR_INTEL, 2171 .family = 6, 2172 .model = 23, 2173 .stepping = 3, 2174 .features[FEAT_1_EDX] = 2175 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2176 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2177 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2178 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2179 CPUID_DE | CPUID_FP87, 2180 .features[FEAT_1_ECX] = 2181 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2182 CPUID_EXT_SSE3, 2183 .features[FEAT_8000_0001_EDX] = 2184 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2185 .features[FEAT_8000_0001_ECX] = 2186 CPUID_EXT3_LAHF_LM, 2187 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2188 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2189 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2190 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2191 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2192 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2193 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2194 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2195 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2196 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2197 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2198 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2199 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2200 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2201 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2202 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2203 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2204 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2205 .features[FEAT_VMX_SECONDARY_CTLS] = 2206 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2207 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2208 .xlevel = 0x80000008, 2209 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2210 }, 2211 { 2212 .name = "Nehalem", 2213 .level = 11, 2214 .vendor = CPUID_VENDOR_INTEL, 2215 .family = 6, 2216 .model = 26, 2217 .stepping = 3, 2218 .features[FEAT_1_EDX] = 2219 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2220 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2221 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2222 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2223 CPUID_DE | CPUID_FP87, 2224 .features[FEAT_1_ECX] = 2225 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2226 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2227 .features[FEAT_8000_0001_EDX] = 2228 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2229 .features[FEAT_8000_0001_ECX] = 2230 CPUID_EXT3_LAHF_LM, 2231 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2232 MSR_VMX_BASIC_TRUE_CTLS, 2233 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2234 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2235 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2236 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2237 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2238 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2239 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2240 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2241 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2242 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2243 .features[FEAT_VMX_EXIT_CTLS] = 2244 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2245 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2246 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2247 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2248 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2249 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2250 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2251 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2252 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2253 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2254 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2255 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2256 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2257 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2258 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2259 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2260 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2261 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2262 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2263 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2264 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2265 .features[FEAT_VMX_SECONDARY_CTLS] = 2266 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2267 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2268 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2269 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2270 VMX_SECONDARY_EXEC_ENABLE_VPID, 2271 .xlevel = 0x80000008, 2272 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2273 .versions = (X86CPUVersionDefinition[]) { 2274 { .version = 1 }, 2275 { 2276 .version = 2, 2277 .alias = "Nehalem-IBRS", 2278 .props = (PropValue[]) { 2279 { "spec-ctrl", "on" }, 2280 { "model-id", 2281 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2282 { /* end of list */ } 2283 } 2284 }, 2285 { /* end of list */ } 2286 } 2287 }, 2288 { 2289 .name = "Westmere", 2290 .level = 11, 2291 .vendor = CPUID_VENDOR_INTEL, 2292 .family = 6, 2293 .model = 44, 2294 .stepping = 1, 2295 .features[FEAT_1_EDX] = 2296 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2297 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2298 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2299 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2300 CPUID_DE | CPUID_FP87, 2301 .features[FEAT_1_ECX] = 2302 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2303 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2304 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2305 .features[FEAT_8000_0001_EDX] = 2306 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2307 .features[FEAT_8000_0001_ECX] = 2308 CPUID_EXT3_LAHF_LM, 2309 .features[FEAT_6_EAX] = 2310 CPUID_6_EAX_ARAT, 2311 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2312 MSR_VMX_BASIC_TRUE_CTLS, 2313 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2314 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2315 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2316 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2317 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2318 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2319 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2320 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2321 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2322 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2323 .features[FEAT_VMX_EXIT_CTLS] = 2324 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2325 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2326 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2327 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2328 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2329 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2330 MSR_VMX_MISC_STORE_LMA, 2331 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2332 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2333 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2334 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2335 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2336 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2337 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2338 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2339 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2340 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2341 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2342 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2343 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2344 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2345 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2346 .features[FEAT_VMX_SECONDARY_CTLS] = 2347 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2348 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2349 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2350 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2351 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2352 .xlevel = 0x80000008, 2353 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2354 .versions = (X86CPUVersionDefinition[]) { 2355 { .version = 1 }, 2356 { 2357 .version = 2, 2358 .alias = "Westmere-IBRS", 2359 .props = (PropValue[]) { 2360 { "spec-ctrl", "on" }, 2361 { "model-id", 2362 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2363 { /* end of list */ } 2364 } 2365 }, 2366 { /* end of list */ } 2367 } 2368 }, 2369 { 2370 .name = "SandyBridge", 2371 .level = 0xd, 2372 .vendor = CPUID_VENDOR_INTEL, 2373 .family = 6, 2374 .model = 42, 2375 .stepping = 1, 2376 .features[FEAT_1_EDX] = 2377 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2378 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2379 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2380 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2381 CPUID_DE | CPUID_FP87, 2382 .features[FEAT_1_ECX] = 2383 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2384 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2385 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2386 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2387 CPUID_EXT_SSE3, 2388 .features[FEAT_8000_0001_EDX] = 2389 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2390 CPUID_EXT2_SYSCALL, 2391 .features[FEAT_8000_0001_ECX] = 2392 CPUID_EXT3_LAHF_LM, 2393 .features[FEAT_XSAVE] = 2394 CPUID_XSAVE_XSAVEOPT, 2395 .features[FEAT_6_EAX] = 2396 CPUID_6_EAX_ARAT, 2397 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2398 MSR_VMX_BASIC_TRUE_CTLS, 2399 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2400 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2401 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2402 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2403 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2404 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2405 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2406 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2407 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2408 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2409 .features[FEAT_VMX_EXIT_CTLS] = 2410 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2411 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2412 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2413 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2414 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2415 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2416 MSR_VMX_MISC_STORE_LMA, 2417 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2418 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2419 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2420 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2421 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2422 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2423 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2424 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2425 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2426 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2427 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2428 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2429 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2430 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2431 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2432 .features[FEAT_VMX_SECONDARY_CTLS] = 2433 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2434 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2435 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2436 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2437 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2438 .xlevel = 0x80000008, 2439 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2440 .versions = (X86CPUVersionDefinition[]) { 2441 { .version = 1 }, 2442 { 2443 .version = 2, 2444 .alias = "SandyBridge-IBRS", 2445 .props = (PropValue[]) { 2446 { "spec-ctrl", "on" }, 2447 { "model-id", 2448 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2449 { /* end of list */ } 2450 } 2451 }, 2452 { /* end of list */ } 2453 } 2454 }, 2455 { 2456 .name = "IvyBridge", 2457 .level = 0xd, 2458 .vendor = CPUID_VENDOR_INTEL, 2459 .family = 6, 2460 .model = 58, 2461 .stepping = 9, 2462 .features[FEAT_1_EDX] = 2463 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2464 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2465 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2466 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2467 CPUID_DE | CPUID_FP87, 2468 .features[FEAT_1_ECX] = 2469 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2470 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2471 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2472 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2473 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2474 .features[FEAT_7_0_EBX] = 2475 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2476 CPUID_7_0_EBX_ERMS, 2477 .features[FEAT_8000_0001_EDX] = 2478 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2479 CPUID_EXT2_SYSCALL, 2480 .features[FEAT_8000_0001_ECX] = 2481 CPUID_EXT3_LAHF_LM, 2482 .features[FEAT_XSAVE] = 2483 CPUID_XSAVE_XSAVEOPT, 2484 .features[FEAT_6_EAX] = 2485 CPUID_6_EAX_ARAT, 2486 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2487 MSR_VMX_BASIC_TRUE_CTLS, 2488 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2489 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2490 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2491 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2492 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2493 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2494 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2495 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2496 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2497 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2498 .features[FEAT_VMX_EXIT_CTLS] = 2499 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2500 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2501 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2502 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2503 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2504 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2505 MSR_VMX_MISC_STORE_LMA, 2506 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2507 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2508 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2509 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2510 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2511 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2512 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2513 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2514 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2515 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2516 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2517 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2518 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2519 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2520 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2521 .features[FEAT_VMX_SECONDARY_CTLS] = 2522 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2523 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2524 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2525 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2526 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2527 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2528 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2529 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2530 .xlevel = 0x80000008, 2531 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2532 .versions = (X86CPUVersionDefinition[]) { 2533 { .version = 1 }, 2534 { 2535 .version = 2, 2536 .alias = "IvyBridge-IBRS", 2537 .props = (PropValue[]) { 2538 { "spec-ctrl", "on" }, 2539 { "model-id", 2540 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2541 { /* end of list */ } 2542 } 2543 }, 2544 { /* end of list */ } 2545 } 2546 }, 2547 { 2548 .name = "Haswell", 2549 .level = 0xd, 2550 .vendor = CPUID_VENDOR_INTEL, 2551 .family = 6, 2552 .model = 60, 2553 .stepping = 4, 2554 .features[FEAT_1_EDX] = 2555 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2556 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2557 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2558 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2559 CPUID_DE | CPUID_FP87, 2560 .features[FEAT_1_ECX] = 2561 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2562 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2563 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2564 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2565 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2566 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2567 .features[FEAT_8000_0001_EDX] = 2568 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2569 CPUID_EXT2_SYSCALL, 2570 .features[FEAT_8000_0001_ECX] = 2571 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2572 .features[FEAT_7_0_EBX] = 2573 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2574 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2575 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2576 CPUID_7_0_EBX_RTM, 2577 .features[FEAT_XSAVE] = 2578 CPUID_XSAVE_XSAVEOPT, 2579 .features[FEAT_6_EAX] = 2580 CPUID_6_EAX_ARAT, 2581 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2582 MSR_VMX_BASIC_TRUE_CTLS, 2583 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2584 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2585 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2586 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2587 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2588 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2589 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2590 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2591 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2592 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2593 .features[FEAT_VMX_EXIT_CTLS] = 2594 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2595 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2596 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2597 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2598 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2599 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2600 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2601 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2602 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2603 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2604 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2605 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2606 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2607 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2608 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2609 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2610 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2611 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2612 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2613 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2614 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2615 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2616 .features[FEAT_VMX_SECONDARY_CTLS] = 2617 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2618 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2619 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2620 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2621 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2622 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2623 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2624 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2625 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2626 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2627 .xlevel = 0x80000008, 2628 .model_id = "Intel Core Processor (Haswell)", 2629 .versions = (X86CPUVersionDefinition[]) { 2630 { .version = 1 }, 2631 { 2632 .version = 2, 2633 .alias = "Haswell-noTSX", 2634 .props = (PropValue[]) { 2635 { "hle", "off" }, 2636 { "rtm", "off" }, 2637 { "stepping", "1" }, 2638 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2639 { /* end of list */ } 2640 }, 2641 }, 2642 { 2643 .version = 3, 2644 .alias = "Haswell-IBRS", 2645 .props = (PropValue[]) { 2646 /* Restore TSX features removed by -v2 above */ 2647 { "hle", "on" }, 2648 { "rtm", "on" }, 2649 /* 2650 * Haswell and Haswell-IBRS had stepping=4 in 2651 * QEMU 4.0 and older 2652 */ 2653 { "stepping", "4" }, 2654 { "spec-ctrl", "on" }, 2655 { "model-id", 2656 "Intel Core Processor (Haswell, IBRS)" }, 2657 { /* end of list */ } 2658 } 2659 }, 2660 { 2661 .version = 4, 2662 .alias = "Haswell-noTSX-IBRS", 2663 .props = (PropValue[]) { 2664 { "hle", "off" }, 2665 { "rtm", "off" }, 2666 /* spec-ctrl was already enabled by -v3 above */ 2667 { "stepping", "1" }, 2668 { "model-id", 2669 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2670 { /* end of list */ } 2671 } 2672 }, 2673 { /* end of list */ } 2674 } 2675 }, 2676 { 2677 .name = "Broadwell", 2678 .level = 0xd, 2679 .vendor = CPUID_VENDOR_INTEL, 2680 .family = 6, 2681 .model = 61, 2682 .stepping = 2, 2683 .features[FEAT_1_EDX] = 2684 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2685 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2686 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2687 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2688 CPUID_DE | CPUID_FP87, 2689 .features[FEAT_1_ECX] = 2690 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2691 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2692 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2693 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2694 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2695 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2696 .features[FEAT_8000_0001_EDX] = 2697 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2698 CPUID_EXT2_SYSCALL, 2699 .features[FEAT_8000_0001_ECX] = 2700 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2701 .features[FEAT_7_0_EBX] = 2702 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2703 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2704 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2705 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2706 CPUID_7_0_EBX_SMAP, 2707 .features[FEAT_XSAVE] = 2708 CPUID_XSAVE_XSAVEOPT, 2709 .features[FEAT_6_EAX] = 2710 CPUID_6_EAX_ARAT, 2711 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2712 MSR_VMX_BASIC_TRUE_CTLS, 2713 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2714 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2715 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2716 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2717 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2718 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2719 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2720 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2721 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2722 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2723 .features[FEAT_VMX_EXIT_CTLS] = 2724 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2725 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2726 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2727 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2728 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2729 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2730 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2731 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2732 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2733 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2734 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2735 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2736 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2737 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2738 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2739 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2740 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2741 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2742 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2743 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2744 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2745 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2746 .features[FEAT_VMX_SECONDARY_CTLS] = 2747 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2748 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2749 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2750 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2751 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2752 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2753 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2754 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2755 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2756 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2757 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2758 .xlevel = 0x80000008, 2759 .model_id = "Intel Core Processor (Broadwell)", 2760 .versions = (X86CPUVersionDefinition[]) { 2761 { .version = 1 }, 2762 { 2763 .version = 2, 2764 .alias = "Broadwell-noTSX", 2765 .props = (PropValue[]) { 2766 { "hle", "off" }, 2767 { "rtm", "off" }, 2768 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2769 { /* end of list */ } 2770 }, 2771 }, 2772 { 2773 .version = 3, 2774 .alias = "Broadwell-IBRS", 2775 .props = (PropValue[]) { 2776 /* Restore TSX features removed by -v2 above */ 2777 { "hle", "on" }, 2778 { "rtm", "on" }, 2779 { "spec-ctrl", "on" }, 2780 { "model-id", 2781 "Intel Core Processor (Broadwell, IBRS)" }, 2782 { /* end of list */ } 2783 } 2784 }, 2785 { 2786 .version = 4, 2787 .alias = "Broadwell-noTSX-IBRS", 2788 .props = (PropValue[]) { 2789 { "hle", "off" }, 2790 { "rtm", "off" }, 2791 /* spec-ctrl was already enabled by -v3 above */ 2792 { "model-id", 2793 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2794 { /* end of list */ } 2795 } 2796 }, 2797 { /* end of list */ } 2798 } 2799 }, 2800 { 2801 .name = "Skylake-Client", 2802 .level = 0xd, 2803 .vendor = CPUID_VENDOR_INTEL, 2804 .family = 6, 2805 .model = 94, 2806 .stepping = 3, 2807 .features[FEAT_1_EDX] = 2808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2812 CPUID_DE | CPUID_FP87, 2813 .features[FEAT_1_ECX] = 2814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2815 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2816 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2817 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2818 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2819 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2820 .features[FEAT_8000_0001_EDX] = 2821 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2822 CPUID_EXT2_SYSCALL, 2823 .features[FEAT_8000_0001_ECX] = 2824 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2825 .features[FEAT_7_0_EBX] = 2826 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2827 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2828 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2829 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2830 CPUID_7_0_EBX_SMAP, 2831 /* Missing: XSAVES (not supported by some Linux versions, 2832 * including v4.1 to v4.12). 2833 * KVM doesn't yet expose any XSAVES state save component, 2834 * and the only one defined in Skylake (processor tracing) 2835 * probably will block migration anyway. 2836 */ 2837 .features[FEAT_XSAVE] = 2838 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2839 CPUID_XSAVE_XGETBV1, 2840 .features[FEAT_6_EAX] = 2841 CPUID_6_EAX_ARAT, 2842 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2843 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2844 MSR_VMX_BASIC_TRUE_CTLS, 2845 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2846 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2847 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2848 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2849 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2850 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2851 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2852 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2853 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2854 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2855 .features[FEAT_VMX_EXIT_CTLS] = 2856 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2857 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2858 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2859 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2860 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2861 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2862 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2863 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2864 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2865 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2866 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2867 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2868 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2869 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2870 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2871 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2872 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2873 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2874 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2875 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2876 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2877 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2878 .features[FEAT_VMX_SECONDARY_CTLS] = 2879 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2880 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2881 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2882 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2883 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2884 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2885 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2886 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2887 .xlevel = 0x80000008, 2888 .model_id = "Intel Core Processor (Skylake)", 2889 .versions = (X86CPUVersionDefinition[]) { 2890 { .version = 1 }, 2891 { 2892 .version = 2, 2893 .alias = "Skylake-Client-IBRS", 2894 .props = (PropValue[]) { 2895 { "spec-ctrl", "on" }, 2896 { "model-id", 2897 "Intel Core Processor (Skylake, IBRS)" }, 2898 { /* end of list */ } 2899 } 2900 }, 2901 { 2902 .version = 3, 2903 .alias = "Skylake-Client-noTSX-IBRS", 2904 .props = (PropValue[]) { 2905 { "hle", "off" }, 2906 { "rtm", "off" }, 2907 { "model-id", 2908 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2909 { /* end of list */ } 2910 } 2911 }, 2912 { /* end of list */ } 2913 } 2914 }, 2915 { 2916 .name = "Skylake-Server", 2917 .level = 0xd, 2918 .vendor = CPUID_VENDOR_INTEL, 2919 .family = 6, 2920 .model = 85, 2921 .stepping = 4, 2922 .features[FEAT_1_EDX] = 2923 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2927 CPUID_DE | CPUID_FP87, 2928 .features[FEAT_1_ECX] = 2929 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2930 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2931 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2932 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2933 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2934 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2935 .features[FEAT_8000_0001_EDX] = 2936 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2937 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2938 .features[FEAT_8000_0001_ECX] = 2939 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2940 .features[FEAT_7_0_EBX] = 2941 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2942 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2943 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2944 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2945 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2946 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2947 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2948 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2949 .features[FEAT_7_0_ECX] = 2950 CPUID_7_0_ECX_PKU, 2951 /* Missing: XSAVES (not supported by some Linux versions, 2952 * including v4.1 to v4.12). 2953 * KVM doesn't yet expose any XSAVES state save component, 2954 * and the only one defined in Skylake (processor tracing) 2955 * probably will block migration anyway. 2956 */ 2957 .features[FEAT_XSAVE] = 2958 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2959 CPUID_XSAVE_XGETBV1, 2960 .features[FEAT_6_EAX] = 2961 CPUID_6_EAX_ARAT, 2962 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2963 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2964 MSR_VMX_BASIC_TRUE_CTLS, 2965 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2966 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2967 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2968 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2969 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2970 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2971 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2972 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2973 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2974 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2975 .features[FEAT_VMX_EXIT_CTLS] = 2976 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2977 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2978 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2979 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2980 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2981 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2982 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2983 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2984 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2985 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2986 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2987 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2988 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2989 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2990 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2991 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2992 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2993 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2994 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2995 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2996 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2997 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2998 .features[FEAT_VMX_SECONDARY_CTLS] = 2999 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3000 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3001 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3002 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3003 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3004 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3005 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3006 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3007 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3008 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3009 .xlevel = 0x80000008, 3010 .model_id = "Intel Xeon Processor (Skylake)", 3011 .versions = (X86CPUVersionDefinition[]) { 3012 { .version = 1 }, 3013 { 3014 .version = 2, 3015 .alias = "Skylake-Server-IBRS", 3016 .props = (PropValue[]) { 3017 /* clflushopt was not added to Skylake-Server-IBRS */ 3018 /* TODO: add -v3 including clflushopt */ 3019 { "clflushopt", "off" }, 3020 { "spec-ctrl", "on" }, 3021 { "model-id", 3022 "Intel Xeon Processor (Skylake, IBRS)" }, 3023 { /* end of list */ } 3024 } 3025 }, 3026 { 3027 .version = 3, 3028 .alias = "Skylake-Server-noTSX-IBRS", 3029 .props = (PropValue[]) { 3030 { "hle", "off" }, 3031 { "rtm", "off" }, 3032 { "model-id", 3033 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3034 { /* end of list */ } 3035 } 3036 }, 3037 { 3038 .version = 4, 3039 .props = (PropValue[]) { 3040 { "vmx-eptp-switching", "on" }, 3041 { /* end of list */ } 3042 } 3043 }, 3044 { /* end of list */ } 3045 } 3046 }, 3047 { 3048 .name = "Cascadelake-Server", 3049 .level = 0xd, 3050 .vendor = CPUID_VENDOR_INTEL, 3051 .family = 6, 3052 .model = 85, 3053 .stepping = 6, 3054 .features[FEAT_1_EDX] = 3055 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3056 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3057 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3058 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3059 CPUID_DE | CPUID_FP87, 3060 .features[FEAT_1_ECX] = 3061 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3062 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3063 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3064 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3065 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3066 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3067 .features[FEAT_8000_0001_EDX] = 3068 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3069 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3070 .features[FEAT_8000_0001_ECX] = 3071 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3072 .features[FEAT_7_0_EBX] = 3073 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3074 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3075 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3076 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3077 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3078 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3079 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3080 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3081 .features[FEAT_7_0_ECX] = 3082 CPUID_7_0_ECX_PKU | 3083 CPUID_7_0_ECX_AVX512VNNI, 3084 .features[FEAT_7_0_EDX] = 3085 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3086 /* Missing: XSAVES (not supported by some Linux versions, 3087 * including v4.1 to v4.12). 3088 * KVM doesn't yet expose any XSAVES state save component, 3089 * and the only one defined in Skylake (processor tracing) 3090 * probably will block migration anyway. 3091 */ 3092 .features[FEAT_XSAVE] = 3093 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3094 CPUID_XSAVE_XGETBV1, 3095 .features[FEAT_6_EAX] = 3096 CPUID_6_EAX_ARAT, 3097 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3098 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3099 MSR_VMX_BASIC_TRUE_CTLS, 3100 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3101 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3102 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3103 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3104 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3105 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3106 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3107 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3108 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3109 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3110 .features[FEAT_VMX_EXIT_CTLS] = 3111 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3112 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3113 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3114 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3115 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3116 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3117 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3118 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3119 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3120 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3121 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3122 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3123 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3124 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3125 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3126 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3127 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3128 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3129 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3130 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3131 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3132 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3133 .features[FEAT_VMX_SECONDARY_CTLS] = 3134 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3135 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3136 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3137 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3138 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3139 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3140 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3141 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3142 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3143 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3144 .xlevel = 0x80000008, 3145 .model_id = "Intel Xeon Processor (Cascadelake)", 3146 .versions = (X86CPUVersionDefinition[]) { 3147 { .version = 1 }, 3148 { .version = 2, 3149 .note = "ARCH_CAPABILITIES", 3150 .props = (PropValue[]) { 3151 { "arch-capabilities", "on" }, 3152 { "rdctl-no", "on" }, 3153 { "ibrs-all", "on" }, 3154 { "skip-l1dfl-vmentry", "on" }, 3155 { "mds-no", "on" }, 3156 { /* end of list */ } 3157 }, 3158 }, 3159 { .version = 3, 3160 .alias = "Cascadelake-Server-noTSX", 3161 .note = "ARCH_CAPABILITIES, no TSX", 3162 .props = (PropValue[]) { 3163 { "hle", "off" }, 3164 { "rtm", "off" }, 3165 { /* end of list */ } 3166 }, 3167 }, 3168 { .version = 4, 3169 .note = "ARCH_CAPABILITIES, no TSX", 3170 .props = (PropValue[]) { 3171 { "vmx-eptp-switching", "on" }, 3172 { /* end of list */ } 3173 }, 3174 }, 3175 { /* end of list */ } 3176 } 3177 }, 3178 { 3179 .name = "Cooperlake", 3180 .level = 0xd, 3181 .vendor = CPUID_VENDOR_INTEL, 3182 .family = 6, 3183 .model = 85, 3184 .stepping = 10, 3185 .features[FEAT_1_EDX] = 3186 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3187 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3188 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3189 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3190 CPUID_DE | CPUID_FP87, 3191 .features[FEAT_1_ECX] = 3192 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3193 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3194 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3195 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3196 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3197 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3198 .features[FEAT_8000_0001_EDX] = 3199 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3200 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3201 .features[FEAT_8000_0001_ECX] = 3202 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3203 .features[FEAT_7_0_EBX] = 3204 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3205 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3206 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3207 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3208 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3209 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3210 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3211 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3212 .features[FEAT_7_0_ECX] = 3213 CPUID_7_0_ECX_PKU | 3214 CPUID_7_0_ECX_AVX512VNNI, 3215 .features[FEAT_7_0_EDX] = 3216 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3217 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3218 .features[FEAT_ARCH_CAPABILITIES] = 3219 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3220 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3221 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3222 .features[FEAT_7_1_EAX] = 3223 CPUID_7_1_EAX_AVX512_BF16, 3224 /* 3225 * Missing: XSAVES (not supported by some Linux versions, 3226 * including v4.1 to v4.12). 3227 * KVM doesn't yet expose any XSAVES state save component, 3228 * and the only one defined in Skylake (processor tracing) 3229 * probably will block migration anyway. 3230 */ 3231 .features[FEAT_XSAVE] = 3232 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3233 CPUID_XSAVE_XGETBV1, 3234 .features[FEAT_6_EAX] = 3235 CPUID_6_EAX_ARAT, 3236 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3237 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3238 MSR_VMX_BASIC_TRUE_CTLS, 3239 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3240 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3241 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3242 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3243 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3244 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3245 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3246 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3247 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3248 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3249 .features[FEAT_VMX_EXIT_CTLS] = 3250 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3251 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3252 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3253 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3254 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3255 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3256 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3257 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3258 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3259 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3260 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3261 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3262 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3263 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3264 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3265 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3266 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3267 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3268 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3269 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3270 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3271 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3272 .features[FEAT_VMX_SECONDARY_CTLS] = 3273 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3274 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3275 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3276 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3277 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3278 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3279 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3280 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3281 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3282 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3283 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3284 .xlevel = 0x80000008, 3285 .model_id = "Intel Xeon Processor (Cooperlake)", 3286 }, 3287 { 3288 .name = "Icelake-Client", 3289 .level = 0xd, 3290 .vendor = CPUID_VENDOR_INTEL, 3291 .family = 6, 3292 .model = 126, 3293 .stepping = 0, 3294 .features[FEAT_1_EDX] = 3295 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3296 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3297 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3298 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3299 CPUID_DE | CPUID_FP87, 3300 .features[FEAT_1_ECX] = 3301 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3302 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3303 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3304 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3305 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3306 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3307 .features[FEAT_8000_0001_EDX] = 3308 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3309 CPUID_EXT2_SYSCALL, 3310 .features[FEAT_8000_0001_ECX] = 3311 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3312 .features[FEAT_8000_0008_EBX] = 3313 CPUID_8000_0008_EBX_WBNOINVD, 3314 .features[FEAT_7_0_EBX] = 3315 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3316 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3317 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3318 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3319 CPUID_7_0_EBX_SMAP, 3320 .features[FEAT_7_0_ECX] = 3321 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3322 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3323 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3324 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3325 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3326 .features[FEAT_7_0_EDX] = 3327 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3328 /* Missing: XSAVES (not supported by some Linux versions, 3329 * including v4.1 to v4.12). 3330 * KVM doesn't yet expose any XSAVES state save component, 3331 * and the only one defined in Skylake (processor tracing) 3332 * probably will block migration anyway. 3333 */ 3334 .features[FEAT_XSAVE] = 3335 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3336 CPUID_XSAVE_XGETBV1, 3337 .features[FEAT_6_EAX] = 3338 CPUID_6_EAX_ARAT, 3339 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3340 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3341 MSR_VMX_BASIC_TRUE_CTLS, 3342 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3343 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3344 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3345 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3346 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3347 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3348 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3349 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3350 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3351 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3352 .features[FEAT_VMX_EXIT_CTLS] = 3353 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3354 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3355 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3356 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3357 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3358 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3359 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3360 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3361 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3362 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3363 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3364 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3365 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3366 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3367 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3368 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3369 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3370 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3371 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3372 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3373 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3374 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3375 .features[FEAT_VMX_SECONDARY_CTLS] = 3376 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3377 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3378 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3379 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3380 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3381 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3382 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3383 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3384 .xlevel = 0x80000008, 3385 .model_id = "Intel Core Processor (Icelake)", 3386 .versions = (X86CPUVersionDefinition[]) { 3387 { .version = 1 }, 3388 { 3389 .version = 2, 3390 .note = "no TSX", 3391 .alias = "Icelake-Client-noTSX", 3392 .props = (PropValue[]) { 3393 { "hle", "off" }, 3394 { "rtm", "off" }, 3395 { /* end of list */ } 3396 }, 3397 }, 3398 { /* end of list */ } 3399 } 3400 }, 3401 { 3402 .name = "Icelake-Server", 3403 .level = 0xd, 3404 .vendor = CPUID_VENDOR_INTEL, 3405 .family = 6, 3406 .model = 134, 3407 .stepping = 0, 3408 .features[FEAT_1_EDX] = 3409 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3410 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3411 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3412 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3413 CPUID_DE | CPUID_FP87, 3414 .features[FEAT_1_ECX] = 3415 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3416 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3417 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3418 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3419 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3420 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3421 .features[FEAT_8000_0001_EDX] = 3422 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3423 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3424 .features[FEAT_8000_0001_ECX] = 3425 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3426 .features[FEAT_8000_0008_EBX] = 3427 CPUID_8000_0008_EBX_WBNOINVD, 3428 .features[FEAT_7_0_EBX] = 3429 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3430 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3431 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3432 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3433 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3434 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3435 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3436 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3437 .features[FEAT_7_0_ECX] = 3438 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3439 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3440 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3441 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3442 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3443 .features[FEAT_7_0_EDX] = 3444 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3445 /* Missing: XSAVES (not supported by some Linux versions, 3446 * including v4.1 to v4.12). 3447 * KVM doesn't yet expose any XSAVES state save component, 3448 * and the only one defined in Skylake (processor tracing) 3449 * probably will block migration anyway. 3450 */ 3451 .features[FEAT_XSAVE] = 3452 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3453 CPUID_XSAVE_XGETBV1, 3454 .features[FEAT_6_EAX] = 3455 CPUID_6_EAX_ARAT, 3456 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3457 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3458 MSR_VMX_BASIC_TRUE_CTLS, 3459 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3460 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3461 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3462 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3463 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3464 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3465 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3466 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3467 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3468 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3469 .features[FEAT_VMX_EXIT_CTLS] = 3470 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3471 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3472 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3473 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3474 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3475 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3476 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3477 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3478 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3479 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3480 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3481 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3482 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3483 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3484 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3485 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3486 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3487 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3488 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3489 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3490 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3491 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3492 .features[FEAT_VMX_SECONDARY_CTLS] = 3493 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3494 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3495 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3496 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3497 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3498 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3499 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3500 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3501 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3502 .xlevel = 0x80000008, 3503 .model_id = "Intel Xeon Processor (Icelake)", 3504 .versions = (X86CPUVersionDefinition[]) { 3505 { .version = 1 }, 3506 { 3507 .version = 2, 3508 .note = "no TSX", 3509 .alias = "Icelake-Server-noTSX", 3510 .props = (PropValue[]) { 3511 { "hle", "off" }, 3512 { "rtm", "off" }, 3513 { /* end of list */ } 3514 }, 3515 }, 3516 { 3517 .version = 3, 3518 .props = (PropValue[]) { 3519 { "arch-capabilities", "on" }, 3520 { "rdctl-no", "on" }, 3521 { "ibrs-all", "on" }, 3522 { "skip-l1dfl-vmentry", "on" }, 3523 { "mds-no", "on" }, 3524 { "pschange-mc-no", "on" }, 3525 { "taa-no", "on" }, 3526 { /* end of list */ } 3527 }, 3528 }, 3529 { 3530 .version = 4, 3531 .props = (PropValue[]) { 3532 { "sha-ni", "on" }, 3533 { "avx512ifma", "on" }, 3534 { "rdpid", "on" }, 3535 { "fsrm", "on" }, 3536 { "vmx-rdseed-exit", "on" }, 3537 { "vmx-pml", "on" }, 3538 { "vmx-eptp-switching", "on" }, 3539 { "model", "106" }, 3540 { /* end of list */ } 3541 }, 3542 }, 3543 { /* end of list */ } 3544 } 3545 }, 3546 { 3547 .name = "Denverton", 3548 .level = 21, 3549 .vendor = CPUID_VENDOR_INTEL, 3550 .family = 6, 3551 .model = 95, 3552 .stepping = 1, 3553 .features[FEAT_1_EDX] = 3554 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3555 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3556 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3557 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3558 CPUID_SSE | CPUID_SSE2, 3559 .features[FEAT_1_ECX] = 3560 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3561 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3562 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3563 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3564 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3565 .features[FEAT_8000_0001_EDX] = 3566 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3567 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3568 .features[FEAT_8000_0001_ECX] = 3569 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3570 .features[FEAT_7_0_EBX] = 3571 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3572 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3573 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3574 .features[FEAT_7_0_EDX] = 3575 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3576 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3577 /* 3578 * Missing: XSAVES (not supported by some Linux versions, 3579 * including v4.1 to v4.12). 3580 * KVM doesn't yet expose any XSAVES state save component, 3581 * and the only one defined in Skylake (processor tracing) 3582 * probably will block migration anyway. 3583 */ 3584 .features[FEAT_XSAVE] = 3585 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3586 .features[FEAT_6_EAX] = 3587 CPUID_6_EAX_ARAT, 3588 .features[FEAT_ARCH_CAPABILITIES] = 3589 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3590 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3591 MSR_VMX_BASIC_TRUE_CTLS, 3592 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3593 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3594 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3595 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3596 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3597 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3598 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3599 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3600 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3601 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3602 .features[FEAT_VMX_EXIT_CTLS] = 3603 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3604 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3605 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3606 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3607 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3608 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3609 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3610 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3611 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3612 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3613 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3614 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3615 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3616 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3617 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3618 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3619 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3620 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3621 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3622 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3623 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3624 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3625 .features[FEAT_VMX_SECONDARY_CTLS] = 3626 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3627 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3628 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3629 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3630 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3631 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3632 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3633 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3634 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3635 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3636 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3637 .xlevel = 0x80000008, 3638 .model_id = "Intel Atom Processor (Denverton)", 3639 .versions = (X86CPUVersionDefinition[]) { 3640 { .version = 1 }, 3641 { 3642 .version = 2, 3643 .note = "no MPX, no MONITOR", 3644 .props = (PropValue[]) { 3645 { "monitor", "off" }, 3646 { "mpx", "off" }, 3647 { /* end of list */ }, 3648 }, 3649 }, 3650 { /* end of list */ }, 3651 }, 3652 }, 3653 { 3654 .name = "Snowridge", 3655 .level = 27, 3656 .vendor = CPUID_VENDOR_INTEL, 3657 .family = 6, 3658 .model = 134, 3659 .stepping = 1, 3660 .features[FEAT_1_EDX] = 3661 /* missing: CPUID_PN CPUID_IA64 */ 3662 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3663 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3664 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3665 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3666 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3667 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3668 CPUID_MMX | 3669 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3670 .features[FEAT_1_ECX] = 3671 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3672 CPUID_EXT_SSSE3 | 3673 CPUID_EXT_CX16 | 3674 CPUID_EXT_SSE41 | 3675 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3676 CPUID_EXT_POPCNT | 3677 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3678 CPUID_EXT_RDRAND, 3679 .features[FEAT_8000_0001_EDX] = 3680 CPUID_EXT2_SYSCALL | 3681 CPUID_EXT2_NX | 3682 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3683 CPUID_EXT2_LM, 3684 .features[FEAT_8000_0001_ECX] = 3685 CPUID_EXT3_LAHF_LM | 3686 CPUID_EXT3_3DNOWPREFETCH, 3687 .features[FEAT_7_0_EBX] = 3688 CPUID_7_0_EBX_FSGSBASE | 3689 CPUID_7_0_EBX_SMEP | 3690 CPUID_7_0_EBX_ERMS | 3691 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3692 CPUID_7_0_EBX_RDSEED | 3693 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3694 CPUID_7_0_EBX_CLWB | 3695 CPUID_7_0_EBX_SHA_NI, 3696 .features[FEAT_7_0_ECX] = 3697 CPUID_7_0_ECX_UMIP | 3698 /* missing bit 5 */ 3699 CPUID_7_0_ECX_GFNI | 3700 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3701 CPUID_7_0_ECX_MOVDIR64B, 3702 .features[FEAT_7_0_EDX] = 3703 CPUID_7_0_EDX_SPEC_CTRL | 3704 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3705 CPUID_7_0_EDX_CORE_CAPABILITY, 3706 .features[FEAT_CORE_CAPABILITY] = 3707 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3708 /* 3709 * Missing: XSAVES (not supported by some Linux versions, 3710 * including v4.1 to v4.12). 3711 * KVM doesn't yet expose any XSAVES state save component, 3712 * and the only one defined in Skylake (processor tracing) 3713 * probably will block migration anyway. 3714 */ 3715 .features[FEAT_XSAVE] = 3716 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3717 CPUID_XSAVE_XGETBV1, 3718 .features[FEAT_6_EAX] = 3719 CPUID_6_EAX_ARAT, 3720 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3721 MSR_VMX_BASIC_TRUE_CTLS, 3722 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3723 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3724 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3725 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3726 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3727 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3728 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3729 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3730 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3731 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3732 .features[FEAT_VMX_EXIT_CTLS] = 3733 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3734 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3735 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3736 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3737 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3738 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3739 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3740 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3741 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3742 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3743 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3744 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3745 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3746 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3747 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3748 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3749 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3750 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3751 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3752 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3753 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3754 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3755 .features[FEAT_VMX_SECONDARY_CTLS] = 3756 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3757 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3758 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3759 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3760 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3761 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3762 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3763 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3764 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3765 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3766 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3767 .xlevel = 0x80000008, 3768 .model_id = "Intel Atom Processor (SnowRidge)", 3769 .versions = (X86CPUVersionDefinition[]) { 3770 { .version = 1 }, 3771 { 3772 .version = 2, 3773 .props = (PropValue[]) { 3774 { "mpx", "off" }, 3775 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3776 { /* end of list */ }, 3777 }, 3778 }, 3779 { /* end of list */ }, 3780 }, 3781 }, 3782 { 3783 .name = "KnightsMill", 3784 .level = 0xd, 3785 .vendor = CPUID_VENDOR_INTEL, 3786 .family = 6, 3787 .model = 133, 3788 .stepping = 0, 3789 .features[FEAT_1_EDX] = 3790 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3791 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3792 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3793 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3794 CPUID_PSE | CPUID_DE | CPUID_FP87, 3795 .features[FEAT_1_ECX] = 3796 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3797 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3798 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3799 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3800 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3801 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3802 .features[FEAT_8000_0001_EDX] = 3803 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3804 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3805 .features[FEAT_8000_0001_ECX] = 3806 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3807 .features[FEAT_7_0_EBX] = 3808 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3809 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3810 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3811 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3812 CPUID_7_0_EBX_AVX512ER, 3813 .features[FEAT_7_0_ECX] = 3814 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3815 .features[FEAT_7_0_EDX] = 3816 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3817 .features[FEAT_XSAVE] = 3818 CPUID_XSAVE_XSAVEOPT, 3819 .features[FEAT_6_EAX] = 3820 CPUID_6_EAX_ARAT, 3821 .xlevel = 0x80000008, 3822 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3823 }, 3824 { 3825 .name = "Opteron_G1", 3826 .level = 5, 3827 .vendor = CPUID_VENDOR_AMD, 3828 .family = 15, 3829 .model = 6, 3830 .stepping = 1, 3831 .features[FEAT_1_EDX] = 3832 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3833 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3834 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3835 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3836 CPUID_DE | CPUID_FP87, 3837 .features[FEAT_1_ECX] = 3838 CPUID_EXT_SSE3, 3839 .features[FEAT_8000_0001_EDX] = 3840 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3841 .xlevel = 0x80000008, 3842 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3843 }, 3844 { 3845 .name = "Opteron_G2", 3846 .level = 5, 3847 .vendor = CPUID_VENDOR_AMD, 3848 .family = 15, 3849 .model = 6, 3850 .stepping = 1, 3851 .features[FEAT_1_EDX] = 3852 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3853 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3854 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3855 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3856 CPUID_DE | CPUID_FP87, 3857 .features[FEAT_1_ECX] = 3858 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3859 .features[FEAT_8000_0001_EDX] = 3860 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3861 .features[FEAT_8000_0001_ECX] = 3862 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3863 .xlevel = 0x80000008, 3864 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3865 }, 3866 { 3867 .name = "Opteron_G3", 3868 .level = 5, 3869 .vendor = CPUID_VENDOR_AMD, 3870 .family = 16, 3871 .model = 2, 3872 .stepping = 3, 3873 .features[FEAT_1_EDX] = 3874 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3875 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3876 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3877 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3878 CPUID_DE | CPUID_FP87, 3879 .features[FEAT_1_ECX] = 3880 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3881 CPUID_EXT_SSE3, 3882 .features[FEAT_8000_0001_EDX] = 3883 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3884 CPUID_EXT2_RDTSCP, 3885 .features[FEAT_8000_0001_ECX] = 3886 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3887 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3888 .xlevel = 0x80000008, 3889 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3890 }, 3891 { 3892 .name = "Opteron_G4", 3893 .level = 0xd, 3894 .vendor = CPUID_VENDOR_AMD, 3895 .family = 21, 3896 .model = 1, 3897 .stepping = 2, 3898 .features[FEAT_1_EDX] = 3899 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3903 CPUID_DE | CPUID_FP87, 3904 .features[FEAT_1_ECX] = 3905 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3906 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3907 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3908 CPUID_EXT_SSE3, 3909 .features[FEAT_8000_0001_EDX] = 3910 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3911 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3912 .features[FEAT_8000_0001_ECX] = 3913 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3914 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3915 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3916 CPUID_EXT3_LAHF_LM, 3917 .features[FEAT_SVM] = 3918 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3919 /* no xsaveopt! */ 3920 .xlevel = 0x8000001A, 3921 .model_id = "AMD Opteron 62xx class CPU", 3922 }, 3923 { 3924 .name = "Opteron_G5", 3925 .level = 0xd, 3926 .vendor = CPUID_VENDOR_AMD, 3927 .family = 21, 3928 .model = 2, 3929 .stepping = 0, 3930 .features[FEAT_1_EDX] = 3931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3935 CPUID_DE | CPUID_FP87, 3936 .features[FEAT_1_ECX] = 3937 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3938 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3939 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3940 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3941 .features[FEAT_8000_0001_EDX] = 3942 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3943 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3944 .features[FEAT_8000_0001_ECX] = 3945 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3946 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3947 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3948 CPUID_EXT3_LAHF_LM, 3949 .features[FEAT_SVM] = 3950 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3951 /* no xsaveopt! */ 3952 .xlevel = 0x8000001A, 3953 .model_id = "AMD Opteron 63xx class CPU", 3954 }, 3955 { 3956 .name = "EPYC", 3957 .level = 0xd, 3958 .vendor = CPUID_VENDOR_AMD, 3959 .family = 23, 3960 .model = 1, 3961 .stepping = 2, 3962 .features[FEAT_1_EDX] = 3963 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3964 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3965 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3966 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3967 CPUID_VME | CPUID_FP87, 3968 .features[FEAT_1_ECX] = 3969 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3970 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3971 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3972 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3973 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3974 .features[FEAT_8000_0001_EDX] = 3975 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3976 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3977 CPUID_EXT2_SYSCALL, 3978 .features[FEAT_8000_0001_ECX] = 3979 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3980 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3981 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3982 CPUID_EXT3_TOPOEXT, 3983 .features[FEAT_7_0_EBX] = 3984 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3985 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3986 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3987 CPUID_7_0_EBX_SHA_NI, 3988 .features[FEAT_XSAVE] = 3989 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3990 CPUID_XSAVE_XGETBV1, 3991 .features[FEAT_6_EAX] = 3992 CPUID_6_EAX_ARAT, 3993 .features[FEAT_SVM] = 3994 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3995 .xlevel = 0x8000001E, 3996 .model_id = "AMD EPYC Processor", 3997 .cache_info = &epyc_cache_info, 3998 .use_epyc_apic_id_encoding = 1, 3999 .versions = (X86CPUVersionDefinition[]) { 4000 { .version = 1 }, 4001 { 4002 .version = 2, 4003 .alias = "EPYC-IBPB", 4004 .props = (PropValue[]) { 4005 { "ibpb", "on" }, 4006 { "model-id", 4007 "AMD EPYC Processor (with IBPB)" }, 4008 { /* end of list */ } 4009 } 4010 }, 4011 { 4012 .version = 3, 4013 .props = (PropValue[]) { 4014 { "ibpb", "on" }, 4015 { "perfctr-core", "on" }, 4016 { "clzero", "on" }, 4017 { "xsaveerptr", "on" }, 4018 { "xsaves", "on" }, 4019 { "model-id", 4020 "AMD EPYC Processor" }, 4021 { /* end of list */ } 4022 } 4023 }, 4024 { /* end of list */ } 4025 } 4026 }, 4027 { 4028 .name = "Dhyana", 4029 .level = 0xd, 4030 .vendor = CPUID_VENDOR_HYGON, 4031 .family = 24, 4032 .model = 0, 4033 .stepping = 1, 4034 .features[FEAT_1_EDX] = 4035 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4036 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4037 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4038 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4039 CPUID_VME | CPUID_FP87, 4040 .features[FEAT_1_ECX] = 4041 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4042 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 4043 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4044 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4045 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 4046 .features[FEAT_8000_0001_EDX] = 4047 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4048 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4049 CPUID_EXT2_SYSCALL, 4050 .features[FEAT_8000_0001_ECX] = 4051 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4052 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4053 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4054 CPUID_EXT3_TOPOEXT, 4055 .features[FEAT_8000_0008_EBX] = 4056 CPUID_8000_0008_EBX_IBPB, 4057 .features[FEAT_7_0_EBX] = 4058 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4059 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4060 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4061 /* 4062 * Missing: XSAVES (not supported by some Linux versions, 4063 * including v4.1 to v4.12). 4064 * KVM doesn't yet expose any XSAVES state save component. 4065 */ 4066 .features[FEAT_XSAVE] = 4067 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4068 CPUID_XSAVE_XGETBV1, 4069 .features[FEAT_6_EAX] = 4070 CPUID_6_EAX_ARAT, 4071 .features[FEAT_SVM] = 4072 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4073 .xlevel = 0x8000001E, 4074 .model_id = "Hygon Dhyana Processor", 4075 .cache_info = &epyc_cache_info, 4076 }, 4077 { 4078 .name = "EPYC-Rome", 4079 .level = 0xd, 4080 .vendor = CPUID_VENDOR_AMD, 4081 .family = 23, 4082 .model = 49, 4083 .stepping = 0, 4084 .features[FEAT_1_EDX] = 4085 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4086 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4087 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4088 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4089 CPUID_VME | CPUID_FP87, 4090 .features[FEAT_1_ECX] = 4091 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4092 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4093 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4094 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4095 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4096 .features[FEAT_8000_0001_EDX] = 4097 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4098 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4099 CPUID_EXT2_SYSCALL, 4100 .features[FEAT_8000_0001_ECX] = 4101 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4102 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4103 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4104 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4105 .features[FEAT_8000_0008_EBX] = 4106 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4107 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4108 CPUID_8000_0008_EBX_STIBP, 4109 .features[FEAT_7_0_EBX] = 4110 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4111 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4112 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4113 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4114 .features[FEAT_7_0_ECX] = 4115 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4116 .features[FEAT_XSAVE] = 4117 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4118 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4119 .features[FEAT_6_EAX] = 4120 CPUID_6_EAX_ARAT, 4121 .features[FEAT_SVM] = 4122 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4123 .xlevel = 0x8000001E, 4124 .model_id = "AMD EPYC-Rome Processor", 4125 .cache_info = &epyc_rome_cache_info, 4126 .use_epyc_apic_id_encoding = 1, 4127 }, 4128 }; 4129 4130 /* KVM-specific features that are automatically added/removed 4131 * from all CPU models when KVM is enabled. 4132 */ 4133 static PropValue kvm_default_props[] = { 4134 { "kvmclock", "on" }, 4135 { "kvm-nopiodelay", "on" }, 4136 { "kvm-asyncpf", "on" }, 4137 { "kvm-steal-time", "on" }, 4138 { "kvm-pv-eoi", "on" }, 4139 { "kvmclock-stable-bit", "on" }, 4140 { "x2apic", "on" }, 4141 { "acpi", "off" }, 4142 { "monitor", "off" }, 4143 { "svm", "off" }, 4144 { NULL, NULL }, 4145 }; 4146 4147 /* TCG-specific defaults that override all CPU models when using TCG 4148 */ 4149 static PropValue tcg_default_props[] = { 4150 { "vme", "off" }, 4151 { NULL, NULL }, 4152 }; 4153 4154 4155 /* 4156 * We resolve CPU model aliases using -v1 when using "-machine 4157 * none", but this is just for compatibility while libvirt isn't 4158 * adapted to resolve CPU model versions before creating VMs. 4159 * See "Runnability guarantee of CPU models" at * qemu-deprecated.texi. 4160 */ 4161 X86CPUVersion default_cpu_version = 1; 4162 4163 void x86_cpu_set_default_version(X86CPUVersion version) 4164 { 4165 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4166 assert(version != CPU_VERSION_AUTO); 4167 default_cpu_version = version; 4168 } 4169 4170 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4171 { 4172 int v = 0; 4173 const X86CPUVersionDefinition *vdef = 4174 x86_cpu_def_get_versions(model->cpudef); 4175 while (vdef->version) { 4176 v = vdef->version; 4177 vdef++; 4178 } 4179 return v; 4180 } 4181 4182 /* Return the actual version being used for a specific CPU model */ 4183 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4184 { 4185 X86CPUVersion v = model->version; 4186 if (v == CPU_VERSION_AUTO) { 4187 v = default_cpu_version; 4188 } 4189 if (v == CPU_VERSION_LATEST) { 4190 return x86_cpu_model_last_version(model); 4191 } 4192 return v; 4193 } 4194 4195 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4196 { 4197 PropValue *pv; 4198 for (pv = kvm_default_props; pv->prop; pv++) { 4199 if (!strcmp(pv->prop, prop)) { 4200 pv->value = value; 4201 break; 4202 } 4203 } 4204 4205 /* It is valid to call this function only for properties that 4206 * are already present in the kvm_default_props table. 4207 */ 4208 assert(pv->prop); 4209 } 4210 4211 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 4212 bool migratable_only); 4213 4214 static bool lmce_supported(void) 4215 { 4216 uint64_t mce_cap = 0; 4217 4218 #ifdef CONFIG_KVM 4219 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4220 return false; 4221 } 4222 #endif 4223 4224 return !!(mce_cap & MCG_LMCE_P); 4225 } 4226 4227 #define CPUID_MODEL_ID_SZ 48 4228 4229 /** 4230 * cpu_x86_fill_model_id: 4231 * Get CPUID model ID string from host CPU. 4232 * 4233 * @str should have at least CPUID_MODEL_ID_SZ bytes 4234 * 4235 * The function does NOT add a null terminator to the string 4236 * automatically. 4237 */ 4238 static int cpu_x86_fill_model_id(char *str) 4239 { 4240 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4241 int i; 4242 4243 for (i = 0; i < 3; i++) { 4244 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4245 memcpy(str + i * 16 + 0, &eax, 4); 4246 memcpy(str + i * 16 + 4, &ebx, 4); 4247 memcpy(str + i * 16 + 8, &ecx, 4); 4248 memcpy(str + i * 16 + 12, &edx, 4); 4249 } 4250 return 0; 4251 } 4252 4253 static Property max_x86_cpu_properties[] = { 4254 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4255 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4256 DEFINE_PROP_END_OF_LIST() 4257 }; 4258 4259 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4260 { 4261 DeviceClass *dc = DEVICE_CLASS(oc); 4262 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4263 4264 xcc->ordering = 9; 4265 4266 xcc->model_description = 4267 "Enables all features supported by the accelerator in the current host"; 4268 4269 device_class_set_props(dc, max_x86_cpu_properties); 4270 } 4271 4272 static void max_x86_cpu_initfn(Object *obj) 4273 { 4274 X86CPU *cpu = X86_CPU(obj); 4275 CPUX86State *env = &cpu->env; 4276 KVMState *s = kvm_state; 4277 4278 /* We can't fill the features array here because we don't know yet if 4279 * "migratable" is true or false. 4280 */ 4281 cpu->max_features = true; 4282 4283 if (accel_uses_host_cpuid()) { 4284 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4285 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4286 int family, model, stepping; 4287 4288 host_vendor_fms(vendor, &family, &model, &stepping); 4289 cpu_x86_fill_model_id(model_id); 4290 4291 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 4292 object_property_set_int(OBJECT(cpu), "family", family, &error_abort); 4293 object_property_set_int(OBJECT(cpu), "model", model, &error_abort); 4294 object_property_set_int(OBJECT(cpu), "stepping", stepping, 4295 &error_abort); 4296 object_property_set_str(OBJECT(cpu), "model-id", model_id, 4297 &error_abort); 4298 4299 if (kvm_enabled()) { 4300 env->cpuid_min_level = 4301 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4302 env->cpuid_min_xlevel = 4303 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4304 env->cpuid_min_xlevel2 = 4305 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4306 } else { 4307 env->cpuid_min_level = 4308 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4309 env->cpuid_min_xlevel = 4310 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4311 env->cpuid_min_xlevel2 = 4312 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4313 } 4314 4315 if (lmce_supported()) { 4316 object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); 4317 } 4318 } else { 4319 object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, 4320 &error_abort); 4321 object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); 4322 object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); 4323 object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); 4324 object_property_set_str(OBJECT(cpu), "model-id", 4325 "QEMU TCG CPU version " QEMU_HW_VERSION, 4326 &error_abort); 4327 } 4328 4329 object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); 4330 } 4331 4332 static const TypeInfo max_x86_cpu_type_info = { 4333 .name = X86_CPU_TYPE_NAME("max"), 4334 .parent = TYPE_X86_CPU, 4335 .instance_init = max_x86_cpu_initfn, 4336 .class_init = max_x86_cpu_class_init, 4337 }; 4338 4339 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4340 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4341 { 4342 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4343 4344 xcc->host_cpuid_required = true; 4345 xcc->ordering = 8; 4346 4347 #if defined(CONFIG_KVM) 4348 xcc->model_description = 4349 "KVM processor with all supported host features "; 4350 #elif defined(CONFIG_HVF) 4351 xcc->model_description = 4352 "HVF processor with all supported host features "; 4353 #endif 4354 } 4355 4356 static const TypeInfo host_x86_cpu_type_info = { 4357 .name = X86_CPU_TYPE_NAME("host"), 4358 .parent = X86_CPU_TYPE_NAME("max"), 4359 .class_init = host_x86_cpu_class_init, 4360 }; 4361 4362 #endif 4363 4364 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4365 { 4366 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4367 4368 switch (f->type) { 4369 case CPUID_FEATURE_WORD: 4370 { 4371 const char *reg = get_register_name_32(f->cpuid.reg); 4372 assert(reg); 4373 return g_strdup_printf("CPUID.%02XH:%s", 4374 f->cpuid.eax, reg); 4375 } 4376 case MSR_FEATURE_WORD: 4377 return g_strdup_printf("MSR(%02XH)", 4378 f->msr.index); 4379 } 4380 4381 return NULL; 4382 } 4383 4384 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4385 { 4386 FeatureWord w; 4387 4388 for (w = 0; w < FEATURE_WORDS; w++) { 4389 if (cpu->filtered_features[w]) { 4390 return true; 4391 } 4392 } 4393 4394 return false; 4395 } 4396 4397 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4398 const char *verbose_prefix) 4399 { 4400 CPUX86State *env = &cpu->env; 4401 FeatureWordInfo *f = &feature_word_info[w]; 4402 int i; 4403 4404 if (!cpu->force_features) { 4405 env->features[w] &= ~mask; 4406 } 4407 cpu->filtered_features[w] |= mask; 4408 4409 if (!verbose_prefix) { 4410 return; 4411 } 4412 4413 for (i = 0; i < 64; ++i) { 4414 if ((1ULL << i) & mask) { 4415 g_autofree char *feat_word_str = feature_word_description(f, i); 4416 warn_report("%s: %s%s%s [bit %d]", 4417 verbose_prefix, 4418 feat_word_str, 4419 f->feat_names[i] ? "." : "", 4420 f->feat_names[i] ? f->feat_names[i] : "", i); 4421 } 4422 } 4423 } 4424 4425 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4426 const char *name, void *opaque, 4427 Error **errp) 4428 { 4429 X86CPU *cpu = X86_CPU(obj); 4430 CPUX86State *env = &cpu->env; 4431 int64_t value; 4432 4433 value = (env->cpuid_version >> 8) & 0xf; 4434 if (value == 0xf) { 4435 value += (env->cpuid_version >> 20) & 0xff; 4436 } 4437 visit_type_int(v, name, &value, errp); 4438 } 4439 4440 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4441 const char *name, void *opaque, 4442 Error **errp) 4443 { 4444 X86CPU *cpu = X86_CPU(obj); 4445 CPUX86State *env = &cpu->env; 4446 const int64_t min = 0; 4447 const int64_t max = 0xff + 0xf; 4448 int64_t value; 4449 4450 if (!visit_type_int(v, name, &value, errp)) { 4451 return; 4452 } 4453 if (value < min || value > max) { 4454 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4455 name ? name : "null", value, min, max); 4456 return; 4457 } 4458 4459 env->cpuid_version &= ~0xff00f00; 4460 if (value > 0x0f) { 4461 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4462 } else { 4463 env->cpuid_version |= value << 8; 4464 } 4465 } 4466 4467 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4468 const char *name, void *opaque, 4469 Error **errp) 4470 { 4471 X86CPU *cpu = X86_CPU(obj); 4472 CPUX86State *env = &cpu->env; 4473 int64_t value; 4474 4475 value = (env->cpuid_version >> 4) & 0xf; 4476 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4477 visit_type_int(v, name, &value, errp); 4478 } 4479 4480 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4481 const char *name, void *opaque, 4482 Error **errp) 4483 { 4484 X86CPU *cpu = X86_CPU(obj); 4485 CPUX86State *env = &cpu->env; 4486 const int64_t min = 0; 4487 const int64_t max = 0xff; 4488 int64_t value; 4489 4490 if (!visit_type_int(v, name, &value, errp)) { 4491 return; 4492 } 4493 if (value < min || value > max) { 4494 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4495 name ? name : "null", value, min, max); 4496 return; 4497 } 4498 4499 env->cpuid_version &= ~0xf00f0; 4500 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4501 } 4502 4503 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4504 const char *name, void *opaque, 4505 Error **errp) 4506 { 4507 X86CPU *cpu = X86_CPU(obj); 4508 CPUX86State *env = &cpu->env; 4509 int64_t value; 4510 4511 value = env->cpuid_version & 0xf; 4512 visit_type_int(v, name, &value, errp); 4513 } 4514 4515 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4516 const char *name, void *opaque, 4517 Error **errp) 4518 { 4519 X86CPU *cpu = X86_CPU(obj); 4520 CPUX86State *env = &cpu->env; 4521 const int64_t min = 0; 4522 const int64_t max = 0xf; 4523 int64_t value; 4524 4525 if (!visit_type_int(v, name, &value, errp)) { 4526 return; 4527 } 4528 if (value < min || value > max) { 4529 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4530 name ? name : "null", value, min, max); 4531 return; 4532 } 4533 4534 env->cpuid_version &= ~0xf; 4535 env->cpuid_version |= value & 0xf; 4536 } 4537 4538 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4539 { 4540 X86CPU *cpu = X86_CPU(obj); 4541 CPUX86State *env = &cpu->env; 4542 char *value; 4543 4544 value = g_malloc(CPUID_VENDOR_SZ + 1); 4545 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4546 env->cpuid_vendor3); 4547 return value; 4548 } 4549 4550 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4551 Error **errp) 4552 { 4553 X86CPU *cpu = X86_CPU(obj); 4554 CPUX86State *env = &cpu->env; 4555 int i; 4556 4557 if (strlen(value) != CPUID_VENDOR_SZ) { 4558 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4559 return; 4560 } 4561 4562 env->cpuid_vendor1 = 0; 4563 env->cpuid_vendor2 = 0; 4564 env->cpuid_vendor3 = 0; 4565 for (i = 0; i < 4; i++) { 4566 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4567 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4568 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4569 } 4570 } 4571 4572 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4573 { 4574 X86CPU *cpu = X86_CPU(obj); 4575 CPUX86State *env = &cpu->env; 4576 char *value; 4577 int i; 4578 4579 value = g_malloc(48 + 1); 4580 for (i = 0; i < 48; i++) { 4581 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4582 } 4583 value[48] = '\0'; 4584 return value; 4585 } 4586 4587 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4588 Error **errp) 4589 { 4590 X86CPU *cpu = X86_CPU(obj); 4591 CPUX86State *env = &cpu->env; 4592 int c, len, i; 4593 4594 if (model_id == NULL) { 4595 model_id = ""; 4596 } 4597 len = strlen(model_id); 4598 memset(env->cpuid_model, 0, 48); 4599 for (i = 0; i < 48; i++) { 4600 if (i >= len) { 4601 c = '\0'; 4602 } else { 4603 c = (uint8_t)model_id[i]; 4604 } 4605 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4606 } 4607 } 4608 4609 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4610 void *opaque, Error **errp) 4611 { 4612 X86CPU *cpu = X86_CPU(obj); 4613 int64_t value; 4614 4615 value = cpu->env.tsc_khz * 1000; 4616 visit_type_int(v, name, &value, errp); 4617 } 4618 4619 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4620 void *opaque, Error **errp) 4621 { 4622 X86CPU *cpu = X86_CPU(obj); 4623 const int64_t min = 0; 4624 const int64_t max = INT64_MAX; 4625 int64_t value; 4626 4627 if (!visit_type_int(v, name, &value, errp)) { 4628 return; 4629 } 4630 if (value < min || value > max) { 4631 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4632 name ? name : "null", value, min, max); 4633 return; 4634 } 4635 4636 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4637 } 4638 4639 /* Generic getter for "feature-words" and "filtered-features" properties */ 4640 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4641 const char *name, void *opaque, 4642 Error **errp) 4643 { 4644 uint64_t *array = (uint64_t *)opaque; 4645 FeatureWord w; 4646 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4647 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4648 X86CPUFeatureWordInfoList *list = NULL; 4649 4650 for (w = 0; w < FEATURE_WORDS; w++) { 4651 FeatureWordInfo *wi = &feature_word_info[w]; 4652 /* 4653 * We didn't have MSR features when "feature-words" was 4654 * introduced. Therefore skipped other type entries. 4655 */ 4656 if (wi->type != CPUID_FEATURE_WORD) { 4657 continue; 4658 } 4659 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4660 qwi->cpuid_input_eax = wi->cpuid.eax; 4661 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4662 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4663 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4664 qwi->features = array[w]; 4665 4666 /* List will be in reverse order, but order shouldn't matter */ 4667 list_entries[w].next = list; 4668 list_entries[w].value = &word_infos[w]; 4669 list = &list_entries[w]; 4670 } 4671 4672 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4673 } 4674 4675 /* Convert all '_' in a feature string option name to '-', to make feature 4676 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4677 */ 4678 static inline void feat2prop(char *s) 4679 { 4680 while ((s = strchr(s, '_'))) { 4681 *s = '-'; 4682 } 4683 } 4684 4685 /* Return the feature property name for a feature flag bit */ 4686 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4687 { 4688 const char *name; 4689 /* XSAVE components are automatically enabled by other features, 4690 * so return the original feature name instead 4691 */ 4692 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4693 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4694 4695 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4696 x86_ext_save_areas[comp].bits) { 4697 w = x86_ext_save_areas[comp].feature; 4698 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4699 } 4700 } 4701 4702 assert(bitnr < 64); 4703 assert(w < FEATURE_WORDS); 4704 name = feature_word_info[w].feat_names[bitnr]; 4705 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4706 return name; 4707 } 4708 4709 /* Compatibily hack to maintain legacy +-feat semantic, 4710 * where +-feat overwrites any feature set by 4711 * feat=on|feat even if the later is parsed after +-feat 4712 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4713 */ 4714 static GList *plus_features, *minus_features; 4715 4716 static gint compare_string(gconstpointer a, gconstpointer b) 4717 { 4718 return g_strcmp0(a, b); 4719 } 4720 4721 /* Parse "+feature,-feature,feature=foo" CPU feature string 4722 */ 4723 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4724 Error **errp) 4725 { 4726 char *featurestr; /* Single 'key=value" string being parsed */ 4727 static bool cpu_globals_initialized; 4728 bool ambiguous = false; 4729 4730 if (cpu_globals_initialized) { 4731 return; 4732 } 4733 cpu_globals_initialized = true; 4734 4735 if (!features) { 4736 return; 4737 } 4738 4739 for (featurestr = strtok(features, ","); 4740 featurestr; 4741 featurestr = strtok(NULL, ",")) { 4742 const char *name; 4743 const char *val = NULL; 4744 char *eq = NULL; 4745 char num[32]; 4746 GlobalProperty *prop; 4747 4748 /* Compatibility syntax: */ 4749 if (featurestr[0] == '+') { 4750 plus_features = g_list_append(plus_features, 4751 g_strdup(featurestr + 1)); 4752 continue; 4753 } else if (featurestr[0] == '-') { 4754 minus_features = g_list_append(minus_features, 4755 g_strdup(featurestr + 1)); 4756 continue; 4757 } 4758 4759 eq = strchr(featurestr, '='); 4760 if (eq) { 4761 *eq++ = 0; 4762 val = eq; 4763 } else { 4764 val = "on"; 4765 } 4766 4767 feat2prop(featurestr); 4768 name = featurestr; 4769 4770 if (g_list_find_custom(plus_features, name, compare_string)) { 4771 warn_report("Ambiguous CPU model string. " 4772 "Don't mix both \"+%s\" and \"%s=%s\"", 4773 name, name, val); 4774 ambiguous = true; 4775 } 4776 if (g_list_find_custom(minus_features, name, compare_string)) { 4777 warn_report("Ambiguous CPU model string. " 4778 "Don't mix both \"-%s\" and \"%s=%s\"", 4779 name, name, val); 4780 ambiguous = true; 4781 } 4782 4783 /* Special case: */ 4784 if (!strcmp(name, "tsc-freq")) { 4785 int ret; 4786 uint64_t tsc_freq; 4787 4788 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4789 if (ret < 0 || tsc_freq > INT64_MAX) { 4790 error_setg(errp, "bad numerical value %s", val); 4791 return; 4792 } 4793 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4794 val = num; 4795 name = "tsc-frequency"; 4796 } 4797 4798 prop = g_new0(typeof(*prop), 1); 4799 prop->driver = typename; 4800 prop->property = g_strdup(name); 4801 prop->value = g_strdup(val); 4802 qdev_prop_register_global(prop); 4803 } 4804 4805 if (ambiguous) { 4806 warn_report("Compatibility of ambiguous CPU model " 4807 "strings won't be kept on future QEMU versions"); 4808 } 4809 } 4810 4811 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4812 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4813 4814 /* Build a list with the name of all features on a feature word array */ 4815 static void x86_cpu_list_feature_names(FeatureWordArray features, 4816 strList **feat_names) 4817 { 4818 FeatureWord w; 4819 strList **next = feat_names; 4820 4821 for (w = 0; w < FEATURE_WORDS; w++) { 4822 uint64_t filtered = features[w]; 4823 int i; 4824 for (i = 0; i < 64; i++) { 4825 if (filtered & (1ULL << i)) { 4826 strList *new = g_new0(strList, 1); 4827 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4828 *next = new; 4829 next = &new->next; 4830 } 4831 } 4832 } 4833 } 4834 4835 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4836 const char *name, void *opaque, 4837 Error **errp) 4838 { 4839 X86CPU *xc = X86_CPU(obj); 4840 strList *result = NULL; 4841 4842 x86_cpu_list_feature_names(xc->filtered_features, &result); 4843 visit_type_strList(v, "unavailable-features", &result, errp); 4844 } 4845 4846 /* Check for missing features that may prevent the CPU class from 4847 * running using the current machine and accelerator. 4848 */ 4849 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4850 strList **missing_feats) 4851 { 4852 X86CPU *xc; 4853 Error *err = NULL; 4854 strList **next = missing_feats; 4855 4856 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4857 strList *new = g_new0(strList, 1); 4858 new->value = g_strdup("kvm"); 4859 *missing_feats = new; 4860 return; 4861 } 4862 4863 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4864 4865 x86_cpu_expand_features(xc, &err); 4866 if (err) { 4867 /* Errors at x86_cpu_expand_features should never happen, 4868 * but in case it does, just report the model as not 4869 * runnable at all using the "type" property. 4870 */ 4871 strList *new = g_new0(strList, 1); 4872 new->value = g_strdup("type"); 4873 *next = new; 4874 next = &new->next; 4875 } 4876 4877 x86_cpu_filter_features(xc, false); 4878 4879 x86_cpu_list_feature_names(xc->filtered_features, next); 4880 4881 object_unref(OBJECT(xc)); 4882 } 4883 4884 /* Print all cpuid feature names in featureset 4885 */ 4886 static void listflags(GList *features) 4887 { 4888 size_t len = 0; 4889 GList *tmp; 4890 4891 for (tmp = features; tmp; tmp = tmp->next) { 4892 const char *name = tmp->data; 4893 if ((len + strlen(name) + 1) >= 75) { 4894 qemu_printf("\n"); 4895 len = 0; 4896 } 4897 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4898 len += strlen(name) + 1; 4899 } 4900 qemu_printf("\n"); 4901 } 4902 4903 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4904 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4905 { 4906 ObjectClass *class_a = (ObjectClass *)a; 4907 ObjectClass *class_b = (ObjectClass *)b; 4908 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4909 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4910 int ret; 4911 4912 if (cc_a->ordering != cc_b->ordering) { 4913 ret = cc_a->ordering - cc_b->ordering; 4914 } else { 4915 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4916 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4917 ret = strcmp(name_a, name_b); 4918 } 4919 return ret; 4920 } 4921 4922 static GSList *get_sorted_cpu_model_list(void) 4923 { 4924 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4925 list = g_slist_sort(list, x86_cpu_list_compare); 4926 return list; 4927 } 4928 4929 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4930 { 4931 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4932 char *r = object_property_get_str(obj, "model-id", &error_abort); 4933 object_unref(obj); 4934 return r; 4935 } 4936 4937 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4938 { 4939 X86CPUVersion version; 4940 4941 if (!cc->model || !cc->model->is_alias) { 4942 return NULL; 4943 } 4944 version = x86_cpu_model_resolve_version(cc->model); 4945 if (version <= 0) { 4946 return NULL; 4947 } 4948 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4949 } 4950 4951 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4952 { 4953 ObjectClass *oc = data; 4954 X86CPUClass *cc = X86_CPU_CLASS(oc); 4955 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4956 g_autofree char *desc = g_strdup(cc->model_description); 4957 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4958 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4959 4960 if (!desc && alias_of) { 4961 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4962 desc = g_strdup("(alias configured by machine type)"); 4963 } else { 4964 desc = g_strdup_printf("(alias of %s)", alias_of); 4965 } 4966 } 4967 if (!desc && cc->model && cc->model->note) { 4968 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4969 } 4970 if (!desc) { 4971 desc = g_strdup_printf("%s", model_id); 4972 } 4973 4974 qemu_printf("x86 %-20s %-58s\n", name, desc); 4975 } 4976 4977 /* list available CPU models and flags */ 4978 void x86_cpu_list(void) 4979 { 4980 int i, j; 4981 GSList *list; 4982 GList *names = NULL; 4983 4984 qemu_printf("Available CPUs:\n"); 4985 list = get_sorted_cpu_model_list(); 4986 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4987 g_slist_free(list); 4988 4989 names = NULL; 4990 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 4991 FeatureWordInfo *fw = &feature_word_info[i]; 4992 for (j = 0; j < 64; j++) { 4993 if (fw->feat_names[j]) { 4994 names = g_list_append(names, (gpointer)fw->feat_names[j]); 4995 } 4996 } 4997 } 4998 4999 names = g_list_sort(names, (GCompareFunc)strcmp); 5000 5001 qemu_printf("\nRecognized CPUID flags:\n"); 5002 listflags(names); 5003 qemu_printf("\n"); 5004 g_list_free(names); 5005 } 5006 5007 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 5008 { 5009 ObjectClass *oc = data; 5010 X86CPUClass *cc = X86_CPU_CLASS(oc); 5011 CpuDefinitionInfoList **cpu_list = user_data; 5012 CpuDefinitionInfoList *entry; 5013 CpuDefinitionInfo *info; 5014 5015 info = g_malloc0(sizeof(*info)); 5016 info->name = x86_cpu_class_get_model_name(cc); 5017 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 5018 info->has_unavailable_features = true; 5019 info->q_typename = g_strdup(object_class_get_name(oc)); 5020 info->migration_safe = cc->migration_safe; 5021 info->has_migration_safe = true; 5022 info->q_static = cc->static_model; 5023 /* 5024 * Old machine types won't report aliases, so that alias translation 5025 * doesn't break compatibility with previous QEMU versions. 5026 */ 5027 if (default_cpu_version != CPU_VERSION_LEGACY) { 5028 info->alias_of = x86_cpu_class_get_alias_of(cc); 5029 info->has_alias_of = !!info->alias_of; 5030 } 5031 5032 entry = g_malloc0(sizeof(*entry)); 5033 entry->value = info; 5034 entry->next = *cpu_list; 5035 *cpu_list = entry; 5036 } 5037 5038 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 5039 { 5040 CpuDefinitionInfoList *cpu_list = NULL; 5041 GSList *list = get_sorted_cpu_model_list(); 5042 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 5043 g_slist_free(list); 5044 return cpu_list; 5045 } 5046 5047 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5048 bool migratable_only) 5049 { 5050 FeatureWordInfo *wi = &feature_word_info[w]; 5051 uint64_t r = 0; 5052 5053 if (kvm_enabled()) { 5054 switch (wi->type) { 5055 case CPUID_FEATURE_WORD: 5056 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5057 wi->cpuid.ecx, 5058 wi->cpuid.reg); 5059 break; 5060 case MSR_FEATURE_WORD: 5061 r = kvm_arch_get_supported_msr_feature(kvm_state, 5062 wi->msr.index); 5063 break; 5064 } 5065 } else if (hvf_enabled()) { 5066 if (wi->type != CPUID_FEATURE_WORD) { 5067 return 0; 5068 } 5069 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5070 wi->cpuid.ecx, 5071 wi->cpuid.reg); 5072 } else if (tcg_enabled()) { 5073 r = wi->tcg_features; 5074 } else { 5075 return ~0; 5076 } 5077 if (migratable_only) { 5078 r &= x86_cpu_get_migratable_flags(w); 5079 } 5080 return r; 5081 } 5082 5083 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5084 { 5085 PropValue *pv; 5086 for (pv = props; pv->prop; pv++) { 5087 if (!pv->value) { 5088 continue; 5089 } 5090 object_property_parse(OBJECT(cpu), pv->prop, pv->value, 5091 &error_abort); 5092 } 5093 } 5094 5095 /* Apply properties for the CPU model version specified in model */ 5096 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5097 { 5098 const X86CPUVersionDefinition *vdef; 5099 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5100 5101 if (version == CPU_VERSION_LEGACY) { 5102 return; 5103 } 5104 5105 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5106 PropValue *p; 5107 5108 for (p = vdef->props; p && p->prop; p++) { 5109 object_property_parse(OBJECT(cpu), p->prop, p->value, 5110 &error_abort); 5111 } 5112 5113 if (vdef->version == version) { 5114 break; 5115 } 5116 } 5117 5118 /* 5119 * If we reached the end of the list, version number was invalid 5120 */ 5121 assert(vdef->version == version); 5122 } 5123 5124 /* Load data from X86CPUDefinition into a X86CPU object 5125 */ 5126 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) 5127 { 5128 X86CPUDefinition *def = model->cpudef; 5129 CPUX86State *env = &cpu->env; 5130 const char *vendor; 5131 char host_vendor[CPUID_VENDOR_SZ + 1]; 5132 FeatureWord w; 5133 5134 /*NOTE: any property set by this function should be returned by 5135 * x86_cpu_static_props(), so static expansion of 5136 * query-cpu-model-expansion is always complete. 5137 */ 5138 5139 /* CPU models only set _minimum_ values for level/xlevel: */ 5140 object_property_set_uint(OBJECT(cpu), "min-level", def->level, 5141 &error_abort); 5142 object_property_set_uint(OBJECT(cpu), "min-xlevel", def->xlevel, 5143 &error_abort); 5144 5145 object_property_set_int(OBJECT(cpu), "family", def->family, &error_abort); 5146 object_property_set_int(OBJECT(cpu), "model", def->model, &error_abort); 5147 object_property_set_int(OBJECT(cpu), "stepping", def->stepping, 5148 &error_abort); 5149 object_property_set_str(OBJECT(cpu), "model-id", def->model_id, 5150 &error_abort); 5151 for (w = 0; w < FEATURE_WORDS; w++) { 5152 env->features[w] = def->features[w]; 5153 } 5154 5155 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5156 cpu->legacy_cache = !def->cache_info; 5157 5158 /* Special cases not set in the X86CPUDefinition structs: */ 5159 /* TODO: in-kernel irqchip for hvf */ 5160 if (kvm_enabled()) { 5161 if (!kvm_irqchip_in_kernel()) { 5162 x86_cpu_change_kvm_default("x2apic", "off"); 5163 } 5164 5165 x86_cpu_apply_props(cpu, kvm_default_props); 5166 } else if (tcg_enabled()) { 5167 x86_cpu_apply_props(cpu, tcg_default_props); 5168 } 5169 5170 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5171 5172 /* sysenter isn't supported in compatibility mode on AMD, 5173 * syscall isn't supported in compatibility mode on Intel. 5174 * Normally we advertise the actual CPU vendor, but you can 5175 * override this using the 'vendor' property if you want to use 5176 * KVM's sysenter/syscall emulation in compatibility mode and 5177 * when doing cross vendor migration 5178 */ 5179 vendor = def->vendor; 5180 if (accel_uses_host_cpuid()) { 5181 uint32_t ebx = 0, ecx = 0, edx = 0; 5182 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5183 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5184 vendor = host_vendor; 5185 } 5186 5187 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 5188 5189 x86_cpu_apply_version_props(cpu, model); 5190 5191 /* 5192 * Properties in versioned CPU model are not user specified features. 5193 * We can simply clear env->user_features here since it will be filled later 5194 * in x86_cpu_expand_features() based on plus_features and minus_features. 5195 */ 5196 memset(&env->user_features, 0, sizeof(env->user_features)); 5197 } 5198 5199 #ifndef CONFIG_USER_ONLY 5200 /* Return a QDict containing keys for all properties that can be included 5201 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 5202 * must be included in the dictionary. 5203 */ 5204 static QDict *x86_cpu_static_props(void) 5205 { 5206 FeatureWord w; 5207 int i; 5208 static const char *props[] = { 5209 "min-level", 5210 "min-xlevel", 5211 "family", 5212 "model", 5213 "stepping", 5214 "model-id", 5215 "vendor", 5216 "lmce", 5217 NULL, 5218 }; 5219 static QDict *d; 5220 5221 if (d) { 5222 return d; 5223 } 5224 5225 d = qdict_new(); 5226 for (i = 0; props[i]; i++) { 5227 qdict_put_null(d, props[i]); 5228 } 5229 5230 for (w = 0; w < FEATURE_WORDS; w++) { 5231 FeatureWordInfo *fi = &feature_word_info[w]; 5232 int bit; 5233 for (bit = 0; bit < 64; bit++) { 5234 if (!fi->feat_names[bit]) { 5235 continue; 5236 } 5237 qdict_put_null(d, fi->feat_names[bit]); 5238 } 5239 } 5240 5241 return d; 5242 } 5243 5244 /* Add an entry to @props dict, with the value for property. */ 5245 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5246 { 5247 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5248 &error_abort); 5249 5250 qdict_put_obj(props, prop, value); 5251 } 5252 5253 /* Convert CPU model data from X86CPU object to a property dictionary 5254 * that can recreate exactly the same CPU model. 5255 */ 5256 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5257 { 5258 QDict *sprops = x86_cpu_static_props(); 5259 const QDictEntry *e; 5260 5261 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5262 const char *prop = qdict_entry_key(e); 5263 x86_cpu_expand_prop(cpu, props, prop); 5264 } 5265 } 5266 5267 /* Convert CPU model data from X86CPU object to a property dictionary 5268 * that can recreate exactly the same CPU model, including every 5269 * writeable QOM property. 5270 */ 5271 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5272 { 5273 ObjectPropertyIterator iter; 5274 ObjectProperty *prop; 5275 5276 object_property_iter_init(&iter, OBJECT(cpu)); 5277 while ((prop = object_property_iter_next(&iter))) { 5278 /* skip read-only or write-only properties */ 5279 if (!prop->get || !prop->set) { 5280 continue; 5281 } 5282 5283 /* "hotplugged" is the only property that is configurable 5284 * on the command-line but will be set differently on CPUs 5285 * created using "-cpu ... -smp ..." and by CPUs created 5286 * on the fly by x86_cpu_from_model() for querying. Skip it. 5287 */ 5288 if (!strcmp(prop->name, "hotplugged")) { 5289 continue; 5290 } 5291 x86_cpu_expand_prop(cpu, props, prop->name); 5292 } 5293 } 5294 5295 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5296 { 5297 const QDictEntry *prop; 5298 5299 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5300 if (!object_property_set_qobject(obj, qdict_entry_key(prop), 5301 qdict_entry_value(prop), errp)) { 5302 break; 5303 } 5304 } 5305 } 5306 5307 /* Create X86CPU object according to model+props specification */ 5308 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5309 { 5310 X86CPU *xc = NULL; 5311 X86CPUClass *xcc; 5312 Error *err = NULL; 5313 5314 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5315 if (xcc == NULL) { 5316 error_setg(&err, "CPU model '%s' not found", model); 5317 goto out; 5318 } 5319 5320 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5321 if (props) { 5322 object_apply_props(OBJECT(xc), props, &err); 5323 if (err) { 5324 goto out; 5325 } 5326 } 5327 5328 x86_cpu_expand_features(xc, &err); 5329 if (err) { 5330 goto out; 5331 } 5332 5333 out: 5334 if (err) { 5335 error_propagate(errp, err); 5336 object_unref(OBJECT(xc)); 5337 xc = NULL; 5338 } 5339 return xc; 5340 } 5341 5342 CpuModelExpansionInfo * 5343 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5344 CpuModelInfo *model, 5345 Error **errp) 5346 { 5347 X86CPU *xc = NULL; 5348 Error *err = NULL; 5349 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5350 QDict *props = NULL; 5351 const char *base_name; 5352 5353 xc = x86_cpu_from_model(model->name, 5354 model->has_props ? 5355 qobject_to(QDict, model->props) : 5356 NULL, &err); 5357 if (err) { 5358 goto out; 5359 } 5360 5361 props = qdict_new(); 5362 ret->model = g_new0(CpuModelInfo, 1); 5363 ret->model->props = QOBJECT(props); 5364 ret->model->has_props = true; 5365 5366 switch (type) { 5367 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5368 /* Static expansion will be based on "base" only */ 5369 base_name = "base"; 5370 x86_cpu_to_dict(xc, props); 5371 break; 5372 case CPU_MODEL_EXPANSION_TYPE_FULL: 5373 /* As we don't return every single property, full expansion needs 5374 * to keep the original model name+props, and add extra 5375 * properties on top of that. 5376 */ 5377 base_name = model->name; 5378 x86_cpu_to_dict_full(xc, props); 5379 break; 5380 default: 5381 error_setg(&err, "Unsupported expansion type"); 5382 goto out; 5383 } 5384 5385 x86_cpu_to_dict(xc, props); 5386 5387 ret->model->name = g_strdup(base_name); 5388 5389 out: 5390 object_unref(OBJECT(xc)); 5391 if (err) { 5392 error_propagate(errp, err); 5393 qapi_free_CpuModelExpansionInfo(ret); 5394 ret = NULL; 5395 } 5396 return ret; 5397 } 5398 #endif /* !CONFIG_USER_ONLY */ 5399 5400 static gchar *x86_gdb_arch_name(CPUState *cs) 5401 { 5402 #ifdef TARGET_X86_64 5403 return g_strdup("i386:x86-64"); 5404 #else 5405 return g_strdup("i386"); 5406 #endif 5407 } 5408 5409 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5410 { 5411 X86CPUModel *model = data; 5412 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5413 5414 xcc->model = model; 5415 xcc->migration_safe = true; 5416 } 5417 5418 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5419 { 5420 g_autofree char *typename = x86_cpu_type_name(name); 5421 TypeInfo ti = { 5422 .name = typename, 5423 .parent = TYPE_X86_CPU, 5424 .class_init = x86_cpu_cpudef_class_init, 5425 .class_data = model, 5426 }; 5427 5428 type_register(&ti); 5429 } 5430 5431 static void x86_register_cpudef_types(X86CPUDefinition *def) 5432 { 5433 X86CPUModel *m; 5434 const X86CPUVersionDefinition *vdef; 5435 5436 /* AMD aliases are handled at runtime based on CPUID vendor, so 5437 * they shouldn't be set on the CPU model table. 5438 */ 5439 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5440 /* catch mistakes instead of silently truncating model_id when too long */ 5441 assert(def->model_id && strlen(def->model_id) <= 48); 5442 5443 /* Unversioned model: */ 5444 m = g_new0(X86CPUModel, 1); 5445 m->cpudef = def; 5446 m->version = CPU_VERSION_AUTO; 5447 m->is_alias = true; 5448 x86_register_cpu_model_type(def->name, m); 5449 5450 /* Versioned models: */ 5451 5452 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5453 X86CPUModel *m = g_new0(X86CPUModel, 1); 5454 g_autofree char *name = 5455 x86_cpu_versioned_model_name(def, vdef->version); 5456 m->cpudef = def; 5457 m->version = vdef->version; 5458 m->note = vdef->note; 5459 x86_register_cpu_model_type(name, m); 5460 5461 if (vdef->alias) { 5462 X86CPUModel *am = g_new0(X86CPUModel, 1); 5463 am->cpudef = def; 5464 am->version = vdef->version; 5465 am->is_alias = true; 5466 x86_register_cpu_model_type(vdef->alias, am); 5467 } 5468 } 5469 5470 } 5471 5472 #if !defined(CONFIG_USER_ONLY) 5473 5474 void cpu_clear_apic_feature(CPUX86State *env) 5475 { 5476 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5477 } 5478 5479 #endif /* !CONFIG_USER_ONLY */ 5480 5481 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5482 uint32_t *eax, uint32_t *ebx, 5483 uint32_t *ecx, uint32_t *edx) 5484 { 5485 X86CPU *cpu = env_archcpu(env); 5486 CPUState *cs = env_cpu(env); 5487 uint32_t die_offset; 5488 uint32_t limit; 5489 uint32_t signature[3]; 5490 X86CPUTopoInfo topo_info; 5491 5492 topo_info.nodes_per_pkg = env->nr_nodes; 5493 topo_info.dies_per_pkg = env->nr_dies; 5494 topo_info.cores_per_die = cs->nr_cores; 5495 topo_info.threads_per_core = cs->nr_threads; 5496 5497 /* Calculate & apply limits for different index ranges */ 5498 if (index >= 0xC0000000) { 5499 limit = env->cpuid_xlevel2; 5500 } else if (index >= 0x80000000) { 5501 limit = env->cpuid_xlevel; 5502 } else if (index >= 0x40000000) { 5503 limit = 0x40000001; 5504 } else { 5505 limit = env->cpuid_level; 5506 } 5507 5508 if (index > limit) { 5509 /* Intel documentation states that invalid EAX input will 5510 * return the same information as EAX=cpuid_level 5511 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 5512 */ 5513 index = env->cpuid_level; 5514 } 5515 5516 switch(index) { 5517 case 0: 5518 *eax = env->cpuid_level; 5519 *ebx = env->cpuid_vendor1; 5520 *edx = env->cpuid_vendor2; 5521 *ecx = env->cpuid_vendor3; 5522 break; 5523 case 1: 5524 *eax = env->cpuid_version; 5525 *ebx = (cpu->apic_id << 24) | 5526 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5527 *ecx = env->features[FEAT_1_ECX]; 5528 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5529 *ecx |= CPUID_EXT_OSXSAVE; 5530 } 5531 *edx = env->features[FEAT_1_EDX]; 5532 if (cs->nr_cores * cs->nr_threads > 1) { 5533 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5534 *edx |= CPUID_HT; 5535 } 5536 if (!cpu->enable_pmu) { 5537 *ecx &= ~CPUID_EXT_PDCM; 5538 } 5539 break; 5540 case 2: 5541 /* cache info: needed for Pentium Pro compatibility */ 5542 if (cpu->cache_info_passthrough) { 5543 host_cpuid(index, 0, eax, ebx, ecx, edx); 5544 break; 5545 } 5546 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5547 *ebx = 0; 5548 if (!cpu->enable_l3_cache) { 5549 *ecx = 0; 5550 } else { 5551 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5552 } 5553 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5554 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5555 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5556 break; 5557 case 4: 5558 /* cache info: needed for Core compatibility */ 5559 if (cpu->cache_info_passthrough) { 5560 host_cpuid(index, count, eax, ebx, ecx, edx); 5561 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ 5562 *eax &= ~0xFC000000; 5563 if ((*eax & 31) && cs->nr_cores > 1) { 5564 *eax |= (cs->nr_cores - 1) << 26; 5565 } 5566 } else { 5567 *eax = 0; 5568 switch (count) { 5569 case 0: /* L1 dcache info */ 5570 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5571 1, cs->nr_cores, 5572 eax, ebx, ecx, edx); 5573 break; 5574 case 1: /* L1 icache info */ 5575 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5576 1, cs->nr_cores, 5577 eax, ebx, ecx, edx); 5578 break; 5579 case 2: /* L2 cache info */ 5580 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5581 cs->nr_threads, cs->nr_cores, 5582 eax, ebx, ecx, edx); 5583 break; 5584 case 3: /* L3 cache info */ 5585 die_offset = apicid_die_offset(&topo_info); 5586 if (cpu->enable_l3_cache) { 5587 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5588 (1 << die_offset), cs->nr_cores, 5589 eax, ebx, ecx, edx); 5590 break; 5591 } 5592 /* fall through */ 5593 default: /* end of info */ 5594 *eax = *ebx = *ecx = *edx = 0; 5595 break; 5596 } 5597 } 5598 break; 5599 case 5: 5600 /* MONITOR/MWAIT Leaf */ 5601 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5602 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5603 *ecx = cpu->mwait.ecx; /* flags */ 5604 *edx = cpu->mwait.edx; /* mwait substates */ 5605 break; 5606 case 6: 5607 /* Thermal and Power Leaf */ 5608 *eax = env->features[FEAT_6_EAX]; 5609 *ebx = 0; 5610 *ecx = 0; 5611 *edx = 0; 5612 break; 5613 case 7: 5614 /* Structured Extended Feature Flags Enumeration Leaf */ 5615 if (count == 0) { 5616 /* Maximum ECX value for sub-leaves */ 5617 *eax = env->cpuid_level_func7; 5618 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5619 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5620 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5621 *ecx |= CPUID_7_0_ECX_OSPKE; 5622 } 5623 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5624 } else if (count == 1) { 5625 *eax = env->features[FEAT_7_1_EAX]; 5626 *ebx = 0; 5627 *ecx = 0; 5628 *edx = 0; 5629 } else { 5630 *eax = 0; 5631 *ebx = 0; 5632 *ecx = 0; 5633 *edx = 0; 5634 } 5635 break; 5636 case 9: 5637 /* Direct Cache Access Information Leaf */ 5638 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5639 *ebx = 0; 5640 *ecx = 0; 5641 *edx = 0; 5642 break; 5643 case 0xA: 5644 /* Architectural Performance Monitoring Leaf */ 5645 if (kvm_enabled() && cpu->enable_pmu) { 5646 KVMState *s = cs->kvm_state; 5647 5648 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5649 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5650 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5651 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5652 } else if (hvf_enabled() && cpu->enable_pmu) { 5653 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5654 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5655 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5656 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5657 } else { 5658 *eax = 0; 5659 *ebx = 0; 5660 *ecx = 0; 5661 *edx = 0; 5662 } 5663 break; 5664 case 0xB: 5665 /* Extended Topology Enumeration Leaf */ 5666 if (!cpu->enable_cpuid_0xb) { 5667 *eax = *ebx = *ecx = *edx = 0; 5668 break; 5669 } 5670 5671 *ecx = count & 0xff; 5672 *edx = cpu->apic_id; 5673 5674 switch (count) { 5675 case 0: 5676 *eax = apicid_core_offset(&topo_info); 5677 *ebx = cs->nr_threads; 5678 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5679 break; 5680 case 1: 5681 *eax = env->pkg_offset; 5682 *ebx = cs->nr_cores * cs->nr_threads; 5683 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5684 break; 5685 default: 5686 *eax = 0; 5687 *ebx = 0; 5688 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5689 } 5690 5691 assert(!(*eax & ~0x1f)); 5692 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5693 break; 5694 case 0x1F: 5695 /* V2 Extended Topology Enumeration Leaf */ 5696 if (env->nr_dies < 2) { 5697 *eax = *ebx = *ecx = *edx = 0; 5698 break; 5699 } 5700 5701 *ecx = count & 0xff; 5702 *edx = cpu->apic_id; 5703 switch (count) { 5704 case 0: 5705 *eax = apicid_core_offset(&topo_info); 5706 *ebx = cs->nr_threads; 5707 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5708 break; 5709 case 1: 5710 *eax = apicid_die_offset(&topo_info); 5711 *ebx = cs->nr_cores * cs->nr_threads; 5712 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5713 break; 5714 case 2: 5715 *eax = env->pkg_offset; 5716 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5717 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5718 break; 5719 default: 5720 *eax = 0; 5721 *ebx = 0; 5722 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5723 } 5724 assert(!(*eax & ~0x1f)); 5725 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5726 break; 5727 case 0xD: { 5728 /* Processor Extended State */ 5729 *eax = 0; 5730 *ebx = 0; 5731 *ecx = 0; 5732 *edx = 0; 5733 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5734 break; 5735 } 5736 5737 if (count == 0) { 5738 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5739 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5740 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5741 /* 5742 * The initial value of xcr0 and ebx == 0, On host without kvm 5743 * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0 5744 * even through guest update xcr0, this will crash some legacy guest 5745 * (e.g., CentOS 6), So set ebx == ecx to workaroud it. 5746 */ 5747 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5748 } else if (count == 1) { 5749 *eax = env->features[FEAT_XSAVE]; 5750 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5751 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5752 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5753 *eax = esa->size; 5754 *ebx = esa->offset; 5755 } 5756 } 5757 break; 5758 } 5759 case 0x14: { 5760 /* Intel Processor Trace Enumeration */ 5761 *eax = 0; 5762 *ebx = 0; 5763 *ecx = 0; 5764 *edx = 0; 5765 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5766 !kvm_enabled()) { 5767 break; 5768 } 5769 5770 if (count == 0) { 5771 *eax = INTEL_PT_MAX_SUBLEAF; 5772 *ebx = INTEL_PT_MINIMAL_EBX; 5773 *ecx = INTEL_PT_MINIMAL_ECX; 5774 } else if (count == 1) { 5775 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5776 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5777 } 5778 break; 5779 } 5780 case 0x40000000: 5781 /* 5782 * CPUID code in kvm_arch_init_vcpu() ignores stuff 5783 * set here, but we restrict to TCG none the less. 5784 */ 5785 if (tcg_enabled() && cpu->expose_tcg) { 5786 memcpy(signature, "TCGTCGTCGTCG", 12); 5787 *eax = 0x40000001; 5788 *ebx = signature[0]; 5789 *ecx = signature[1]; 5790 *edx = signature[2]; 5791 } else { 5792 *eax = 0; 5793 *ebx = 0; 5794 *ecx = 0; 5795 *edx = 0; 5796 } 5797 break; 5798 case 0x40000001: 5799 *eax = 0; 5800 *ebx = 0; 5801 *ecx = 0; 5802 *edx = 0; 5803 break; 5804 case 0x80000000: 5805 *eax = env->cpuid_xlevel; 5806 *ebx = env->cpuid_vendor1; 5807 *edx = env->cpuid_vendor2; 5808 *ecx = env->cpuid_vendor3; 5809 break; 5810 case 0x80000001: 5811 *eax = env->cpuid_version; 5812 *ebx = 0; 5813 *ecx = env->features[FEAT_8000_0001_ECX]; 5814 *edx = env->features[FEAT_8000_0001_EDX]; 5815 5816 /* The Linux kernel checks for the CMPLegacy bit and 5817 * discards multiple thread information if it is set. 5818 * So don't set it here for Intel to make Linux guests happy. 5819 */ 5820 if (cs->nr_cores * cs->nr_threads > 1) { 5821 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5822 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5823 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5824 *ecx |= 1 << 1; /* CmpLegacy bit */ 5825 } 5826 } 5827 break; 5828 case 0x80000002: 5829 case 0x80000003: 5830 case 0x80000004: 5831 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5832 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5833 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5834 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5835 break; 5836 case 0x80000005: 5837 /* cache info (L1 cache) */ 5838 if (cpu->cache_info_passthrough) { 5839 host_cpuid(index, 0, eax, ebx, ecx, edx); 5840 break; 5841 } 5842 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | 5843 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5844 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | 5845 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5846 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5847 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5848 break; 5849 case 0x80000006: 5850 /* cache info (L2 cache) */ 5851 if (cpu->cache_info_passthrough) { 5852 host_cpuid(index, 0, eax, ebx, ecx, edx); 5853 break; 5854 } 5855 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | 5856 (L2_DTLB_2M_ENTRIES << 16) | 5857 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | 5858 (L2_ITLB_2M_ENTRIES); 5859 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | 5860 (L2_DTLB_4K_ENTRIES << 16) | 5861 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | 5862 (L2_ITLB_4K_ENTRIES); 5863 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5864 cpu->enable_l3_cache ? 5865 env->cache_info_amd.l3_cache : NULL, 5866 ecx, edx); 5867 break; 5868 case 0x80000007: 5869 *eax = 0; 5870 *ebx = 0; 5871 *ecx = 0; 5872 *edx = env->features[FEAT_8000_0007_EDX]; 5873 break; 5874 case 0x80000008: 5875 /* virtual & phys address size in low 2 bytes. */ 5876 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5877 /* 64 bit processor */ 5878 *eax = cpu->phys_bits; /* configurable physical bits */ 5879 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5880 *eax |= 0x00003900; /* 57 bits virtual */ 5881 } else { 5882 *eax |= 0x00003000; /* 48 bits virtual */ 5883 } 5884 } else { 5885 *eax = cpu->phys_bits; 5886 } 5887 *ebx = env->features[FEAT_8000_0008_EBX]; 5888 if (cs->nr_cores * cs->nr_threads > 1) { 5889 /* 5890 * Bits 15:12 is "The number of bits in the initial 5891 * Core::X86::Apic::ApicId[ApicId] value that indicate 5892 * thread ID within a package". This is already stored at 5893 * CPUX86State::pkg_offset. 5894 * Bits 7:0 is "The number of threads in the package is NC+1" 5895 */ 5896 *ecx = (env->pkg_offset << 12) | 5897 ((cs->nr_cores * cs->nr_threads) - 1); 5898 } else { 5899 *ecx = 0; 5900 } 5901 *edx = 0; 5902 break; 5903 case 0x8000000A: 5904 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5905 *eax = 0x00000001; /* SVM Revision */ 5906 *ebx = 0x00000010; /* nr of ASIDs */ 5907 *ecx = 0; 5908 *edx = env->features[FEAT_SVM]; /* optional features */ 5909 } else { 5910 *eax = 0; 5911 *ebx = 0; 5912 *ecx = 0; 5913 *edx = 0; 5914 } 5915 break; 5916 case 0x8000001D: 5917 *eax = 0; 5918 if (cpu->cache_info_passthrough) { 5919 host_cpuid(index, count, eax, ebx, ecx, edx); 5920 break; 5921 } 5922 switch (count) { 5923 case 0: /* L1 dcache info */ 5924 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, 5925 &topo_info, eax, ebx, ecx, edx); 5926 break; 5927 case 1: /* L1 icache info */ 5928 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, 5929 &topo_info, eax, ebx, ecx, edx); 5930 break; 5931 case 2: /* L2 cache info */ 5932 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, 5933 &topo_info, eax, ebx, ecx, edx); 5934 break; 5935 case 3: /* L3 cache info */ 5936 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, 5937 &topo_info, eax, ebx, ecx, edx); 5938 break; 5939 default: /* end of info */ 5940 *eax = *ebx = *ecx = *edx = 0; 5941 break; 5942 } 5943 break; 5944 case 0x8000001E: 5945 assert(cpu->core_id <= 255); 5946 encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx); 5947 break; 5948 case 0xC0000000: 5949 *eax = env->cpuid_xlevel2; 5950 *ebx = 0; 5951 *ecx = 0; 5952 *edx = 0; 5953 break; 5954 case 0xC0000001: 5955 /* Support for VIA CPU's CPUID instruction */ 5956 *eax = env->cpuid_version; 5957 *ebx = 0; 5958 *ecx = 0; 5959 *edx = env->features[FEAT_C000_0001_EDX]; 5960 break; 5961 case 0xC0000002: 5962 case 0xC0000003: 5963 case 0xC0000004: 5964 /* Reserved for the future, and now filled with zero */ 5965 *eax = 0; 5966 *ebx = 0; 5967 *ecx = 0; 5968 *edx = 0; 5969 break; 5970 case 0x8000001F: 5971 *eax = sev_enabled() ? 0x2 : 0; 5972 *ebx = sev_get_cbit_position(); 5973 *ebx |= sev_get_reduced_phys_bits() << 6; 5974 *ecx = 0; 5975 *edx = 0; 5976 break; 5977 default: 5978 /* reserved values: zero */ 5979 *eax = 0; 5980 *ebx = 0; 5981 *ecx = 0; 5982 *edx = 0; 5983 break; 5984 } 5985 } 5986 5987 static void x86_cpu_reset(DeviceState *dev) 5988 { 5989 CPUState *s = CPU(dev); 5990 X86CPU *cpu = X86_CPU(s); 5991 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 5992 CPUX86State *env = &cpu->env; 5993 target_ulong cr4; 5994 uint64_t xcr0; 5995 int i; 5996 5997 xcc->parent_reset(dev); 5998 5999 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 6000 6001 env->old_exception = -1; 6002 6003 /* init to reset state */ 6004 6005 env->hflags2 |= HF2_GIF_MASK; 6006 env->hflags &= ~HF_GUEST_MASK; 6007 6008 cpu_x86_update_cr0(env, 0x60000010); 6009 env->a20_mask = ~0x0; 6010 env->smbase = 0x30000; 6011 env->msr_smi_count = 0; 6012 6013 env->idt.limit = 0xffff; 6014 env->gdt.limit = 0xffff; 6015 env->ldt.limit = 0xffff; 6016 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 6017 env->tr.limit = 0xffff; 6018 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 6019 6020 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 6021 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 6022 DESC_R_MASK | DESC_A_MASK); 6023 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 6024 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6025 DESC_A_MASK); 6026 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 6027 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6028 DESC_A_MASK); 6029 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 6030 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6031 DESC_A_MASK); 6032 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 6033 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6034 DESC_A_MASK); 6035 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 6036 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6037 DESC_A_MASK); 6038 6039 env->eip = 0xfff0; 6040 env->regs[R_EDX] = env->cpuid_version; 6041 6042 env->eflags = 0x2; 6043 6044 /* FPU init */ 6045 for (i = 0; i < 8; i++) { 6046 env->fptags[i] = 1; 6047 } 6048 cpu_set_fpuc(env, 0x37f); 6049 6050 env->mxcsr = 0x1f80; 6051 /* All units are in INIT state. */ 6052 env->xstate_bv = 0; 6053 6054 env->pat = 0x0007040600070406ULL; 6055 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6056 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6057 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6058 } 6059 6060 memset(env->dr, 0, sizeof(env->dr)); 6061 env->dr[6] = DR6_FIXED_1; 6062 env->dr[7] = DR7_FIXED_1; 6063 cpu_breakpoint_remove_all(s, BP_CPU); 6064 cpu_watchpoint_remove_all(s, BP_CPU); 6065 6066 cr4 = 0; 6067 xcr0 = XSTATE_FP_MASK; 6068 6069 #ifdef CONFIG_USER_ONLY 6070 /* Enable all the features for user-mode. */ 6071 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6072 xcr0 |= XSTATE_SSE_MASK; 6073 } 6074 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6075 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6076 if (env->features[esa->feature] & esa->bits) { 6077 xcr0 |= 1ull << i; 6078 } 6079 } 6080 6081 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6082 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6083 } 6084 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6085 cr4 |= CR4_FSGSBASE_MASK; 6086 } 6087 #endif 6088 6089 env->xcr0 = xcr0; 6090 cpu_x86_update_cr4(env, cr4); 6091 6092 /* 6093 * SDM 11.11.5 requires: 6094 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6095 * - IA32_MTRR_PHYSMASKn.V = 0 6096 * All other bits are undefined. For simplification, zero it all. 6097 */ 6098 env->mtrr_deftype = 0; 6099 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6100 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6101 6102 env->interrupt_injected = -1; 6103 env->exception_nr = -1; 6104 env->exception_pending = 0; 6105 env->exception_injected = 0; 6106 env->exception_has_payload = false; 6107 env->exception_payload = 0; 6108 env->nmi_injected = false; 6109 #if !defined(CONFIG_USER_ONLY) 6110 /* We hard-wire the BSP to the first CPU. */ 6111 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6112 6113 s->halted = !cpu_is_bsp(cpu); 6114 6115 if (kvm_enabled()) { 6116 kvm_arch_reset_vcpu(cpu); 6117 } 6118 #endif 6119 } 6120 6121 #ifndef CONFIG_USER_ONLY 6122 bool cpu_is_bsp(X86CPU *cpu) 6123 { 6124 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6125 } 6126 6127 /* TODO: remove me, when reset over QOM tree is implemented */ 6128 static void x86_cpu_machine_reset_cb(void *opaque) 6129 { 6130 X86CPU *cpu = opaque; 6131 cpu_reset(CPU(cpu)); 6132 } 6133 #endif 6134 6135 static void mce_init(X86CPU *cpu) 6136 { 6137 CPUX86State *cenv = &cpu->env; 6138 unsigned int bank; 6139 6140 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6141 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6142 (CPUID_MCE | CPUID_MCA)) { 6143 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6144 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6145 cenv->mcg_ctl = ~(uint64_t)0; 6146 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6147 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6148 } 6149 } 6150 } 6151 6152 #ifndef CONFIG_USER_ONLY 6153 APICCommonClass *apic_get_class(void) 6154 { 6155 const char *apic_type = "apic"; 6156 6157 /* TODO: in-kernel irqchip for hvf */ 6158 if (kvm_apic_in_kernel()) { 6159 apic_type = "kvm-apic"; 6160 } else if (xen_enabled()) { 6161 apic_type = "xen-apic"; 6162 } 6163 6164 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6165 } 6166 6167 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6168 { 6169 APICCommonState *apic; 6170 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6171 6172 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6173 6174 object_property_add_child(OBJECT(cpu), "lapic", 6175 OBJECT(cpu->apic_state)); 6176 object_unref(OBJECT(cpu->apic_state)); 6177 6178 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6179 /* TODO: convert to link<> */ 6180 apic = APIC_COMMON(cpu->apic_state); 6181 apic->cpu = cpu; 6182 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6183 } 6184 6185 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6186 { 6187 APICCommonState *apic; 6188 static bool apic_mmio_map_once; 6189 6190 if (cpu->apic_state == NULL) { 6191 return; 6192 } 6193 qdev_realize(DEVICE(cpu->apic_state), NULL, errp); 6194 6195 /* Map APIC MMIO area */ 6196 apic = APIC_COMMON(cpu->apic_state); 6197 if (!apic_mmio_map_once) { 6198 memory_region_add_subregion_overlap(get_system_memory(), 6199 apic->apicbase & 6200 MSR_IA32_APICBASE_BASE, 6201 &apic->io_memory, 6202 0x1000); 6203 apic_mmio_map_once = true; 6204 } 6205 } 6206 6207 static void x86_cpu_machine_done(Notifier *n, void *unused) 6208 { 6209 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6210 MemoryRegion *smram = 6211 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6212 6213 if (smram) { 6214 cpu->smram = g_new(MemoryRegion, 1); 6215 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 6216 smram, 0, 4 * GiB); 6217 memory_region_set_enabled(cpu->smram, true); 6218 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6219 } 6220 } 6221 #else 6222 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6223 { 6224 } 6225 #endif 6226 6227 /* Note: Only safe for use on x86(-64) hosts */ 6228 static uint32_t x86_host_phys_bits(void) 6229 { 6230 uint32_t eax; 6231 uint32_t host_phys_bits; 6232 6233 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6234 if (eax >= 0x80000008) { 6235 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6236 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6237 * at 23:16 that can specify a maximum physical address bits for 6238 * the guest that can override this value; but I've not seen 6239 * anything with that set. 6240 */ 6241 host_phys_bits = eax & 0xff; 6242 } else { 6243 /* It's an odd 64 bit machine that doesn't have the leaf for 6244 * physical address bits; fall back to 36 that's most older 6245 * Intel. 6246 */ 6247 host_phys_bits = 36; 6248 } 6249 6250 return host_phys_bits; 6251 } 6252 6253 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6254 { 6255 if (*min < value) { 6256 *min = value; 6257 } 6258 } 6259 6260 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6261 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6262 { 6263 CPUX86State *env = &cpu->env; 6264 FeatureWordInfo *fi = &feature_word_info[w]; 6265 uint32_t eax = fi->cpuid.eax; 6266 uint32_t region = eax & 0xF0000000; 6267 6268 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6269 if (!env->features[w]) { 6270 return; 6271 } 6272 6273 switch (region) { 6274 case 0x00000000: 6275 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6276 break; 6277 case 0x80000000: 6278 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6279 break; 6280 case 0xC0000000: 6281 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6282 break; 6283 } 6284 6285 if (eax == 7) { 6286 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6287 fi->cpuid.ecx); 6288 } 6289 } 6290 6291 /* Calculate XSAVE components based on the configured CPU feature flags */ 6292 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6293 { 6294 CPUX86State *env = &cpu->env; 6295 int i; 6296 uint64_t mask; 6297 6298 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6299 return; 6300 } 6301 6302 mask = 0; 6303 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6304 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6305 if (env->features[esa->feature] & esa->bits) { 6306 mask |= (1ULL << i); 6307 } 6308 } 6309 6310 env->features[FEAT_XSAVE_COMP_LO] = mask; 6311 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6312 } 6313 6314 /***** Steps involved on loading and filtering CPUID data 6315 * 6316 * When initializing and realizing a CPU object, the steps 6317 * involved in setting up CPUID data are: 6318 * 6319 * 1) Loading CPU model definition (X86CPUDefinition). This is 6320 * implemented by x86_cpu_load_model() and should be completely 6321 * transparent, as it is done automatically by instance_init. 6322 * No code should need to look at X86CPUDefinition structs 6323 * outside instance_init. 6324 * 6325 * 2) CPU expansion. This is done by realize before CPUID 6326 * filtering, and will make sure host/accelerator data is 6327 * loaded for CPU models that depend on host capabilities 6328 * (e.g. "host"). Done by x86_cpu_expand_features(). 6329 * 6330 * 3) CPUID filtering. This initializes extra data related to 6331 * CPUID, and checks if the host supports all capabilities 6332 * required by the CPU. Runnability of a CPU model is 6333 * determined at this step. Done by x86_cpu_filter_features(). 6334 * 6335 * Some operations don't require all steps to be performed. 6336 * More precisely: 6337 * 6338 * - CPU instance creation (instance_init) will run only CPU 6339 * model loading. CPU expansion can't run at instance_init-time 6340 * because host/accelerator data may be not available yet. 6341 * - CPU realization will perform both CPU model expansion and CPUID 6342 * filtering, and return an error in case one of them fails. 6343 * - query-cpu-definitions needs to run all 3 steps. It needs 6344 * to run CPUID filtering, as the 'unavailable-features' 6345 * field is set based on the filtering results. 6346 * - The query-cpu-model-expansion QMP command only needs to run 6347 * CPU model loading and CPU expansion. It should not filter 6348 * any CPUID data based on host capabilities. 6349 */ 6350 6351 /* Expand CPU configuration data, based on configured features 6352 * and host/accelerator capabilities when appropriate. 6353 */ 6354 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6355 { 6356 CPUX86State *env = &cpu->env; 6357 FeatureWord w; 6358 int i; 6359 GList *l; 6360 6361 for (l = plus_features; l; l = l->next) { 6362 const char *prop = l->data; 6363 if (!object_property_set_bool(OBJECT(cpu), prop, true, errp)) { 6364 return; 6365 } 6366 } 6367 6368 for (l = minus_features; l; l = l->next) { 6369 const char *prop = l->data; 6370 if (!object_property_set_bool(OBJECT(cpu), prop, false, errp)) { 6371 return; 6372 } 6373 } 6374 6375 /*TODO: Now cpu->max_features doesn't overwrite features 6376 * set using QOM properties, and we can convert 6377 * plus_features & minus_features to global properties 6378 * inside x86_cpu_parse_featurestr() too. 6379 */ 6380 if (cpu->max_features) { 6381 for (w = 0; w < FEATURE_WORDS; w++) { 6382 /* Override only features that weren't set explicitly 6383 * by the user. 6384 */ 6385 env->features[w] |= 6386 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6387 ~env->user_features[w] & 6388 ~feature_word_info[w].no_autoenable_flags; 6389 } 6390 } 6391 6392 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6393 FeatureDep *d = &feature_dependencies[i]; 6394 if (!(env->features[d->from.index] & d->from.mask)) { 6395 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6396 6397 /* Not an error unless the dependent feature was added explicitly. */ 6398 mark_unavailable_features(cpu, d->to.index, 6399 unavailable_features & env->user_features[d->to.index], 6400 "This feature depends on other features that were not requested"); 6401 6402 env->features[d->to.index] &= ~unavailable_features; 6403 } 6404 } 6405 6406 if (!kvm_enabled() || !cpu->expose_kvm) { 6407 env->features[FEAT_KVM] = 0; 6408 } 6409 6410 x86_cpu_enable_xsave_components(cpu); 6411 6412 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 6413 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6414 if (cpu->full_cpuid_auto_level) { 6415 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6416 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6417 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6418 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6419 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6420 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6421 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6422 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6423 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6424 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6425 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6426 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6427 6428 /* Intel Processor Trace requires CPUID[0x14] */ 6429 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { 6430 if (cpu->intel_pt_auto_level) { 6431 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6432 } else if (cpu->env.cpuid_min_level < 0x14) { 6433 mark_unavailable_features(cpu, FEAT_7_0_EBX, 6434 CPUID_7_0_EBX_INTEL_PT, 6435 "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,min-level=0x14\""); 6436 } 6437 } 6438 6439 /* CPU topology with multi-dies support requires CPUID[0x1F] */ 6440 if (env->nr_dies > 1) { 6441 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6442 } 6443 6444 /* SVM requires CPUID[0x8000000A] */ 6445 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6446 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6447 } 6448 6449 /* SEV requires CPUID[0x8000001F] */ 6450 if (sev_enabled()) { 6451 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6452 } 6453 } 6454 6455 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6456 if (env->cpuid_level_func7 == UINT32_MAX) { 6457 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6458 } 6459 if (env->cpuid_level == UINT32_MAX) { 6460 env->cpuid_level = env->cpuid_min_level; 6461 } 6462 if (env->cpuid_xlevel == UINT32_MAX) { 6463 env->cpuid_xlevel = env->cpuid_min_xlevel; 6464 } 6465 if (env->cpuid_xlevel2 == UINT32_MAX) { 6466 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6467 } 6468 } 6469 6470 /* 6471 * Finishes initialization of CPUID data, filters CPU feature 6472 * words based on host availability of each feature. 6473 * 6474 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 6475 */ 6476 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6477 { 6478 CPUX86State *env = &cpu->env; 6479 FeatureWord w; 6480 const char *prefix = NULL; 6481 6482 if (verbose) { 6483 prefix = accel_uses_host_cpuid() 6484 ? "host doesn't support requested feature" 6485 : "TCG doesn't support requested feature"; 6486 } 6487 6488 for (w = 0; w < FEATURE_WORDS; w++) { 6489 uint64_t host_feat = 6490 x86_cpu_get_supported_feature_word(w, false); 6491 uint64_t requested_features = env->features[w]; 6492 uint64_t unavailable_features = requested_features & ~host_feat; 6493 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6494 } 6495 6496 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6497 kvm_enabled()) { 6498 KVMState *s = CPU(cpu)->kvm_state; 6499 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6500 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6501 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6502 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6503 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6504 6505 if (!eax_0 || 6506 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6507 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6508 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6509 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6510 INTEL_PT_ADDR_RANGES_NUM) || 6511 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6512 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6513 (ecx_0 & INTEL_PT_IP_LIP)) { 6514 /* 6515 * Processor Trace capabilities aren't configurable, so if the 6516 * host can't emulate the capabilities we report on 6517 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6518 */ 6519 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6520 } 6521 } 6522 } 6523 6524 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6525 { 6526 CPUState *cs = CPU(dev); 6527 X86CPU *cpu = X86_CPU(dev); 6528 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6529 CPUX86State *env = &cpu->env; 6530 Error *local_err = NULL; 6531 static bool ht_warned; 6532 6533 if (xcc->host_cpuid_required) { 6534 if (!accel_uses_host_cpuid()) { 6535 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6536 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6537 goto out; 6538 } 6539 } 6540 6541 if (cpu->max_features && accel_uses_host_cpuid()) { 6542 if (enable_cpu_pm) { 6543 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6544 &cpu->mwait.ecx, &cpu->mwait.edx); 6545 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6546 if (kvm_enabled() && kvm_has_waitpkg()) { 6547 env->features[FEAT_7_0_ECX] |= CPUID_7_0_ECX_WAITPKG; 6548 } 6549 } 6550 if (kvm_enabled() && cpu->ucode_rev == 0) { 6551 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, 6552 MSR_IA32_UCODE_REV); 6553 } 6554 } 6555 6556 if (cpu->ucode_rev == 0) { 6557 /* The default is the same as KVM's. */ 6558 if (IS_AMD_CPU(env)) { 6559 cpu->ucode_rev = 0x01000065; 6560 } else { 6561 cpu->ucode_rev = 0x100000000ULL; 6562 } 6563 } 6564 6565 /* mwait extended info: needed for Core compatibility */ 6566 /* We always wake on interrupt even if host does not have the capability */ 6567 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 6568 6569 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 6570 error_setg(errp, "apic-id property was not initialized properly"); 6571 return; 6572 } 6573 6574 x86_cpu_expand_features(cpu, &local_err); 6575 if (local_err) { 6576 goto out; 6577 } 6578 6579 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); 6580 6581 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { 6582 error_setg(&local_err, 6583 accel_uses_host_cpuid() ? 6584 "Host doesn't support requested features" : 6585 "TCG doesn't support requested features"); 6586 goto out; 6587 } 6588 6589 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 6590 * CPUID[1].EDX. 6591 */ 6592 if (IS_AMD_CPU(env)) { 6593 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 6594 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 6595 & CPUID_EXT2_AMD_ALIASES); 6596 } 6597 6598 /* For 64bit systems think about the number of physical bits to present. 6599 * ideally this should be the same as the host; anything other than matching 6600 * the host can cause incorrect guest behaviour. 6601 * QEMU used to pick the magic value of 40 bits that corresponds to 6602 * consumer AMD devices but nothing else. 6603 */ 6604 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 6605 if (accel_uses_host_cpuid()) { 6606 uint32_t host_phys_bits = x86_host_phys_bits(); 6607 static bool warned; 6608 6609 /* Print a warning if the user set it to a value that's not the 6610 * host value. 6611 */ 6612 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 6613 !warned) { 6614 warn_report("Host physical bits (%u)" 6615 " does not match phys-bits property (%u)", 6616 host_phys_bits, cpu->phys_bits); 6617 warned = true; 6618 } 6619 6620 if (cpu->host_phys_bits) { 6621 /* The user asked for us to use the host physical bits */ 6622 cpu->phys_bits = host_phys_bits; 6623 if (cpu->host_phys_bits_limit && 6624 cpu->phys_bits > cpu->host_phys_bits_limit) { 6625 cpu->phys_bits = cpu->host_phys_bits_limit; 6626 } 6627 } 6628 6629 if (cpu->phys_bits && 6630 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 6631 cpu->phys_bits < 32)) { 6632 error_setg(errp, "phys-bits should be between 32 and %u " 6633 " (but is %u)", 6634 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 6635 return; 6636 } 6637 } else { 6638 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 6639 error_setg(errp, "TCG only supports phys-bits=%u", 6640 TCG_PHYS_ADDR_BITS); 6641 return; 6642 } 6643 } 6644 /* 0 means it was not explicitly set by the user (or by machine 6645 * compat_props or by the host code above). In this case, the default 6646 * is the value used by TCG (40). 6647 */ 6648 if (cpu->phys_bits == 0) { 6649 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 6650 } 6651 } else { 6652 /* For 32 bit systems don't use the user set value, but keep 6653 * phys_bits consistent with what we tell the guest. 6654 */ 6655 if (cpu->phys_bits != 0) { 6656 error_setg(errp, "phys-bits is not user-configurable in 32 bit"); 6657 return; 6658 } 6659 6660 if (env->features[FEAT_1_EDX] & CPUID_PSE36) { 6661 cpu->phys_bits = 36; 6662 } else { 6663 cpu->phys_bits = 32; 6664 } 6665 } 6666 6667 /* Cache information initialization */ 6668 if (!cpu->legacy_cache) { 6669 if (!xcc->model || !xcc->model->cpudef->cache_info) { 6670 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6671 error_setg(errp, 6672 "CPU model '%s' doesn't support legacy-cache=off", name); 6673 return; 6674 } 6675 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = 6676 *xcc->model->cpudef->cache_info; 6677 } else { 6678 /* Build legacy cache information */ 6679 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; 6680 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache; 6681 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2; 6682 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache; 6683 6684 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache; 6685 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache; 6686 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache; 6687 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache; 6688 6689 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd; 6690 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd; 6691 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd; 6692 env->cache_info_amd.l3_cache = &legacy_l3_cache; 6693 } 6694 6695 6696 cpu_exec_realizefn(cs, &local_err); 6697 if (local_err != NULL) { 6698 error_propagate(errp, local_err); 6699 return; 6700 } 6701 6702 #ifndef CONFIG_USER_ONLY 6703 MachineState *ms = MACHINE(qdev_get_machine()); 6704 qemu_register_reset(x86_cpu_machine_reset_cb, cpu); 6705 6706 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) { 6707 x86_cpu_apic_create(cpu, &local_err); 6708 if (local_err != NULL) { 6709 goto out; 6710 } 6711 } 6712 #endif 6713 6714 mce_init(cpu); 6715 6716 #ifndef CONFIG_USER_ONLY 6717 if (tcg_enabled()) { 6718 cpu->cpu_as_mem = g_new(MemoryRegion, 1); 6719 cpu->cpu_as_root = g_new(MemoryRegion, 1); 6720 6721 /* Outer container... */ 6722 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull); 6723 memory_region_set_enabled(cpu->cpu_as_root, true); 6724 6725 /* ... with two regions inside: normal system memory with low 6726 * priority, and... 6727 */ 6728 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory", 6729 get_system_memory(), 0, ~0ull); 6730 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0); 6731 memory_region_set_enabled(cpu->cpu_as_mem, true); 6732 6733 cs->num_ases = 2; 6734 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); 6735 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); 6736 6737 /* ... SMRAM with higher priority, linked from /machine/smram. */ 6738 cpu->machine_done.notify = x86_cpu_machine_done; 6739 qemu_add_machine_init_done_notifier(&cpu->machine_done); 6740 } 6741 #endif 6742 6743 qemu_init_vcpu(cs); 6744 6745 /* 6746 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU 6747 * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX 6748 * based on inputs (sockets,cores,threads), it is still better to give 6749 * users a warning. 6750 * 6751 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise 6752 * cs->nr_threads hasn't be populated yet and the checking is incorrect. 6753 */ 6754 if (IS_AMD_CPU(env) && 6755 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) && 6756 cs->nr_threads > 1 && !ht_warned) { 6757 warn_report("This family of AMD CPU doesn't support " 6758 "hyperthreading(%d)", 6759 cs->nr_threads); 6760 error_printf("Please configure -smp options properly" 6761 " or try enabling topoext feature.\n"); 6762 ht_warned = true; 6763 } 6764 6765 x86_cpu_apic_realize(cpu, &local_err); 6766 if (local_err != NULL) { 6767 goto out; 6768 } 6769 cpu_reset(cs); 6770 6771 xcc->parent_realize(dev, &local_err); 6772 6773 out: 6774 if (local_err != NULL) { 6775 error_propagate(errp, local_err); 6776 return; 6777 } 6778 } 6779 6780 static void x86_cpu_unrealizefn(DeviceState *dev) 6781 { 6782 X86CPU *cpu = X86_CPU(dev); 6783 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6784 6785 #ifndef CONFIG_USER_ONLY 6786 cpu_remove_sync(CPU(dev)); 6787 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev); 6788 #endif 6789 6790 if (cpu->apic_state) { 6791 object_unparent(OBJECT(cpu->apic_state)); 6792 cpu->apic_state = NULL; 6793 } 6794 6795 xcc->parent_unrealize(dev); 6796 } 6797 6798 typedef struct BitProperty { 6799 FeatureWord w; 6800 uint64_t mask; 6801 } BitProperty; 6802 6803 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, 6804 void *opaque, Error **errp) 6805 { 6806 X86CPU *cpu = X86_CPU(obj); 6807 BitProperty *fp = opaque; 6808 uint64_t f = cpu->env.features[fp->w]; 6809 bool value = (f & fp->mask) == fp->mask; 6810 visit_type_bool(v, name, &value, errp); 6811 } 6812 6813 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, 6814 void *opaque, Error **errp) 6815 { 6816 DeviceState *dev = DEVICE(obj); 6817 X86CPU *cpu = X86_CPU(obj); 6818 BitProperty *fp = opaque; 6819 bool value; 6820 6821 if (dev->realized) { 6822 qdev_prop_set_after_realize(dev, name, errp); 6823 return; 6824 } 6825 6826 if (!visit_type_bool(v, name, &value, errp)) { 6827 return; 6828 } 6829 6830 if (value) { 6831 cpu->env.features[fp->w] |= fp->mask; 6832 } else { 6833 cpu->env.features[fp->w] &= ~fp->mask; 6834 } 6835 cpu->env.user_features[fp->w] |= fp->mask; 6836 } 6837 6838 static void x86_cpu_release_bit_prop(Object *obj, const char *name, 6839 void *opaque) 6840 { 6841 BitProperty *prop = opaque; 6842 g_free(prop); 6843 } 6844 6845 /* Register a boolean property to get/set a single bit in a uint32_t field. 6846 * 6847 * The same property name can be registered multiple times to make it affect 6848 * multiple bits in the same FeatureWord. In that case, the getter will return 6849 * true only if all bits are set. 6850 */ 6851 static void x86_cpu_register_bit_prop(X86CPU *cpu, 6852 const char *prop_name, 6853 FeatureWord w, 6854 int bitnr) 6855 { 6856 BitProperty *fp; 6857 ObjectProperty *op; 6858 uint64_t mask = (1ULL << bitnr); 6859 6860 op = object_property_find(OBJECT(cpu), prop_name, NULL); 6861 if (op) { 6862 fp = op->opaque; 6863 assert(fp->w == w); 6864 fp->mask |= mask; 6865 } else { 6866 fp = g_new0(BitProperty, 1); 6867 fp->w = w; 6868 fp->mask = mask; 6869 object_property_add(OBJECT(cpu), prop_name, "bool", 6870 x86_cpu_get_bit_prop, 6871 x86_cpu_set_bit_prop, 6872 x86_cpu_release_bit_prop, fp); 6873 } 6874 } 6875 6876 static void x86_cpu_register_feature_bit_props(X86CPU *cpu, 6877 FeatureWord w, 6878 int bitnr) 6879 { 6880 FeatureWordInfo *fi = &feature_word_info[w]; 6881 const char *name = fi->feat_names[bitnr]; 6882 6883 if (!name) { 6884 return; 6885 } 6886 6887 /* Property names should use "-" instead of "_". 6888 * Old names containing underscores are registered as aliases 6889 * using object_property_add_alias() 6890 */ 6891 assert(!strchr(name, '_')); 6892 /* aliases don't use "|" delimiters anymore, they are registered 6893 * manually using object_property_add_alias() */ 6894 assert(!strchr(name, '|')); 6895 x86_cpu_register_bit_prop(cpu, name, w, bitnr); 6896 } 6897 6898 #if !defined(CONFIG_USER_ONLY) 6899 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs) 6900 { 6901 X86CPU *cpu = X86_CPU(cs); 6902 CPUX86State *env = &cpu->env; 6903 GuestPanicInformation *panic_info = NULL; 6904 6905 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) { 6906 panic_info = g_malloc0(sizeof(GuestPanicInformation)); 6907 6908 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V; 6909 6910 assert(HV_CRASH_PARAMS >= 5); 6911 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0]; 6912 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1]; 6913 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2]; 6914 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3]; 6915 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4]; 6916 } 6917 6918 return panic_info; 6919 } 6920 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v, 6921 const char *name, void *opaque, 6922 Error **errp) 6923 { 6924 CPUState *cs = CPU(obj); 6925 GuestPanicInformation *panic_info; 6926 6927 if (!cs->crash_occurred) { 6928 error_setg(errp, "No crash occured"); 6929 return; 6930 } 6931 6932 panic_info = x86_cpu_get_crash_info(cs); 6933 if (panic_info == NULL) { 6934 error_setg(errp, "No crash information"); 6935 return; 6936 } 6937 6938 visit_type_GuestPanicInformation(v, "crash-information", &panic_info, 6939 errp); 6940 qapi_free_GuestPanicInformation(panic_info); 6941 } 6942 #endif /* !CONFIG_USER_ONLY */ 6943 6944 static void x86_cpu_initfn(Object *obj) 6945 { 6946 X86CPU *cpu = X86_CPU(obj); 6947 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj); 6948 CPUX86State *env = &cpu->env; 6949 FeatureWord w; 6950 6951 env->nr_dies = 1; 6952 env->nr_nodes = 1; 6953 cpu_set_cpustate_pointers(cpu); 6954 6955 object_property_add(obj, "family", "int", 6956 x86_cpuid_version_get_family, 6957 x86_cpuid_version_set_family, NULL, NULL); 6958 object_property_add(obj, "model", "int", 6959 x86_cpuid_version_get_model, 6960 x86_cpuid_version_set_model, NULL, NULL); 6961 object_property_add(obj, "stepping", "int", 6962 x86_cpuid_version_get_stepping, 6963 x86_cpuid_version_set_stepping, NULL, NULL); 6964 object_property_add_str(obj, "vendor", 6965 x86_cpuid_get_vendor, 6966 x86_cpuid_set_vendor); 6967 object_property_add_str(obj, "model-id", 6968 x86_cpuid_get_model_id, 6969 x86_cpuid_set_model_id); 6970 object_property_add(obj, "tsc-frequency", "int", 6971 x86_cpuid_get_tsc_freq, 6972 x86_cpuid_set_tsc_freq, NULL, NULL); 6973 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 6974 x86_cpu_get_feature_words, 6975 NULL, NULL, (void *)env->features); 6976 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 6977 x86_cpu_get_feature_words, 6978 NULL, NULL, (void *)cpu->filtered_features); 6979 /* 6980 * The "unavailable-features" property has the same semantics as 6981 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions" 6982 * QMP command: they list the features that would have prevented the 6983 * CPU from running if the "enforce" flag was set. 6984 */ 6985 object_property_add(obj, "unavailable-features", "strList", 6986 x86_cpu_get_unavailable_features, 6987 NULL, NULL, NULL); 6988 6989 #if !defined(CONFIG_USER_ONLY) 6990 object_property_add(obj, "crash-information", "GuestPanicInformation", 6991 x86_cpu_get_crash_info_qom, NULL, NULL, NULL); 6992 #endif 6993 6994 for (w = 0; w < FEATURE_WORDS; w++) { 6995 int bitnr; 6996 6997 for (bitnr = 0; bitnr < 64; bitnr++) { 6998 x86_cpu_register_feature_bit_props(cpu, w, bitnr); 6999 } 7000 } 7001 7002 object_property_add_alias(obj, "sse3", obj, "pni"); 7003 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq"); 7004 object_property_add_alias(obj, "sse4-1", obj, "sse4.1"); 7005 object_property_add_alias(obj, "sse4-2", obj, "sse4.2"); 7006 object_property_add_alias(obj, "xd", obj, "nx"); 7007 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt"); 7008 object_property_add_alias(obj, "i64", obj, "lm"); 7009 7010 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl"); 7011 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust"); 7012 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt"); 7013 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm"); 7014 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy"); 7015 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr"); 7016 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core"); 7017 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb"); 7018 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay"); 7019 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu"); 7020 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf"); 7021 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time"); 7022 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi"); 7023 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt"); 7024 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control"); 7025 object_property_add_alias(obj, "svm_lock", obj, "svm-lock"); 7026 object_property_add_alias(obj, "nrip_save", obj, "nrip-save"); 7027 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale"); 7028 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean"); 7029 object_property_add_alias(obj, "pause_filter", obj, "pause-filter"); 7030 object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); 7031 object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); 7032 7033 if (xcc->model) { 7034 x86_cpu_load_model(cpu, xcc->model); 7035 } 7036 } 7037 7038 static int64_t x86_cpu_get_arch_id(CPUState *cs) 7039 { 7040 X86CPU *cpu = X86_CPU(cs); 7041 7042 return cpu->apic_id; 7043 } 7044 7045 static bool x86_cpu_get_paging_enabled(const CPUState *cs) 7046 { 7047 X86CPU *cpu = X86_CPU(cs); 7048 7049 return cpu->env.cr[0] & CR0_PG_MASK; 7050 } 7051 7052 static void x86_cpu_set_pc(CPUState *cs, vaddr value) 7053 { 7054 X86CPU *cpu = X86_CPU(cs); 7055 7056 cpu->env.eip = value; 7057 } 7058 7059 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) 7060 { 7061 X86CPU *cpu = X86_CPU(cs); 7062 7063 cpu->env.eip = tb->pc - tb->cs_base; 7064 } 7065 7066 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request) 7067 { 7068 X86CPU *cpu = X86_CPU(cs); 7069 CPUX86State *env = &cpu->env; 7070 7071 #if !defined(CONFIG_USER_ONLY) 7072 if (interrupt_request & CPU_INTERRUPT_POLL) { 7073 return CPU_INTERRUPT_POLL; 7074 } 7075 #endif 7076 if (interrupt_request & CPU_INTERRUPT_SIPI) { 7077 return CPU_INTERRUPT_SIPI; 7078 } 7079 7080 if (env->hflags2 & HF2_GIF_MASK) { 7081 if ((interrupt_request & CPU_INTERRUPT_SMI) && 7082 !(env->hflags & HF_SMM_MASK)) { 7083 return CPU_INTERRUPT_SMI; 7084 } else if ((interrupt_request & CPU_INTERRUPT_NMI) && 7085 !(env->hflags2 & HF2_NMI_MASK)) { 7086 return CPU_INTERRUPT_NMI; 7087 } else if (interrupt_request & CPU_INTERRUPT_MCE) { 7088 return CPU_INTERRUPT_MCE; 7089 } else if ((interrupt_request & CPU_INTERRUPT_HARD) && 7090 (((env->hflags2 & HF2_VINTR_MASK) && 7091 (env->hflags2 & HF2_HIF_MASK)) || 7092 (!(env->hflags2 & HF2_VINTR_MASK) && 7093 (env->eflags & IF_MASK && 7094 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) { 7095 return CPU_INTERRUPT_HARD; 7096 #if !defined(CONFIG_USER_ONLY) 7097 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) && 7098 (env->eflags & IF_MASK) && 7099 !(env->hflags & HF_INHIBIT_IRQ_MASK)) { 7100 return CPU_INTERRUPT_VIRQ; 7101 #endif 7102 } 7103 } 7104 7105 return 0; 7106 } 7107 7108 static bool x86_cpu_has_work(CPUState *cs) 7109 { 7110 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0; 7111 } 7112 7113 static void x86_disas_set_info(CPUState *cs, disassemble_info *info) 7114 { 7115 X86CPU *cpu = X86_CPU(cs); 7116 CPUX86State *env = &cpu->env; 7117 7118 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64 7119 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386 7120 : bfd_mach_i386_i8086); 7121 info->print_insn = print_insn_i386; 7122 7123 info->cap_arch = CS_ARCH_X86; 7124 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64 7125 : env->hflags & HF_CS32_MASK ? CS_MODE_32 7126 : CS_MODE_16); 7127 info->cap_insn_unit = 1; 7128 info->cap_insn_split = 8; 7129 } 7130 7131 void x86_update_hflags(CPUX86State *env) 7132 { 7133 uint32_t hflags; 7134 #define HFLAG_COPY_MASK \ 7135 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \ 7136 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \ 7137 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \ 7138 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK) 7139 7140 hflags = env->hflags & HFLAG_COPY_MASK; 7141 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK; 7142 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT); 7143 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) & 7144 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK); 7145 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK)); 7146 7147 if (env->cr[4] & CR4_OSFXSR_MASK) { 7148 hflags |= HF_OSFXSR_MASK; 7149 } 7150 7151 if (env->efer & MSR_EFER_LMA) { 7152 hflags |= HF_LMA_MASK; 7153 } 7154 7155 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) { 7156 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; 7157 } else { 7158 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >> 7159 (DESC_B_SHIFT - HF_CS32_SHIFT); 7160 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >> 7161 (DESC_B_SHIFT - HF_SS32_SHIFT); 7162 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) || 7163 !(hflags & HF_CS32_MASK)) { 7164 hflags |= HF_ADDSEG_MASK; 7165 } else { 7166 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base | 7167 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT; 7168 } 7169 } 7170 env->hflags = hflags; 7171 } 7172 7173 static Property x86_cpu_properties[] = { 7174 #ifdef CONFIG_USER_ONLY 7175 /* apic_id = 0 by default for *-user, see commit 9886e834 */ 7176 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0), 7177 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0), 7178 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0), 7179 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0), 7180 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 7181 #else 7182 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID), 7183 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1), 7184 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), 7185 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1), 7186 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), 7187 #endif 7188 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), 7189 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), 7190 7191 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts, 7192 HYPERV_SPINLOCK_NEVER_RETRY), 7193 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features, 7194 HYPERV_FEAT_RELAXED, 0), 7195 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features, 7196 HYPERV_FEAT_VAPIC, 0), 7197 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features, 7198 HYPERV_FEAT_TIME, 0), 7199 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features, 7200 HYPERV_FEAT_CRASH, 0), 7201 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features, 7202 HYPERV_FEAT_RESET, 0), 7203 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features, 7204 HYPERV_FEAT_VPINDEX, 0), 7205 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features, 7206 HYPERV_FEAT_RUNTIME, 0), 7207 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features, 7208 HYPERV_FEAT_SYNIC, 0), 7209 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features, 7210 HYPERV_FEAT_STIMER, 0), 7211 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features, 7212 HYPERV_FEAT_FREQUENCIES, 0), 7213 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7214 HYPERV_FEAT_REENLIGHTENMENT, 0), 7215 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7216 HYPERV_FEAT_TLBFLUSH, 0), 7217 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7218 HYPERV_FEAT_EVMCS, 0), 7219 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7220 HYPERV_FEAT_IPI, 0), 7221 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7222 HYPERV_FEAT_STIMER_DIRECT, 0), 7223 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7224 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7225 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7226 7227 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7228 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7229 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7230 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7231 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7232 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7233 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7234 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7235 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7236 UINT32_MAX), 7237 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7238 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7239 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7240 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7241 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7242 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7243 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7244 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7245 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 7246 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7247 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7248 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7249 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7250 false), 7251 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7252 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7253 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7254 true), 7255 /* 7256 * lecacy_cache defaults to true unless the CPU model provides its 7257 * own cache information (see x86_cpu_load_def()). 7258 */ 7259 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7260 7261 /* 7262 * From "Requirements for Implementing the Microsoft 7263 * Hypervisor Interface": 7264 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7265 * 7266 * "Starting with Windows Server 2012 and Windows 8, if 7267 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7268 * the hypervisor imposes no specific limit to the number of VPs. 7269 * In this case, Windows Server 2012 guest VMs may use more than 7270 * 64 VPs, up to the maximum supported number of processors applicable 7271 * to the specific Windows version being used." 7272 */ 7273 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 7274 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only, 7275 false), 7276 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, 7277 true), 7278 DEFINE_PROP_END_OF_LIST() 7279 }; 7280 7281 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 7282 { 7283 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7284 CPUClass *cc = CPU_CLASS(oc); 7285 DeviceClass *dc = DEVICE_CLASS(oc); 7286 7287 device_class_set_parent_realize(dc, x86_cpu_realizefn, 7288 &xcc->parent_realize); 7289 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 7290 &xcc->parent_unrealize); 7291 device_class_set_props(dc, x86_cpu_properties); 7292 7293 device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset); 7294 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 7295 7296 cc->class_by_name = x86_cpu_class_by_name; 7297 cc->parse_features = x86_cpu_parse_featurestr; 7298 cc->has_work = x86_cpu_has_work; 7299 #ifdef CONFIG_TCG 7300 cc->do_interrupt = x86_cpu_do_interrupt; 7301 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; 7302 #endif 7303 cc->dump_state = x86_cpu_dump_state; 7304 cc->set_pc = x86_cpu_set_pc; 7305 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; 7306 cc->gdb_read_register = x86_cpu_gdb_read_register; 7307 cc->gdb_write_register = x86_cpu_gdb_write_register; 7308 cc->get_arch_id = x86_cpu_get_arch_id; 7309 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 7310 #ifndef CONFIG_USER_ONLY 7311 cc->asidx_from_attrs = x86_asidx_from_attrs; 7312 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 7313 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug; 7314 cc->get_crash_info = x86_cpu_get_crash_info; 7315 cc->write_elf64_note = x86_cpu_write_elf64_note; 7316 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 7317 cc->write_elf32_note = x86_cpu_write_elf32_note; 7318 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 7319 cc->vmsd = &vmstate_x86_cpu; 7320 #endif 7321 cc->gdb_arch_name = x86_gdb_arch_name; 7322 #ifdef TARGET_X86_64 7323 cc->gdb_core_xml_file = "i386-64bit.xml"; 7324 cc->gdb_num_core_regs = 66; 7325 #else 7326 cc->gdb_core_xml_file = "i386-32bit.xml"; 7327 cc->gdb_num_core_regs = 50; 7328 #endif 7329 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 7330 cc->debug_excp_handler = breakpoint_handler; 7331 #endif 7332 cc->cpu_exec_enter = x86_cpu_exec_enter; 7333 cc->cpu_exec_exit = x86_cpu_exec_exit; 7334 #ifdef CONFIG_TCG 7335 cc->tcg_initialize = tcg_x86_init; 7336 cc->tlb_fill = x86_cpu_tlb_fill; 7337 #endif 7338 cc->disas_set_info = x86_disas_set_info; 7339 7340 dc->user_creatable = true; 7341 } 7342 7343 static const TypeInfo x86_cpu_type_info = { 7344 .name = TYPE_X86_CPU, 7345 .parent = TYPE_CPU, 7346 .instance_size = sizeof(X86CPU), 7347 .instance_init = x86_cpu_initfn, 7348 .abstract = true, 7349 .class_size = sizeof(X86CPUClass), 7350 .class_init = x86_cpu_common_class_init, 7351 }; 7352 7353 7354 /* "base" CPU model, used by query-cpu-model-expansion */ 7355 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 7356 { 7357 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7358 7359 xcc->static_model = true; 7360 xcc->migration_safe = true; 7361 xcc->model_description = "base CPU model type with no features enabled"; 7362 xcc->ordering = 8; 7363 } 7364 7365 static const TypeInfo x86_base_cpu_type_info = { 7366 .name = X86_CPU_TYPE_NAME("base"), 7367 .parent = TYPE_X86_CPU, 7368 .class_init = x86_cpu_base_class_init, 7369 }; 7370 7371 static void x86_cpu_register_types(void) 7372 { 7373 int i; 7374 7375 type_register_static(&x86_cpu_type_info); 7376 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) { 7377 x86_register_cpudef_types(&builtin_x86_defs[i]); 7378 } 7379 type_register_static(&max_x86_cpu_type_info); 7380 type_register_static(&x86_base_cpu_type_info); 7381 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 7382 type_register_static(&host_x86_cpu_type_info); 7383 #endif 7384 } 7385 7386 type_init(x86_cpu_register_types) 7387