/*
 * i386 CPUID helper functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/qemu-print.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/reset.h"
#include "sysemu/hvf.h"
#include "sysemu/cpus.h"
#include "sysemu/xen.h"
#include "kvm_i386.h"
#include "sev_i386.h"

#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qapi/qapi-visit-machine.h"
#include "qapi/qapi-visit-run-state.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qerror.h"
#include "qapi/visitor.h"
#include "qom/qom-qobject.h"
#include "sysemu/arch_init.h"
#include "qapi/qapi-commands-machine-target.h"

#include "standard-headers/asm-x86/kvm_para.h"

#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "hw/qdev-properties.h"
#include "hw/i386/topology.h"
#ifndef CONFIG_USER_ONLY
#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "hw/boards.h"
#endif

#include "disas/capstone.h"

/* Helpers for building CPUID[2] descriptors: */

struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;
    int line_size;
    int associativity;
};

/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x29 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};

/*
 * "CPUID leaf 2 does not report cache descriptor information,
 * use CPUID leaf 4 to query cache parameters"
 */
#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
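
/*
 * Illustrative example (not part of the original code): with the table
 * above, a 2 MiB, 8-way unified L2 cache with a 64-byte line size matches
 * descriptor 0x7D, while a cache whose geometry has no entry in the table
 * is reported as CACHE_DESCRIPTOR_UNAVAILABLE (0xFF), telling guests to use
 * CPUID leaf 4 instead.
 */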

/*
 * Return a CPUID 2 cache descriptor for a given cache.
 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
 */
static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
{
    int i;

    assert(cache->size > 0);
    assert(cache->level > 0);
    assert(cache->line_size > 0);
    assert(cache->associativity > 0);
    for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
        struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
        if (d->level == cache->level && d->type == cache->type &&
            d->size == cache->size && d->line_size == cache->line_size &&
            d->associativity == cache->associativity) {
            return i;
        }
    }

    return CACHE_DESCRIPTOR_UNAVAILABLE;
}

/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D 1
#define CACHE_TYPE_I 2
#define CACHE_TYPE_UNIFIED 3

#define CACHE_LEVEL(l) (l << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)


/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}

/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
{
    assert(cache->size % 1024 == 0);
    assert(cache->lines_per_tag > 0);
    assert(cache->associativity > 0);
    assert(cache->line_size > 0);
    return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
           (cache->lines_per_tag << 8) | (cache->line_size);
}

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
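
/*
 * Illustrative example (not part of the original code): for a 64 KiB,
 * 2-way L1 cache with 64-byte lines and lines_per_tag = 1,
 * encode_cache_cpuid80000005() yields
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140.
 * CPUID[0x80000006] instead uses the 4-bit AMD_ENC_ASSOC() encoding above
 * in place of the raw associativity value.
 */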

/*
 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
 * @l3 can be NULL.
 */
static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
                                       CPUCacheInfo *l3,
                                       uint32_t *ecx, uint32_t *edx)
{
    assert(l2->size % 1024 == 0);
    assert(l2->associativity > 0);
    assert(l2->lines_per_tag > 0);
    assert(l2->line_size > 0);
    *ecx = ((l2->size / 1024) << 16) |
           (AMD_ENC_ASSOC(l2->associativity) << 12) |
           (l2->lines_per_tag << 8) | (l2->line_size);

    if (l3) {
        assert(l3->size % (512 * 1024) == 0);
        assert(l3->associativity > 0);
        assert(l3->lines_per_tag > 0);
        assert(l3->line_size > 0);
        *edx = ((l3->size / (512 * 1024)) << 18) |
               (AMD_ENC_ASSOC(l3->associativity) << 12) |
               (l3->lines_per_tag << 8) | (l3->line_size);
    } else {
        *edx = 0;
    }
}

/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
                                       X86CPUTopoInfo *topo_info,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    unsigned nodes = MAX(topo_info->nodes_per_pkg, 1);

    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg *
                                 topo_info->cores_per_die *
                                 topo_info->threads_per_core),
                                nodes);
        *eax |= (l3_cores - 1) << 14;
    } else {
        *eax |= ((topo_info->threads_per_core - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
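
/*
 * Illustrative example (not part of the original code): an L3 cache with
 * 64-byte lines, 1 partition, 16-way associativity and 8192 sets (8 MiB)
 * is encoded by the helpers above as
 *   EBX = (64 - 1) | ((1 - 1) << 12) | ((16 - 1) << 22) = 0x03c0003f
 *   ECX = 8192 - 1 = 0x1fff
 * in both CPUID[4] and CPUID[0x8000001D].
 */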

/* Encode cache info for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    X86CPUTopoIDs topo_ids = {0};
    unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1);
    int shift;

    x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids);

    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) |
            (topo_ids.core_id);
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (nodes <= 4) {
        *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. The only
         * requirement here is that this number should be unique across the
         * system. Shift the socket id to accommodate more nodes. We don't
         * expect both socket id and node id to be big numbers at the same
         * time. This is not an ideal config but we need to support it.
         * Max nodes we can have is 32 (255/8) with 8 cores per node and
         * 255 max cores. We only need 5 bits for nodes. Find the left most
         * set bit to represent the total number of nodes. find_last_bit
         * returns the last set bit (0 based). Left shift (+1) the socket id
         * to represent all the nodes.
         */
        nodes -= 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) |
               topo_ids.node_id;
    }
    *edx = 0;
}

/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF 0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX 0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX 0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */

static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                     uint32_t vendor2, uint32_t vendor3)
{
    int i;
    for (i = 0; i < 4; i++) {
        dst[i] = vendor1 >> (8 * i);
        dst[i + 4] = vendor2 >> (8 * i);
        dst[i + 8] = vendor3 >> (8 * i);
    }
    dst[CPUID_VENDOR_SZ] = '\0';
}
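
/*
 * Illustrative example (not part of the original code): for an Intel CPU,
 * CPUID[0] returns EBX=0x756e6547, EDX=0x49656e69, ECX=0x6c65746e, which
 * x86_cpu_vendor_words2str() turns into the string "GenuineIntel"
 * (EBX, EDX, ECX concatenated, least-significant byte first).
 */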

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_7_1_EAX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */

typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;

typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flag names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[64];
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
        } msr;
    };
    uint64_t tcg_features; /* Feature flags supported by TCG */
    uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint64_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint64_t no_autoenable_flags;
} FeatureWordInfo;

static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid = {.eax = 1, .reg = R_EDX, },
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid = { .eax = 1, .reg = R_ECX, },
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
        .tcg_features = TCG_EXT3_FEATURES,
        /*
         * TOPOEXT is always allowed but can't be enabled blindly by
         * "-cpu host", as it requires consistent cache topology info
         * to be provided so it doesn't confuse guests.
         */
        .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
    },
    [FEAT_C000_0001_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
            "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host; they need to be
         * enabled explicitly on the command line.
         */
        .no_autoenable_flags = ~0U,
    },
    /*
     * .feat_names are commented out for Hyper-V enlightenments because we
     * don't want to have two different ways for enabling them on QEMU command
     * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
     * enabling several feature bits simultaneously, and exposing these bits
     * individually may just confuse guests.
     */
    [FEAT_HYPERV_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
    },
    [FEAT_HYPERV_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
    },
    [FEAT_HYPERV_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
    },
    [FEAT_HV_RECOMM_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL /* hv_recommend_pv_as_switch */,
            NULL /* hv_recommend_pv_tlbflush_local */,
            NULL /* hv_recommend_pv_tlbflush_remote */,
            NULL /* hv_recommend_msr_apic_access */,
            NULL /* hv_recommend_msr_reset */,
            NULL /* hv_recommend_relaxed_timing */,
            NULL /* hv_recommend_dma_remapping */,
            NULL /* hv_recommend_int_remapping */,
            NULL /* hv_recommend_x2apic_msrs */,
            NULL /* hv_recommend_autoeoi_deprecation */,
            NULL /* hv_recommend_pv_ipi */,
            NULL /* hv_recommend_ex_hypercalls */,
            NULL /* hv_hypervisor_is_nested */,
            NULL /* hv_recommend_int_mbec */,
            NULL /* hv_recommend_evmcs */,
            NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
    },
    [FEAT_HV_NESTED_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
    },
    [FEAT_SVM] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EBX,
        },
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, "movdiri",
            "movdir64b", NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_ECX,
        },
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            "avx512-vp2intersect", NULL, "md-clear", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL /* pconfig */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", "stibp",
            NULL, "arch-capabilities", "core-capability", "ssbd",
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_7_1_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "avx512-bf16", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 7,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_7_1_EAX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "clzero", NULL, "xsaveerptr", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "wbnoinvd", NULL, NULL,
            "ibpb", NULL, NULL, "amd-stibp",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = {
            .eax = 0xd,
            .needs_ecx = true, .ecx = 1,
            .reg = R_EAX,
        },
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .type = CPUID_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid = { .eax = 6, .reg = R_EAX, },
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EAX,
        },
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .type = CPUID_FEATURE_WORD,
        .cpuid = {
            .eax = 0xD,
            .needs_ecx = true, .ecx = 0,
            .reg = R_EDX,
        },
        .tcg_features = ~0U,
    },
    /* Below are MSR exposed features */
    [FEAT_ARCH_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
            "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
            "taa-no", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_ARCH_CAPABILITIES,
        },
    },
    [FEAT_CORE_CAPABILITY] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "split-lock-detect", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_CORE_CAPABILITY,
        },
    },
    [FEAT_PERF_CAPABILITIES] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "full-width-write", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_PERF_CAPABILITIES,
        },
    },

    [FEAT_VMX_PROCBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
            NULL, NULL, NULL, "vmx-hlt-exit",
            NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
            "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
            "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
            "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
            "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
            "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
        }
    },

    [FEAT_VMX_SECONDARY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
            "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
            "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
            "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
            "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
            "vmx-xsaves", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_PROCBASED_CTLS2,
        }
    },

    [FEAT_VMX_PINBASED_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
            NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
        }
    },

    [FEAT_VMX_EXIT_CTLS] = {
        .type = MSR_FEATURE_WORD,
        /*
         * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
         * the LM CPUID bit.
         */
        .feat_names = {
            NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
            "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
            NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
            "vmx-exit-save-efer", "vmx-exit-load-efer",
            "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
            NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
        }
    },

    [FEAT_VMX_ENTRY_CTLS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, "vmx-entry-noload-debugctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, "vmx-entry-ia32e-mode", NULL, NULL,
            NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
            "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
        }
    },

    [FEAT_VMX_MISC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
            "vmx-activity-wait-sipi", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_MISC,
        }
    },

    [FEAT_VMX_EPT_VPID_CAPS] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            "vmx-ept-execonly", NULL, NULL, NULL,
            NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
            "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
            NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
            NULL, NULL, NULL, NULL,
            "vmx-invvpid", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "vmx-invvpid-single-addr", "vmx-invept-single-context",
            "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .msr = {
            .index = MSR_IA32_VMX_EPT_VPID_CAP,
        }
    },

    [FEAT_VMX_BASIC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            [54] = "vmx-ins-outs",
            [55] = "vmx-true-ctls",
        },
        .msr = {
            .index = MSR_IA32_VMX_BASIC,
        },
        /* Just to be safe - we don't support setting the MSEG version field. */
        .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
    },

    [FEAT_VMX_VMFUNC] = {
        .type = MSR_FEATURE_WORD,
        .feat_names = {
            [0] = "vmx-eptp-switching",
        },
        .msr = {
            .index = MSR_IA32_VMX_VMFUNC,
        }
    },

};

typedef struct FeatureMask {
    FeatureWord index;
    uint64_t mask;
} FeatureMask;

typedef struct FeatureDep {
    FeatureMask from, to;
} FeatureDep;

static FeatureDep feature_dependencies[] = {
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
        .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
        .to = { FEAT_CORE_CAPABILITY, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_PDCM },
        .to = { FEAT_PERF_CAPABILITIES, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_MISC, ~0ull },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_VMX },
        .to = { FEAT_VMX_BASIC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
        .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
    },
    {
        .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
        .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
    },
    {
        .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
    },
    {
        .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
    },
    {
        .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
    },
    {
        .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
        .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
        .to = { FEAT_VMX_VMFUNC, ~0ull },
    },
    {
        .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM },
        .to = { FEAT_SVM, ~0ull },
    },
};

typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};

static uint32_t xsave_area_size(uint64_t mask)
{
    int i;
    uint64_t ret = 0;

    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((mask >> i) & 1) {
            ret = MAX(ret, esa->offset + esa->size);
        }
    }
    return ret;
}
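
/*
 * Illustrative example (not part of the original code): with only the
 * legacy FP and SSE components enabled,
 *   xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK)
 * returns sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), i.e.
 * 512 + 64 = 576 bytes with the standard XSAVE layout; each additional
 * component extends the area up to its offset + size.
 */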

static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}

static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
{
    return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
           cpu->env.features[FEAT_XSAVE_COMP_LO];
}

const char *get_register_name_32(unsigned int reg)
{
    if (reg >= CPU_NB_REGS32) {
        return NULL;
    }
    return x86_reg_info_32[reg].name;
}

/*
 * Returns the set of feature flags that are supported and migratable by
 * QEMU, for a given FeatureWord.
 */
static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint64_t r = 0;
    int i;

    for (i = 0; i < 64; i++) {
        uint64_t f = 1ULL << i;

        /* If the feature name is known, it is implicitly considered migratable,
         * unless it is explicitly set in unmigratable_flags */
        if ((wi->migratable_flags & f) ||
            (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
            r |= f;
        }
    }
    return r;
}

void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}

void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}

/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}

static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
{
    g_autofree char *typename = x86_cpu_type_name(cpu_model);
    return object_class_by_name(typename);
}

static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}

typedef struct PropValue {
    const char *prop, *value;
} PropValue;

typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;
    const char *alias;
    const char *note;
    PropValue *props;
} X86CPUVersionDefinition;

/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;
    uint32_t level;
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    const char *model_id;
    CPUCaches *cache_info;

    /* Use AMD EPYC encoding for apic id */
    bool use_epyc_apic_id_encoding;

    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
} X86CPUDefinition;

/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    const char *note;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};

/* Get full model name for CPU version */
static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}

static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
{
    /* When X86CPUDefinition::versions is NULL, we register only v1 */
    static const X86CPUVersionDefinition default_version_list[] = {
        { 1 },
        { /* end of list */ }
    };

    return def->versions ?: default_version_list;
}

bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type)
{
    X86CPUClass *xcc = X86_CPU_CLASS(object_class_by_name(cpu_type));

    assert(xcc);
    if (xcc->model && xcc->model->cpudef) {
        return xcc->model->cpudef->use_epyc_apic_id_encoding;
    } else {
        return false;
    }
}

static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};

static CPUCaches epyc_rome_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 16 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 16384,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};

/* The following VMX features are not supported by KVM and are left out in the
 * CPU definitions:
 *
 *  Dual-monitor support (all processors)
 *  Entry to SMM
 *  Deactivate dual-monitor treatment
 *  Number of CR3-target values
 *  Shutdown activity state
 *  Wait-for-SIPI activity state
 *  PAUSE-loop exiting (Westmere and newer)
 *  EPT-violation #VE (Broadwell and newer)
 *  Inject event with insn length=0 (Skylake and newer)
 *  Conceal non-root operation from PT
 *  Conceal VM exits from PT
 *  Conceal VM entries from PT
 *  Enable ENCLS exiting
 *  Mode-based execute control (XS/XU)
 *  TSC scaling (Skylake Server and newer)
 *  GPA translation for PT (IceLake and newer)
 *  User wait and pause
 *  ENCLV exiting
 *  Load IA32_RTIT_CTL
 *  Clear IA32_RTIT_CTL
 *  Advanced VM-exit information for EPT violations
 *  Sub-page write permissions
 *  PT in VMX operation
 */

static X86CPUDefinition builtin_x86_defs[] = {
    {
        .name = "qemu64",
        .level = 0xd,
        .vendor = CPUID_VENDOR_AMD,
        .family = 6,
        .model = 6,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor = CPUID_VENDOR_AMD,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_HT */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME,
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
            CPUID_EXT_POPCNT,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
        /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
            CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
        /* Missing: CPUID_SVM_LBRV */
        .features[FEAT_SVM] =
            CPUID_SVM_NPT,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
        .features[FEAT_1_EDX] =
            PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
        /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
         * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
        .features[FEAT_1_ECX] =
            CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
            CPUID_EXT_CX16,
        .features[FEAT_8000_0001_EDX] =
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1898 .features[FEAT_8000_0001_ECX] = 1899 CPUID_EXT3_LAHF_LM, 1900 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1901 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1902 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1903 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1904 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1905 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1906 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1907 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1908 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1909 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1910 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1911 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1912 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1913 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1914 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1915 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1916 .features[FEAT_VMX_SECONDARY_CTLS] = 1917 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1918 .xlevel = 0x80000008, 1919 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1920 }, 1921 { 1922 .name = "kvm64", 1923 .level = 0xd, 1924 .vendor = CPUID_VENDOR_INTEL, 1925 .family = 15, 1926 .model = 6, 1927 .stepping = 1, 1928 /* Missing: CPUID_HT */ 1929 .features[FEAT_1_EDX] = 1930 PPRO_FEATURES | CPUID_VME | 1931 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1932 CPUID_PSE36, 1933 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1934 .features[FEAT_1_ECX] = 1935 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1936 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1937 .features[FEAT_8000_0001_EDX] = 1938 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1939 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1940 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1941 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1942 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1943 .features[FEAT_8000_0001_ECX] = 1944 0, 1945 /* VMX features from Cedar Mill/Prescott */ 1946 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1947 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1948 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1949 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1950 VMX_PIN_BASED_NMI_EXITING, 1951 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1952 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1953 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1954 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1955 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1956 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1957 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1958 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1959 .xlevel = 0x80000008, 1960 .model_id = "Common KVM processor" 1961 }, 1962 { 1963 .name = "qemu32", 1964 .level = 4, 1965 .vendor = CPUID_VENDOR_INTEL, 1966 .family = 6, 1967 .model = 6, 1968 .stepping = 3, 1969 .features[FEAT_1_EDX] = 1970 PPRO_FEATURES, 1971 .features[FEAT_1_ECX] = 1972 CPUID_EXT_SSE3, 1973 .xlevel = 0x80000004, 1974 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1975 }, 1976 { 1977 .name = "kvm32", 1978 .level = 5, 
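/*
 * "level" is the minimum value advertised as the maximum basic CPUID leaf
 * (CPUID[0].EAX); QEMU may raise it when enabled features need higher leaves.
 */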
1979 .vendor = CPUID_VENDOR_INTEL, 1980 .family = 15, 1981 .model = 6, 1982 .stepping = 1, 1983 .features[FEAT_1_EDX] = 1984 PPRO_FEATURES | CPUID_VME | 1985 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1986 .features[FEAT_1_ECX] = 1987 CPUID_EXT_SSE3, 1988 .features[FEAT_8000_0001_ECX] = 1989 0, 1990 /* VMX features from Yonah */ 1991 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1992 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1993 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1994 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1995 VMX_PIN_BASED_NMI_EXITING, 1996 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1997 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1998 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1999 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2000 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2001 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2002 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2003 .xlevel = 0x80000008, 2004 .model_id = "Common 32-bit KVM processor" 2005 }, 2006 { 2007 .name = "coreduo", 2008 .level = 10, 2009 .vendor = CPUID_VENDOR_INTEL, 2010 .family = 6, 2011 .model = 14, 2012 .stepping = 8, 2013 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2014 .features[FEAT_1_EDX] = 2015 PPRO_FEATURES | CPUID_VME | 2016 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2017 CPUID_SS, 2018 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2019 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2020 .features[FEAT_1_ECX] = 2021 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2022 .features[FEAT_8000_0001_EDX] = 2023 CPUID_EXT2_NX, 2024 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2025 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2026 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2027 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2028 VMX_PIN_BASED_NMI_EXITING, 2029 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2030 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2031 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2032 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2033 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2034 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2035 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2036 .xlevel = 0x80000008, 2037 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2038 }, 2039 { 2040 .name = "486", 2041 .level = 1, 2042 .vendor = CPUID_VENDOR_INTEL, 2043 .family = 4, 2044 .model = 8, 2045 .stepping = 0, 2046 .features[FEAT_1_EDX] = 2047 I486_FEATURES, 2048 .xlevel = 0, 2049 .model_id = "", 2050 }, 2051 { 2052 .name = "pentium", 2053 .level = 1, 2054 .vendor = CPUID_VENDOR_INTEL, 2055 .family = 5, 2056 .model = 4, 2057 .stepping = 3, 2058 .features[FEAT_1_EDX] = 2059 PENTIUM_FEATURES, 2060 .xlevel = 0, 2061 .model_id = "", 2062 }, 2063 { 2064 .name = "pentium2", 2065 .level = 2, 2066 .vendor = CPUID_VENDOR_INTEL, 2067 .family = 6, 2068 .model = 5, 2069 .stepping = 2, 2070 .features[FEAT_1_EDX] = 2071 PENTIUM2_FEATURES, 2072 .xlevel = 0, 2073 .model_id = "", 2074 }, 2075 { 2076 .name = "pentium3", 2077 .level = 3, 2078 .vendor = CPUID_VENDOR_INTEL, 2079 .family = 6, 2080 .model = 7, 2081 .stepping = 3, 2082 .features[FEAT_1_EDX] = 2083 PENTIUM3_FEATURES, 2084 .xlevel = 0, 2085 
.model_id = "", 2086 }, 2087 { 2088 .name = "athlon", 2089 .level = 2, 2090 .vendor = CPUID_VENDOR_AMD, 2091 .family = 6, 2092 .model = 2, 2093 .stepping = 3, 2094 .features[FEAT_1_EDX] = 2095 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2096 CPUID_MCA, 2097 .features[FEAT_8000_0001_EDX] = 2098 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2099 .xlevel = 0x80000008, 2100 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2101 }, 2102 { 2103 .name = "n270", 2104 .level = 10, 2105 .vendor = CPUID_VENDOR_INTEL, 2106 .family = 6, 2107 .model = 28, 2108 .stepping = 2, 2109 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2110 .features[FEAT_1_EDX] = 2111 PPRO_FEATURES | 2112 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2113 CPUID_ACPI | CPUID_SS, 2114 /* Some CPUs got no CPUID_SEP */ 2115 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2116 * CPUID_EXT_XTPR */ 2117 .features[FEAT_1_ECX] = 2118 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2119 CPUID_EXT_MOVBE, 2120 .features[FEAT_8000_0001_EDX] = 2121 CPUID_EXT2_NX, 2122 .features[FEAT_8000_0001_ECX] = 2123 CPUID_EXT3_LAHF_LM, 2124 .xlevel = 0x80000008, 2125 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2126 }, 2127 { 2128 .name = "Conroe", 2129 .level = 10, 2130 .vendor = CPUID_VENDOR_INTEL, 2131 .family = 6, 2132 .model = 15, 2133 .stepping = 3, 2134 .features[FEAT_1_EDX] = 2135 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2136 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2137 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2138 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2139 CPUID_DE | CPUID_FP87, 2140 .features[FEAT_1_ECX] = 2141 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2142 .features[FEAT_8000_0001_EDX] = 2143 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2144 .features[FEAT_8000_0001_ECX] = 2145 CPUID_EXT3_LAHF_LM, 2146 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2147 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2148 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2149 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2150 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2151 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2152 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2153 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2154 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2155 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2156 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2157 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2158 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2159 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2160 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2161 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2162 .features[FEAT_VMX_SECONDARY_CTLS] = 2163 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2164 .xlevel = 0x80000008, 2165 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2166 }, 2167 { 2168 .name = "Penryn", 2169 .level = 10, 2170 .vendor = CPUID_VENDOR_INTEL, 2171 .family = 6, 2172 .model = 23, 2173 .stepping = 3, 2174 .features[FEAT_1_EDX] = 2175 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2176 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2177 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 
2178 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2179 CPUID_DE | CPUID_FP87, 2180 .features[FEAT_1_ECX] = 2181 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2182 CPUID_EXT_SSE3, 2183 .features[FEAT_8000_0001_EDX] = 2184 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2185 .features[FEAT_8000_0001_ECX] = 2186 CPUID_EXT3_LAHF_LM, 2187 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2188 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2189 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2190 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2191 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2192 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2193 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2194 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2195 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2196 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2197 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2198 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2199 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2200 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2201 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2202 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2203 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2204 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2205 .features[FEAT_VMX_SECONDARY_CTLS] = 2206 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2207 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2208 .xlevel = 0x80000008, 2209 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2210 }, 2211 { 2212 .name = "Nehalem", 2213 .level = 11, 2214 .vendor = CPUID_VENDOR_INTEL, 2215 .family = 6, 2216 .model = 26, 2217 .stepping = 3, 2218 .features[FEAT_1_EDX] = 2219 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2220 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2221 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2222 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2223 CPUID_DE | CPUID_FP87, 2224 .features[FEAT_1_ECX] = 2225 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2226 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2227 .features[FEAT_8000_0001_EDX] = 2228 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2229 .features[FEAT_8000_0001_ECX] = 2230 CPUID_EXT3_LAHF_LM, 2231 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2232 MSR_VMX_BASIC_TRUE_CTLS, 2233 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2234 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2235 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2236 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2237 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2238 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2239 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2240 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2241 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2242 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2243 .features[FEAT_VMX_EXIT_CTLS] = 2244 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2245 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2246 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2247 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2248 
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2249 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2250 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2251 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2252 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2253 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2254 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2255 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2256 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2257 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2258 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2259 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2260 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2261 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2262 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2263 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2264 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2265 .features[FEAT_VMX_SECONDARY_CTLS] = 2266 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2267 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2268 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2269 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2270 VMX_SECONDARY_EXEC_ENABLE_VPID, 2271 .xlevel = 0x80000008, 2272 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2273 .versions = (X86CPUVersionDefinition[]) { 2274 { .version = 1 }, 2275 { 2276 .version = 2, 2277 .alias = "Nehalem-IBRS", 2278 .props = (PropValue[]) { 2279 { "spec-ctrl", "on" }, 2280 { "model-id", 2281 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2282 { /* end of list */ } 2283 } 2284 }, 2285 { /* end of list */ } 2286 } 2287 }, 2288 { 2289 .name = "Westmere", 2290 .level = 11, 2291 .vendor = CPUID_VENDOR_INTEL, 2292 .family = 6, 2293 .model = 44, 2294 .stepping = 1, 2295 .features[FEAT_1_EDX] = 2296 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2297 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2298 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2299 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2300 CPUID_DE | CPUID_FP87, 2301 .features[FEAT_1_ECX] = 2302 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2303 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2304 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2305 .features[FEAT_8000_0001_EDX] = 2306 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2307 .features[FEAT_8000_0001_ECX] = 2308 CPUID_EXT3_LAHF_LM, 2309 .features[FEAT_6_EAX] = 2310 CPUID_6_EAX_ARAT, 2311 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2312 MSR_VMX_BASIC_TRUE_CTLS, 2313 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2314 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2315 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2316 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2317 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2318 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2319 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2320 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2321 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2322 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2323 .features[FEAT_VMX_EXIT_CTLS] = 2324 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2325 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 
| 2326 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2327 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2328 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2329 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2330 MSR_VMX_MISC_STORE_LMA, 2331 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2332 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2333 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2334 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2335 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2336 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2337 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2338 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2339 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2340 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2341 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2342 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2343 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2344 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2345 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2346 .features[FEAT_VMX_SECONDARY_CTLS] = 2347 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2348 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2349 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2350 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2351 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2352 .xlevel = 0x80000008, 2353 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2354 .versions = (X86CPUVersionDefinition[]) { 2355 { .version = 1 }, 2356 { 2357 .version = 2, 2358 .alias = "Westmere-IBRS", 2359 .props = (PropValue[]) { 2360 { "spec-ctrl", "on" }, 2361 { "model-id", 2362 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2363 { /* end of list */ } 2364 } 2365 }, 2366 { /* end of list */ } 2367 } 2368 }, 2369 { 2370 .name = "SandyBridge", 2371 .level = 0xd, 2372 .vendor = CPUID_VENDOR_INTEL, 2373 .family = 6, 2374 .model = 42, 2375 .stepping = 1, 2376 .features[FEAT_1_EDX] = 2377 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2378 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2379 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2380 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2381 CPUID_DE | CPUID_FP87, 2382 .features[FEAT_1_ECX] = 2383 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2384 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2385 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2386 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2387 CPUID_EXT_SSE3, 2388 .features[FEAT_8000_0001_EDX] = 2389 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2390 CPUID_EXT2_SYSCALL, 2391 .features[FEAT_8000_0001_ECX] = 2392 CPUID_EXT3_LAHF_LM, 2393 .features[FEAT_XSAVE] = 2394 CPUID_XSAVE_XSAVEOPT, 2395 .features[FEAT_6_EAX] = 2396 CPUID_6_EAX_ARAT, 2397 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2398 MSR_VMX_BASIC_TRUE_CTLS, 2399 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2400 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2401 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2402 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2403 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2404 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2405 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | 
MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2406 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2407 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2408 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2409 .features[FEAT_VMX_EXIT_CTLS] = 2410 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2411 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2412 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2413 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2414 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2415 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2416 MSR_VMX_MISC_STORE_LMA, 2417 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2418 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2419 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2420 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2421 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2422 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2423 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2424 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2425 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2426 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2427 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2428 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2429 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2430 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2431 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2432 .features[FEAT_VMX_SECONDARY_CTLS] = 2433 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2434 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2435 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2436 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2437 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2438 .xlevel = 0x80000008, 2439 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2440 .versions = (X86CPUVersionDefinition[]) { 2441 { .version = 1 }, 2442 { 2443 .version = 2, 2444 .alias = "SandyBridge-IBRS", 2445 .props = (PropValue[]) { 2446 { "spec-ctrl", "on" }, 2447 { "model-id", 2448 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2449 { /* end of list */ } 2450 } 2451 }, 2452 { /* end of list */ } 2453 } 2454 }, 2455 { 2456 .name = "IvyBridge", 2457 .level = 0xd, 2458 .vendor = CPUID_VENDOR_INTEL, 2459 .family = 6, 2460 .model = 58, 2461 .stepping = 9, 2462 .features[FEAT_1_EDX] = 2463 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2464 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2465 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2466 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2467 CPUID_DE | CPUID_FP87, 2468 .features[FEAT_1_ECX] = 2469 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2470 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2471 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2472 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2473 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2474 .features[FEAT_7_0_EBX] = 2475 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2476 CPUID_7_0_EBX_ERMS, 2477 .features[FEAT_8000_0001_EDX] = 2478 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2479 CPUID_EXT2_SYSCALL, 2480 .features[FEAT_8000_0001_ECX] = 2481 CPUID_EXT3_LAHF_LM, 2482 .features[FEAT_XSAVE] = 2483 CPUID_XSAVE_XSAVEOPT, 2484 .features[FEAT_6_EAX] = 2485 CPUID_6_EAX_ARAT, 2486 
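/*
 * MSR_VMX_BASIC_TRUE_CTLS below advertises the IA32_VMX_TRUE_*_CTLS MSRs,
 * which let a guest hypervisor clear VMX controls that would otherwise
 * default to 1.
 */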
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2487 MSR_VMX_BASIC_TRUE_CTLS, 2488 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2489 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2490 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2491 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2492 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2493 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2494 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2495 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2496 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2497 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2498 .features[FEAT_VMX_EXIT_CTLS] = 2499 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2500 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2501 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2502 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2503 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2504 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2505 MSR_VMX_MISC_STORE_LMA, 2506 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2507 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2508 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2509 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2510 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2511 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2512 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2513 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2514 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2515 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2516 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2517 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2518 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2519 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2520 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2521 .features[FEAT_VMX_SECONDARY_CTLS] = 2522 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2523 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2524 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2525 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2526 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2527 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2528 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2529 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2530 .xlevel = 0x80000008, 2531 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2532 .versions = (X86CPUVersionDefinition[]) { 2533 { .version = 1 }, 2534 { 2535 .version = 2, 2536 .alias = "IvyBridge-IBRS", 2537 .props = (PropValue[]) { 2538 { "spec-ctrl", "on" }, 2539 { "model-id", 2540 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2541 { /* end of list */ } 2542 } 2543 }, 2544 { /* end of list */ } 2545 } 2546 }, 2547 { 2548 .name = "Haswell", 2549 .level = 0xd, 2550 .vendor = CPUID_VENDOR_INTEL, 2551 .family = 6, 2552 .model = 60, 2553 .stepping = 4, 2554 .features[FEAT_1_EDX] = 2555 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2556 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2557 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2558 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2559 CPUID_DE | CPUID_FP87, 2560 .features[FEAT_1_ECX] = 2561 CPUID_EXT_AVX | 
CPUID_EXT_XSAVE | CPUID_EXT_AES | 2562 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2563 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2564 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2565 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2566 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2567 .features[FEAT_8000_0001_EDX] = 2568 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2569 CPUID_EXT2_SYSCALL, 2570 .features[FEAT_8000_0001_ECX] = 2571 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2572 .features[FEAT_7_0_EBX] = 2573 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2574 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2575 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2576 CPUID_7_0_EBX_RTM, 2577 .features[FEAT_XSAVE] = 2578 CPUID_XSAVE_XSAVEOPT, 2579 .features[FEAT_6_EAX] = 2580 CPUID_6_EAX_ARAT, 2581 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2582 MSR_VMX_BASIC_TRUE_CTLS, 2583 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2584 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2585 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2586 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2587 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2588 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2589 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2590 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2591 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2592 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2593 .features[FEAT_VMX_EXIT_CTLS] = 2594 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2595 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2596 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2597 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2598 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2599 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2600 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2601 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2602 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2603 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2604 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2605 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2606 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2607 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2608 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2609 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2610 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2611 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2612 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2613 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2614 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2615 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2616 .features[FEAT_VMX_SECONDARY_CTLS] = 2617 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2618 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2619 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2620 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2621 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2622 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2623 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2624 VMX_SECONDARY_EXEC_RDRAND_EXITING | 
VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2625 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2626 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2627 .xlevel = 0x80000008, 2628 .model_id = "Intel Core Processor (Haswell)", 2629 .versions = (X86CPUVersionDefinition[]) { 2630 { .version = 1 }, 2631 { 2632 .version = 2, 2633 .alias = "Haswell-noTSX", 2634 .props = (PropValue[]) { 2635 { "hle", "off" }, 2636 { "rtm", "off" }, 2637 { "stepping", "1" }, 2638 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2639 { /* end of list */ } 2640 }, 2641 }, 2642 { 2643 .version = 3, 2644 .alias = "Haswell-IBRS", 2645 .props = (PropValue[]) { 2646 /* Restore TSX features removed by -v2 above */ 2647 { "hle", "on" }, 2648 { "rtm", "on" }, 2649 /* 2650 * Haswell and Haswell-IBRS had stepping=4 in 2651 * QEMU 4.0 and older 2652 */ 2653 { "stepping", "4" }, 2654 { "spec-ctrl", "on" }, 2655 { "model-id", 2656 "Intel Core Processor (Haswell, IBRS)" }, 2657 { /* end of list */ } 2658 } 2659 }, 2660 { 2661 .version = 4, 2662 .alias = "Haswell-noTSX-IBRS", 2663 .props = (PropValue[]) { 2664 { "hle", "off" }, 2665 { "rtm", "off" }, 2666 /* spec-ctrl was already enabled by -v3 above */ 2667 { "stepping", "1" }, 2668 { "model-id", 2669 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2670 { /* end of list */ } 2671 } 2672 }, 2673 { /* end of list */ } 2674 } 2675 }, 2676 { 2677 .name = "Broadwell", 2678 .level = 0xd, 2679 .vendor = CPUID_VENDOR_INTEL, 2680 .family = 6, 2681 .model = 61, 2682 .stepping = 2, 2683 .features[FEAT_1_EDX] = 2684 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2685 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2686 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2687 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2688 CPUID_DE | CPUID_FP87, 2689 .features[FEAT_1_ECX] = 2690 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2691 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2692 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2693 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2694 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2695 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2696 .features[FEAT_8000_0001_EDX] = 2697 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2698 CPUID_EXT2_SYSCALL, 2699 .features[FEAT_8000_0001_ECX] = 2700 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2701 .features[FEAT_7_0_EBX] = 2702 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2703 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2704 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2705 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2706 CPUID_7_0_EBX_SMAP, 2707 .features[FEAT_XSAVE] = 2708 CPUID_XSAVE_XSAVEOPT, 2709 .features[FEAT_6_EAX] = 2710 CPUID_6_EAX_ARAT, 2711 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2712 MSR_VMX_BASIC_TRUE_CTLS, 2713 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2714 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2715 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2716 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2717 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2718 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2719 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2720 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2721 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | 
MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2722 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2723 .features[FEAT_VMX_EXIT_CTLS] = 2724 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2725 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2726 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2727 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2728 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2729 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2730 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2731 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2732 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2733 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2734 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2735 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2736 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2737 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2738 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2739 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2740 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2741 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2742 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2743 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2744 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2745 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2746 .features[FEAT_VMX_SECONDARY_CTLS] = 2747 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2748 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2749 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2750 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2751 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2752 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2753 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2754 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2755 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2756 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2757 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2758 .xlevel = 0x80000008, 2759 .model_id = "Intel Core Processor (Broadwell)", 2760 .versions = (X86CPUVersionDefinition[]) { 2761 { .version = 1 }, 2762 { 2763 .version = 2, 2764 .alias = "Broadwell-noTSX", 2765 .props = (PropValue[]) { 2766 { "hle", "off" }, 2767 { "rtm", "off" }, 2768 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2769 { /* end of list */ } 2770 }, 2771 }, 2772 { 2773 .version = 3, 2774 .alias = "Broadwell-IBRS", 2775 .props = (PropValue[]) { 2776 /* Restore TSX features removed by -v2 above */ 2777 { "hle", "on" }, 2778 { "rtm", "on" }, 2779 { "spec-ctrl", "on" }, 2780 { "model-id", 2781 "Intel Core Processor (Broadwell, IBRS)" }, 2782 { /* end of list */ } 2783 } 2784 }, 2785 { 2786 .version = 4, 2787 .alias = "Broadwell-noTSX-IBRS", 2788 .props = (PropValue[]) { 2789 { "hle", "off" }, 2790 { "rtm", "off" }, 2791 /* spec-ctrl was already enabled by -v3 above */ 2792 { "model-id", 2793 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2794 { /* end of list */ } 2795 } 2796 }, 2797 { /* end of list */ } 2798 } 2799 }, 2800 { 2801 .name = "Skylake-Client", 2802 .level = 0xd, 2803 .vendor = CPUID_VENDOR_INTEL, 2804 .family = 6, 2805 .model = 94, 2806 .stepping = 3, 2807 .features[FEAT_1_EDX] = 2808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | 
CPUID_FXSR | CPUID_MMX | 2809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2812 CPUID_DE | CPUID_FP87, 2813 .features[FEAT_1_ECX] = 2814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2815 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2816 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2817 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2818 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2819 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2820 .features[FEAT_8000_0001_EDX] = 2821 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2822 CPUID_EXT2_SYSCALL, 2823 .features[FEAT_8000_0001_ECX] = 2824 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2825 .features[FEAT_7_0_EBX] = 2826 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2827 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2828 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2829 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2830 CPUID_7_0_EBX_SMAP, 2831 /* Missing: XSAVES (not supported by some Linux versions, 2832 * including v4.1 to v4.12). 2833 * KVM doesn't yet expose any XSAVES state save component, 2834 * and the only one defined in Skylake (processor tracing) 2835 * probably will block migration anyway. 2836 */ 2837 .features[FEAT_XSAVE] = 2838 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2839 CPUID_XSAVE_XGETBV1, 2840 .features[FEAT_6_EAX] = 2841 CPUID_6_EAX_ARAT, 2842 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2843 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2844 MSR_VMX_BASIC_TRUE_CTLS, 2845 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2846 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2847 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2848 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2849 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2850 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2851 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2852 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2853 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2854 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2855 .features[FEAT_VMX_EXIT_CTLS] = 2856 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2857 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2858 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2859 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2860 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2861 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2862 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2863 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2864 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2865 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2866 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2867 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2868 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2869 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2870 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2871 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2872 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2873 
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2874 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2875 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2876 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2877 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2878 .features[FEAT_VMX_SECONDARY_CTLS] = 2879 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2880 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2881 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2882 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2883 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2884 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2885 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2886 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2887 .xlevel = 0x80000008, 2888 .model_id = "Intel Core Processor (Skylake)", 2889 .versions = (X86CPUVersionDefinition[]) { 2890 { .version = 1 }, 2891 { 2892 .version = 2, 2893 .alias = "Skylake-Client-IBRS", 2894 .props = (PropValue[]) { 2895 { "spec-ctrl", "on" }, 2896 { "model-id", 2897 "Intel Core Processor (Skylake, IBRS)" }, 2898 { /* end of list */ } 2899 } 2900 }, 2901 { 2902 .version = 3, 2903 .alias = "Skylake-Client-noTSX-IBRS", 2904 .props = (PropValue[]) { 2905 { "hle", "off" }, 2906 { "rtm", "off" }, 2907 { "model-id", 2908 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2909 { /* end of list */ } 2910 } 2911 }, 2912 { /* end of list */ } 2913 } 2914 }, 2915 { 2916 .name = "Skylake-Server", 2917 .level = 0xd, 2918 .vendor = CPUID_VENDOR_INTEL, 2919 .family = 6, 2920 .model = 85, 2921 .stepping = 4, 2922 .features[FEAT_1_EDX] = 2923 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2927 CPUID_DE | CPUID_FP87, 2928 .features[FEAT_1_ECX] = 2929 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2930 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2931 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2932 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2933 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2934 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2935 .features[FEAT_8000_0001_EDX] = 2936 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2937 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2938 .features[FEAT_8000_0001_ECX] = 2939 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2940 .features[FEAT_7_0_EBX] = 2941 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2942 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2943 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2944 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2945 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2946 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2947 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2948 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2949 .features[FEAT_7_0_ECX] = 2950 CPUID_7_0_ECX_PKU, 2951 /* Missing: XSAVES (not supported by some Linux versions, 2952 * including v4.1 to v4.12). 2953 * KVM doesn't yet expose any XSAVES state save component, 2954 * and the only one defined in Skylake (processor tracing) 2955 * probably will block migration anyway. 
2956 */ 2957 .features[FEAT_XSAVE] = 2958 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2959 CPUID_XSAVE_XGETBV1, 2960 .features[FEAT_6_EAX] = 2961 CPUID_6_EAX_ARAT, 2962 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2963 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2964 MSR_VMX_BASIC_TRUE_CTLS, 2965 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2966 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2967 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2968 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2969 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2970 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2971 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2972 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2973 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2974 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2975 .features[FEAT_VMX_EXIT_CTLS] = 2976 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2977 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2978 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2979 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2980 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2981 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2982 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2983 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2984 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2985 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2986 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2987 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2988 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2989 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2990 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2991 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2992 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2993 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2994 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2995 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2996 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2997 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2998 .features[FEAT_VMX_SECONDARY_CTLS] = 2999 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3000 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3001 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3002 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3003 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3004 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3005 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3006 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3007 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3008 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3009 .xlevel = 0x80000008, 3010 .model_id = "Intel Xeon Processor (Skylake)", 3011 .versions = (X86CPUVersionDefinition[]) { 3012 { .version = 1 }, 3013 { 3014 .version = 2, 3015 .alias = "Skylake-Server-IBRS", 3016 .props = (PropValue[]) { 3017 /* clflushopt was not added to Skylake-Server-IBRS */ 3018 /* TODO: add -v3 including clflushopt */ 3019 { "clflushopt", "off" }, 3020 { "spec-ctrl", "on" }, 3021 { "model-id", 3022 "Intel Xeon Processor 
(Skylake, IBRS)" }, 3023 { /* end of list */ } 3024 } 3025 }, 3026 { 3027 .version = 3, 3028 .alias = "Skylake-Server-noTSX-IBRS", 3029 .props = (PropValue[]) { 3030 { "hle", "off" }, 3031 { "rtm", "off" }, 3032 { "model-id", 3033 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3034 { /* end of list */ } 3035 } 3036 }, 3037 { /* end of list */ } 3038 } 3039 }, 3040 { 3041 .name = "Cascadelake-Server", 3042 .level = 0xd, 3043 .vendor = CPUID_VENDOR_INTEL, 3044 .family = 6, 3045 .model = 85, 3046 .stepping = 6, 3047 .features[FEAT_1_EDX] = 3048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3052 CPUID_DE | CPUID_FP87, 3053 .features[FEAT_1_ECX] = 3054 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3055 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3056 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3057 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3059 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3060 .features[FEAT_8000_0001_EDX] = 3061 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3062 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3063 .features[FEAT_8000_0001_ECX] = 3064 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3065 .features[FEAT_7_0_EBX] = 3066 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3067 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3068 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3069 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3070 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3071 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3072 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3073 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3074 .features[FEAT_7_0_ECX] = 3075 CPUID_7_0_ECX_PKU | 3076 CPUID_7_0_ECX_AVX512VNNI, 3077 .features[FEAT_7_0_EDX] = 3078 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3079 /* Missing: XSAVES (not supported by some Linux versions, 3080 * including v4.1 to v4.12). 3081 * KVM doesn't yet expose any XSAVES state save component, 3082 * and the only one defined in Skylake (processor tracing) 3083 * probably will block migration anyway. 
3084 */ 3085 .features[FEAT_XSAVE] = 3086 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3087 CPUID_XSAVE_XGETBV1, 3088 .features[FEAT_6_EAX] = 3089 CPUID_6_EAX_ARAT, 3090 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3091 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3092 MSR_VMX_BASIC_TRUE_CTLS, 3093 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3094 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3095 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3096 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3097 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3098 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3099 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3100 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3101 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3102 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3103 .features[FEAT_VMX_EXIT_CTLS] = 3104 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3105 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3106 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3107 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3108 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3109 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3110 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3111 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3112 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3113 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3114 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3115 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3116 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3117 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3118 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3119 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3120 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3121 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3122 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3123 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3124 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3125 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3126 .features[FEAT_VMX_SECONDARY_CTLS] = 3127 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3128 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3129 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3130 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3131 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3132 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3133 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3134 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3135 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3136 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3137 .xlevel = 0x80000008, 3138 .model_id = "Intel Xeon Processor (Cascadelake)", 3139 .versions = (X86CPUVersionDefinition[]) { 3140 { .version = 1 }, 3141 { .version = 2, 3142 .note = "ARCH_CAPABILITIES", 3143 .props = (PropValue[]) { 3144 { "arch-capabilities", "on" }, 3145 { "rdctl-no", "on" }, 3146 { "ibrs-all", "on" }, 3147 { "skip-l1dfl-vmentry", "on" }, 3148 { "mds-no", "on" }, 3149 { /* end of list */ } 3150 }, 3151 }, 3152 { .version = 
3, 3153 .alias = "Cascadelake-Server-noTSX", 3154 .note = "ARCH_CAPABILITIES, no TSX", 3155 .props = (PropValue[]) { 3156 { "hle", "off" }, 3157 { "rtm", "off" }, 3158 { /* end of list */ } 3159 }, 3160 }, 3161 { /* end of list */ } 3162 } 3163 }, 3164 { 3165 .name = "Cooperlake", 3166 .level = 0xd, 3167 .vendor = CPUID_VENDOR_INTEL, 3168 .family = 6, 3169 .model = 85, 3170 .stepping = 10, 3171 .features[FEAT_1_EDX] = 3172 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3173 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3174 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3175 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3176 CPUID_DE | CPUID_FP87, 3177 .features[FEAT_1_ECX] = 3178 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3179 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3180 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3181 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3182 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3183 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3184 .features[FEAT_8000_0001_EDX] = 3185 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3186 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3187 .features[FEAT_8000_0001_ECX] = 3188 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3189 .features[FEAT_7_0_EBX] = 3190 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3191 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3192 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3193 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3194 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3195 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3196 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3197 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3198 .features[FEAT_7_0_ECX] = 3199 CPUID_7_0_ECX_PKU | 3200 CPUID_7_0_ECX_AVX512VNNI, 3201 .features[FEAT_7_0_EDX] = 3202 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3203 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3204 .features[FEAT_ARCH_CAPABILITIES] = 3205 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3206 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3207 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3208 .features[FEAT_7_1_EAX] = 3209 CPUID_7_1_EAX_AVX512_BF16, 3210 /* 3211 * Missing: XSAVES (not supported by some Linux versions, 3212 * including v4.1 to v4.12). 3213 * KVM doesn't yet expose any XSAVES state save component, 3214 * and the only one defined in Skylake (processor tracing) 3215 * probably will block migration anyway. 
3216 */ 3217 .features[FEAT_XSAVE] = 3218 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3219 CPUID_XSAVE_XGETBV1, 3220 .features[FEAT_6_EAX] = 3221 CPUID_6_EAX_ARAT, 3222 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3223 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3224 MSR_VMX_BASIC_TRUE_CTLS, 3225 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3226 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3227 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3228 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3229 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3230 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3231 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3232 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3233 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3234 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3235 .features[FEAT_VMX_EXIT_CTLS] = 3236 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3237 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3238 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3239 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3240 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3241 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3242 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3243 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3244 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3245 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3246 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3247 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3248 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3249 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3250 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3251 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3252 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3253 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3254 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3255 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3256 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3257 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3258 .features[FEAT_VMX_SECONDARY_CTLS] = 3259 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3260 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3261 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3262 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3263 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3264 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3265 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3266 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3267 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3268 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3269 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3270 .xlevel = 0x80000008, 3271 .model_id = "Intel Xeon Processor (Cooperlake)", 3272 }, 3273 { 3274 .name = "Icelake-Client", 3275 .level = 0xd, 3276 .vendor = CPUID_VENDOR_INTEL, 3277 .family = 6, 3278 .model = 126, 3279 .stepping = 0, 3280 .features[FEAT_1_EDX] = 3281 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3282 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | 
CPUID_CMOV | CPUID_MCA | 3283 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3284 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3285 CPUID_DE | CPUID_FP87, 3286 .features[FEAT_1_ECX] = 3287 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3288 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3289 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3290 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3291 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3292 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3293 .features[FEAT_8000_0001_EDX] = 3294 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3295 CPUID_EXT2_SYSCALL, 3296 .features[FEAT_8000_0001_ECX] = 3297 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3298 .features[FEAT_8000_0008_EBX] = 3299 CPUID_8000_0008_EBX_WBNOINVD, 3300 .features[FEAT_7_0_EBX] = 3301 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3302 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3303 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3304 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3305 CPUID_7_0_EBX_SMAP, 3306 .features[FEAT_7_0_ECX] = 3307 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3308 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3309 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3310 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3311 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3312 .features[FEAT_7_0_EDX] = 3313 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3314 /* Missing: XSAVES (not supported by some Linux versions, 3315 * including v4.1 to v4.12). 3316 * KVM doesn't yet expose any XSAVES state save component, 3317 * and the only one defined in Skylake (processor tracing) 3318 * probably will block migration anyway. 
3319 */ 3320 .features[FEAT_XSAVE] = 3321 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3322 CPUID_XSAVE_XGETBV1, 3323 .features[FEAT_6_EAX] = 3324 CPUID_6_EAX_ARAT, 3325 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3326 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3327 MSR_VMX_BASIC_TRUE_CTLS, 3328 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3329 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3330 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3331 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3332 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3333 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3334 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3335 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3336 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3337 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3338 .features[FEAT_VMX_EXIT_CTLS] = 3339 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3340 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3341 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3342 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3343 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3344 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3345 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3346 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3347 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3348 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3349 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3350 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3351 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3352 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3353 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3354 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3355 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3356 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3357 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3358 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3359 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3360 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3361 .features[FEAT_VMX_SECONDARY_CTLS] = 3362 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3363 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3364 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3365 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3366 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3367 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3368 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3369 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3370 .xlevel = 0x80000008, 3371 .model_id = "Intel Core Processor (Icelake)", 3372 .versions = (X86CPUVersionDefinition[]) { 3373 { .version = 1 }, 3374 { 3375 .version = 2, 3376 .note = "no TSX", 3377 .alias = "Icelake-Client-noTSX", 3378 .props = (PropValue[]) { 3379 { "hle", "off" }, 3380 { "rtm", "off" }, 3381 { /* end of list */ } 3382 }, 3383 }, 3384 { /* end of list */ } 3385 } 3386 }, 3387 { 3388 .name = "Icelake-Server", 3389 .level = 0xd, 3390 .vendor = CPUID_VENDOR_INTEL, 3391 .family = 6, 3392 .model = 134, 3393 .stepping = 0, 3394 
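        /*
         * With the encoding implemented by x86_cpuid_version_set_family(),
         * _set_model() and _set_stepping() further down in this file,
         * family 6 / model 134 (0x86) / stepping 0 corresponds to a
         * CPUID.01H:EAX signature of 0x00080660 (extended model 8,
         * base model 6).
         */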
.features[FEAT_1_EDX] = 3395 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3396 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3397 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3398 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3399 CPUID_DE | CPUID_FP87, 3400 .features[FEAT_1_ECX] = 3401 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3402 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3403 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3404 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3405 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3406 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3407 .features[FEAT_8000_0001_EDX] = 3408 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3409 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3410 .features[FEAT_8000_0001_ECX] = 3411 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3412 .features[FEAT_8000_0008_EBX] = 3413 CPUID_8000_0008_EBX_WBNOINVD, 3414 .features[FEAT_7_0_EBX] = 3415 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3416 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3417 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3418 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3419 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3420 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3421 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3422 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3423 .features[FEAT_7_0_ECX] = 3424 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3425 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3426 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3427 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3428 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3429 .features[FEAT_7_0_EDX] = 3430 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3431 /* Missing: XSAVES (not supported by some Linux versions, 3432 * including v4.1 to v4.12). 3433 * KVM doesn't yet expose any XSAVES state save component, 3434 * and the only one defined in Skylake (processor tracing) 3435 * probably will block migration anyway. 
3436 */ 3437 .features[FEAT_XSAVE] = 3438 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3439 CPUID_XSAVE_XGETBV1, 3440 .features[FEAT_6_EAX] = 3441 CPUID_6_EAX_ARAT, 3442 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3443 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3444 MSR_VMX_BASIC_TRUE_CTLS, 3445 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3446 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3447 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3448 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3449 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3450 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3451 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3452 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3453 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3454 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3455 .features[FEAT_VMX_EXIT_CTLS] = 3456 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3457 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3458 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3459 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3460 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3461 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3462 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3463 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3464 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3465 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3466 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3467 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3468 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3469 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3470 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3471 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3472 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3473 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3474 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3475 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3476 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3477 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3478 .features[FEAT_VMX_SECONDARY_CTLS] = 3479 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3480 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3481 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3482 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3483 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3484 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3485 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3486 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3487 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3488 .xlevel = 0x80000008, 3489 .model_id = "Intel Xeon Processor (Icelake)", 3490 .versions = (X86CPUVersionDefinition[]) { 3491 { .version = 1 }, 3492 { 3493 .version = 2, 3494 .note = "no TSX", 3495 .alias = "Icelake-Server-noTSX", 3496 .props = (PropValue[]) { 3497 { "hle", "off" }, 3498 { "rtm", "off" }, 3499 { /* end of list */ } 3500 }, 3501 }, 3502 { 3503 .version = 3, 3504 .props = (PropValue[]) { 3505 { "arch-capabilities", "on" }, 3506 { "rdctl-no", "on" }, 3507 { "ibrs-all", "on" }, 3508 { 
"skip-l1dfl-vmentry", "on" }, 3509 { "mds-no", "on" }, 3510 { "pschange-mc-no", "on" }, 3511 { "taa-no", "on" }, 3512 { /* end of list */ } 3513 }, 3514 }, 3515 { /* end of list */ } 3516 } 3517 }, 3518 { 3519 .name = "Denverton", 3520 .level = 21, 3521 .vendor = CPUID_VENDOR_INTEL, 3522 .family = 6, 3523 .model = 95, 3524 .stepping = 1, 3525 .features[FEAT_1_EDX] = 3526 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3527 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3528 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3529 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3530 CPUID_SSE | CPUID_SSE2, 3531 .features[FEAT_1_ECX] = 3532 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3533 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3534 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3535 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3536 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3537 .features[FEAT_8000_0001_EDX] = 3538 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3539 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3540 .features[FEAT_8000_0001_ECX] = 3541 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3542 .features[FEAT_7_0_EBX] = 3543 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3544 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3545 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3546 .features[FEAT_7_0_EDX] = 3547 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3548 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3549 /* 3550 * Missing: XSAVES (not supported by some Linux versions, 3551 * including v4.1 to v4.12). 3552 * KVM doesn't yet expose any XSAVES state save component, 3553 * and the only one defined in Skylake (processor tracing) 3554 * probably will block migration anyway. 
3555 */ 3556 .features[FEAT_XSAVE] = 3557 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3558 .features[FEAT_6_EAX] = 3559 CPUID_6_EAX_ARAT, 3560 .features[FEAT_ARCH_CAPABILITIES] = 3561 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3562 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3563 MSR_VMX_BASIC_TRUE_CTLS, 3564 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3565 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3566 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3567 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3568 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3569 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3570 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3571 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3572 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3573 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3574 .features[FEAT_VMX_EXIT_CTLS] = 3575 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3576 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3577 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3578 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3579 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3580 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3581 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3582 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3583 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3584 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3585 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3586 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3587 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3588 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3589 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3590 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3591 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3592 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3593 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3594 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3595 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3596 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3597 .features[FEAT_VMX_SECONDARY_CTLS] = 3598 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3599 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3600 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3601 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3602 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3603 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3604 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3605 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3606 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3607 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3608 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3609 .xlevel = 0x80000008, 3610 .model_id = "Intel Atom Processor (Denverton)", 3611 .versions = (X86CPUVersionDefinition[]) { 3612 { .version = 1 }, 3613 { 3614 .version = 2, 3615 .note = "no MPX, no MONITOR", 3616 .props = (PropValue[]) { 3617 { "monitor", "off" }, 3618 { "mpx", "off" }, 3619 { /* end of list */ }, 3620 }, 3621 }, 3622 { /* end of list */ }, 3623 }, 3624 
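        /*
         * Which entry of a .versions list like the one above is used is
         * decided by x86_cpu_model_resolve_version() below:
         * CPU_VERSION_AUTO follows default_cpu_version and
         * CPU_VERSION_LATEST resolves to the last entry in the list.
         */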
}, 3625 { 3626 .name = "Snowridge", 3627 .level = 27, 3628 .vendor = CPUID_VENDOR_INTEL, 3629 .family = 6, 3630 .model = 134, 3631 .stepping = 1, 3632 .features[FEAT_1_EDX] = 3633 /* missing: CPUID_PN CPUID_IA64 */ 3634 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3635 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3636 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3637 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3638 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3639 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3640 CPUID_MMX | 3641 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3642 .features[FEAT_1_ECX] = 3643 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3644 CPUID_EXT_SSSE3 | 3645 CPUID_EXT_CX16 | 3646 CPUID_EXT_SSE41 | 3647 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3648 CPUID_EXT_POPCNT | 3649 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3650 CPUID_EXT_RDRAND, 3651 .features[FEAT_8000_0001_EDX] = 3652 CPUID_EXT2_SYSCALL | 3653 CPUID_EXT2_NX | 3654 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3655 CPUID_EXT2_LM, 3656 .features[FEAT_8000_0001_ECX] = 3657 CPUID_EXT3_LAHF_LM | 3658 CPUID_EXT3_3DNOWPREFETCH, 3659 .features[FEAT_7_0_EBX] = 3660 CPUID_7_0_EBX_FSGSBASE | 3661 CPUID_7_0_EBX_SMEP | 3662 CPUID_7_0_EBX_ERMS | 3663 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3664 CPUID_7_0_EBX_RDSEED | 3665 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3666 CPUID_7_0_EBX_CLWB | 3667 CPUID_7_0_EBX_SHA_NI, 3668 .features[FEAT_7_0_ECX] = 3669 CPUID_7_0_ECX_UMIP | 3670 /* missing bit 5 */ 3671 CPUID_7_0_ECX_GFNI | 3672 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3673 CPUID_7_0_ECX_MOVDIR64B, 3674 .features[FEAT_7_0_EDX] = 3675 CPUID_7_0_EDX_SPEC_CTRL | 3676 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3677 CPUID_7_0_EDX_CORE_CAPABILITY, 3678 .features[FEAT_CORE_CAPABILITY] = 3679 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3680 /* 3681 * Missing: XSAVES (not supported by some Linux versions, 3682 * including v4.1 to v4.12). 3683 * KVM doesn't yet expose any XSAVES state save component, 3684 * and the only one defined in Skylake (processor tracing) 3685 * probably will block migration anyway. 
3686 */ 3687 .features[FEAT_XSAVE] = 3688 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3689 CPUID_XSAVE_XGETBV1, 3690 .features[FEAT_6_EAX] = 3691 CPUID_6_EAX_ARAT, 3692 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3693 MSR_VMX_BASIC_TRUE_CTLS, 3694 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3695 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3696 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3697 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3698 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3699 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3700 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3701 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3702 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3703 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3704 .features[FEAT_VMX_EXIT_CTLS] = 3705 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3706 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3707 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3708 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3709 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3710 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3711 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3712 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3713 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3714 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3715 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3716 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3717 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3718 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3719 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3720 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3721 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3722 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3723 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3724 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3725 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3726 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3727 .features[FEAT_VMX_SECONDARY_CTLS] = 3728 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3729 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3730 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3731 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3732 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3733 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3734 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3735 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3736 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3737 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3738 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3739 .xlevel = 0x80000008, 3740 .model_id = "Intel Atom Processor (SnowRidge)", 3741 .versions = (X86CPUVersionDefinition[]) { 3742 { .version = 1 }, 3743 { 3744 .version = 2, 3745 .props = (PropValue[]) { 3746 { "mpx", "off" }, 3747 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3748 { /* end of list */ }, 3749 }, 3750 }, 3751 { /* end of list */ }, 3752 }, 3753 }, 3754 { 3755 .name = "KnightsMill", 3756 .level = 0xd, 3757 .vendor = CPUID_VENDOR_INTEL, 
3758 .family = 6, 3759 .model = 133, 3760 .stepping = 0, 3761 .features[FEAT_1_EDX] = 3762 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3763 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3764 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3765 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3766 CPUID_PSE | CPUID_DE | CPUID_FP87, 3767 .features[FEAT_1_ECX] = 3768 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3769 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3770 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3771 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3772 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3773 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3774 .features[FEAT_8000_0001_EDX] = 3775 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3776 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3777 .features[FEAT_8000_0001_ECX] = 3778 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3779 .features[FEAT_7_0_EBX] = 3780 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3781 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3782 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3783 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3784 CPUID_7_0_EBX_AVX512ER, 3785 .features[FEAT_7_0_ECX] = 3786 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3787 .features[FEAT_7_0_EDX] = 3788 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3789 .features[FEAT_XSAVE] = 3790 CPUID_XSAVE_XSAVEOPT, 3791 .features[FEAT_6_EAX] = 3792 CPUID_6_EAX_ARAT, 3793 .xlevel = 0x80000008, 3794 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3795 }, 3796 { 3797 .name = "Opteron_G1", 3798 .level = 5, 3799 .vendor = CPUID_VENDOR_AMD, 3800 .family = 15, 3801 .model = 6, 3802 .stepping = 1, 3803 .features[FEAT_1_EDX] = 3804 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3805 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3806 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3807 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3808 CPUID_DE | CPUID_FP87, 3809 .features[FEAT_1_ECX] = 3810 CPUID_EXT_SSE3, 3811 .features[FEAT_8000_0001_EDX] = 3812 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3813 .xlevel = 0x80000008, 3814 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3815 }, 3816 { 3817 .name = "Opteron_G2", 3818 .level = 5, 3819 .vendor = CPUID_VENDOR_AMD, 3820 .family = 15, 3821 .model = 6, 3822 .stepping = 1, 3823 .features[FEAT_1_EDX] = 3824 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3825 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3826 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3827 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3828 CPUID_DE | CPUID_FP87, 3829 .features[FEAT_1_ECX] = 3830 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3831 .features[FEAT_8000_0001_EDX] = 3832 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3833 .features[FEAT_8000_0001_ECX] = 3834 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3835 .xlevel = 0x80000008, 3836 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3837 }, 3838 { 3839 .name = "Opteron_G3", 3840 .level = 5, 3841 .vendor = CPUID_VENDOR_AMD, 3842 .family = 16, 3843 .model = 2, 3844 .stepping = 3, 3845 .features[FEAT_1_EDX] = 3846 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3847 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3848 CPUID_PGE | CPUID_MTRR | CPUID_SEP | 
CPUID_APIC | CPUID_CX8 | 3849 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3850 CPUID_DE | CPUID_FP87, 3851 .features[FEAT_1_ECX] = 3852 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3853 CPUID_EXT_SSE3, 3854 .features[FEAT_8000_0001_EDX] = 3855 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3856 CPUID_EXT2_RDTSCP, 3857 .features[FEAT_8000_0001_ECX] = 3858 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3859 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3860 .xlevel = 0x80000008, 3861 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3862 }, 3863 { 3864 .name = "Opteron_G4", 3865 .level = 0xd, 3866 .vendor = CPUID_VENDOR_AMD, 3867 .family = 21, 3868 .model = 1, 3869 .stepping = 2, 3870 .features[FEAT_1_EDX] = 3871 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3872 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3873 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3874 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3875 CPUID_DE | CPUID_FP87, 3876 .features[FEAT_1_ECX] = 3877 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3878 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3879 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3880 CPUID_EXT_SSE3, 3881 .features[FEAT_8000_0001_EDX] = 3882 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3883 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3884 .features[FEAT_8000_0001_ECX] = 3885 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3886 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3887 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3888 CPUID_EXT3_LAHF_LM, 3889 .features[FEAT_SVM] = 3890 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3891 /* no xsaveopt! */ 3892 .xlevel = 0x8000001A, 3893 .model_id = "AMD Opteron 62xx class CPU", 3894 }, 3895 { 3896 .name = "Opteron_G5", 3897 .level = 0xd, 3898 .vendor = CPUID_VENDOR_AMD, 3899 .family = 21, 3900 .model = 2, 3901 .stepping = 0, 3902 .features[FEAT_1_EDX] = 3903 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3904 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3905 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3906 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3907 CPUID_DE | CPUID_FP87, 3908 .features[FEAT_1_ECX] = 3909 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3910 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3911 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3912 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3913 .features[FEAT_8000_0001_EDX] = 3914 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3915 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3916 .features[FEAT_8000_0001_ECX] = 3917 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3918 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3919 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3920 CPUID_EXT3_LAHF_LM, 3921 .features[FEAT_SVM] = 3922 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3923 /* no xsaveopt! 
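     * (i.e. FEAT_XSAVE is deliberately left at zero here, as it is for
     * Opteron_G4 above)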
*/ 3924 .xlevel = 0x8000001A, 3925 .model_id = "AMD Opteron 63xx class CPU", 3926 }, 3927 { 3928 .name = "EPYC", 3929 .level = 0xd, 3930 .vendor = CPUID_VENDOR_AMD, 3931 .family = 23, 3932 .model = 1, 3933 .stepping = 2, 3934 .features[FEAT_1_EDX] = 3935 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3936 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3937 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3938 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3939 CPUID_VME | CPUID_FP87, 3940 .features[FEAT_1_ECX] = 3941 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3942 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3943 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3944 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3945 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3946 .features[FEAT_8000_0001_EDX] = 3947 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3948 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3949 CPUID_EXT2_SYSCALL, 3950 .features[FEAT_8000_0001_ECX] = 3951 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3952 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3953 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3954 CPUID_EXT3_TOPOEXT, 3955 .features[FEAT_7_0_EBX] = 3956 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3957 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3958 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3959 CPUID_7_0_EBX_SHA_NI, 3960 .features[FEAT_XSAVE] = 3961 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3962 CPUID_XSAVE_XGETBV1, 3963 .features[FEAT_6_EAX] = 3964 CPUID_6_EAX_ARAT, 3965 .features[FEAT_SVM] = 3966 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3967 .xlevel = 0x8000001E, 3968 .model_id = "AMD EPYC Processor", 3969 .cache_info = &epyc_cache_info, 3970 .use_epyc_apic_id_encoding = 1, 3971 .versions = (X86CPUVersionDefinition[]) { 3972 { .version = 1 }, 3973 { 3974 .version = 2, 3975 .alias = "EPYC-IBPB", 3976 .props = (PropValue[]) { 3977 { "ibpb", "on" }, 3978 { "model-id", 3979 "AMD EPYC Processor (with IBPB)" }, 3980 { /* end of list */ } 3981 } 3982 }, 3983 { 3984 .version = 3, 3985 .props = (PropValue[]) { 3986 { "ibpb", "on" }, 3987 { "perfctr-core", "on" }, 3988 { "clzero", "on" }, 3989 { "xsaveerptr", "on" }, 3990 { "xsaves", "on" }, 3991 { "model-id", 3992 "AMD EPYC Processor" }, 3993 { /* end of list */ } 3994 } 3995 }, 3996 { /* end of list */ } 3997 } 3998 }, 3999 { 4000 .name = "Dhyana", 4001 .level = 0xd, 4002 .vendor = CPUID_VENDOR_HYGON, 4003 .family = 24, 4004 .model = 0, 4005 .stepping = 1, 4006 .features[FEAT_1_EDX] = 4007 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4008 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4009 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4010 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4011 CPUID_VME | CPUID_FP87, 4012 .features[FEAT_1_ECX] = 4013 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4014 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 4015 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4016 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4017 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 4018 .features[FEAT_8000_0001_EDX] = 4019 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4020 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4021 CPUID_EXT2_SYSCALL, 4022 .features[FEAT_8000_0001_ECX] = 4023 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 
4024 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4025 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4026 CPUID_EXT3_TOPOEXT, 4027 .features[FEAT_8000_0008_EBX] = 4028 CPUID_8000_0008_EBX_IBPB, 4029 .features[FEAT_7_0_EBX] = 4030 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4031 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4032 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4033 /* 4034 * Missing: XSAVES (not supported by some Linux versions, 4035 * including v4.1 to v4.12). 4036 * KVM doesn't yet expose any XSAVES state save component. 4037 */ 4038 .features[FEAT_XSAVE] = 4039 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4040 CPUID_XSAVE_XGETBV1, 4041 .features[FEAT_6_EAX] = 4042 CPUID_6_EAX_ARAT, 4043 .features[FEAT_SVM] = 4044 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4045 .xlevel = 0x8000001E, 4046 .model_id = "Hygon Dhyana Processor", 4047 .cache_info = &epyc_cache_info, 4048 }, 4049 { 4050 .name = "EPYC-Rome", 4051 .level = 0xd, 4052 .vendor = CPUID_VENDOR_AMD, 4053 .family = 23, 4054 .model = 49, 4055 .stepping = 0, 4056 .features[FEAT_1_EDX] = 4057 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4058 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4059 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4060 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4061 CPUID_VME | CPUID_FP87, 4062 .features[FEAT_1_ECX] = 4063 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4064 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4065 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4066 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4067 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4068 .features[FEAT_8000_0001_EDX] = 4069 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4070 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4071 CPUID_EXT2_SYSCALL, 4072 .features[FEAT_8000_0001_ECX] = 4073 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4074 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4075 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4076 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4077 .features[FEAT_8000_0008_EBX] = 4078 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4079 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4080 CPUID_8000_0008_EBX_STIBP, 4081 .features[FEAT_7_0_EBX] = 4082 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4083 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4084 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4085 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4086 .features[FEAT_7_0_ECX] = 4087 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4088 .features[FEAT_XSAVE] = 4089 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4090 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4091 .features[FEAT_6_EAX] = 4092 CPUID_6_EAX_ARAT, 4093 .features[FEAT_SVM] = 4094 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4095 .xlevel = 0x8000001E, 4096 .model_id = "AMD EPYC-Rome Processor", 4097 .cache_info = &epyc_rome_cache_info, 4098 .use_epyc_apic_id_encoding = 1, 4099 }, 4100 }; 4101 4102 /* KVM-specific features that are automatically added/removed 4103 * from all CPU models when KVM is enabled. 
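 *
 * Individual entries can be overridden via x86_cpu_change_kvm_default()
 * below, but only for properties that are already present in this table
 * (the function asserts on unknown property names).  For example, an
 * illustrative (hypothetical) call would be:
 *
 *     x86_cpu_change_kvm_default("svm", "on");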
4104 */ 4105 static PropValue kvm_default_props[] = { 4106 { "kvmclock", "on" }, 4107 { "kvm-nopiodelay", "on" }, 4108 { "kvm-asyncpf", "on" }, 4109 { "kvm-steal-time", "on" }, 4110 { "kvm-pv-eoi", "on" }, 4111 { "kvmclock-stable-bit", "on" }, 4112 { "x2apic", "on" }, 4113 { "acpi", "off" }, 4114 { "monitor", "off" }, 4115 { "svm", "off" }, 4116 { NULL, NULL }, 4117 }; 4118 4119 /* TCG-specific defaults that override all CPU models when using TCG 4120 */ 4121 static PropValue tcg_default_props[] = { 4122 { "vme", "off" }, 4123 { NULL, NULL }, 4124 }; 4125 4126 4127 /* 4128 * We resolve CPU model aliases using -v1 when using "-machine 4129 * none", but this is just for compatibility while libvirt isn't 4130 * adapted to resolve CPU model versions before creating VMs. 4131 * See "Runnability guarantee of CPU models" at * qemu-deprecated.texi. 4132 */ 4133 X86CPUVersion default_cpu_version = 1; 4134 4135 void x86_cpu_set_default_version(X86CPUVersion version) 4136 { 4137 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4138 assert(version != CPU_VERSION_AUTO); 4139 default_cpu_version = version; 4140 } 4141 4142 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4143 { 4144 int v = 0; 4145 const X86CPUVersionDefinition *vdef = 4146 x86_cpu_def_get_versions(model->cpudef); 4147 while (vdef->version) { 4148 v = vdef->version; 4149 vdef++; 4150 } 4151 return v; 4152 } 4153 4154 /* Return the actual version being used for a specific CPU model */ 4155 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4156 { 4157 X86CPUVersion v = model->version; 4158 if (v == CPU_VERSION_AUTO) { 4159 v = default_cpu_version; 4160 } 4161 if (v == CPU_VERSION_LATEST) { 4162 return x86_cpu_model_last_version(model); 4163 } 4164 return v; 4165 } 4166 4167 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4168 { 4169 PropValue *pv; 4170 for (pv = kvm_default_props; pv->prop; pv++) { 4171 if (!strcmp(pv->prop, prop)) { 4172 pv->value = value; 4173 break; 4174 } 4175 } 4176 4177 /* It is valid to call this function only for properties that 4178 * are already present in the kvm_default_props table. 4179 */ 4180 assert(pv->prop); 4181 } 4182 4183 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 4184 bool migratable_only); 4185 4186 static bool lmce_supported(void) 4187 { 4188 uint64_t mce_cap = 0; 4189 4190 #ifdef CONFIG_KVM 4191 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4192 return false; 4193 } 4194 #endif 4195 4196 return !!(mce_cap & MCG_LMCE_P); 4197 } 4198 4199 #define CPUID_MODEL_ID_SZ 48 4200 4201 /** 4202 * cpu_x86_fill_model_id: 4203 * Get CPUID model ID string from host CPU. 4204 * 4205 * @str should have at least CPUID_MODEL_ID_SZ bytes 4206 * 4207 * The function does NOT add a null terminator to the string 4208 * automatically. 
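 *
 * The 48 bytes are read from CPUID leaves 0x80000002..0x80000004 (four
 * 32-bit registers per leaf, 16 bytes per leaf).  Callers such as
 * max_x86_cpu_initfn() below pass a zero-initialized buffer of
 * CPUID_MODEL_ID_SZ + 1 bytes so the result ends up NUL-terminated.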
4209 */ 4210 static int cpu_x86_fill_model_id(char *str) 4211 { 4212 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4213 int i; 4214 4215 for (i = 0; i < 3; i++) { 4216 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4217 memcpy(str + i * 16 + 0, &eax, 4); 4218 memcpy(str + i * 16 + 4, &ebx, 4); 4219 memcpy(str + i * 16 + 8, &ecx, 4); 4220 memcpy(str + i * 16 + 12, &edx, 4); 4221 } 4222 return 0; 4223 } 4224 4225 static Property max_x86_cpu_properties[] = { 4226 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4227 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4228 DEFINE_PROP_END_OF_LIST() 4229 }; 4230 4231 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4232 { 4233 DeviceClass *dc = DEVICE_CLASS(oc); 4234 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4235 4236 xcc->ordering = 9; 4237 4238 xcc->model_description = 4239 "Enables all features supported by the accelerator in the current host"; 4240 4241 device_class_set_props(dc, max_x86_cpu_properties); 4242 } 4243 4244 static void max_x86_cpu_initfn(Object *obj) 4245 { 4246 X86CPU *cpu = X86_CPU(obj); 4247 CPUX86State *env = &cpu->env; 4248 KVMState *s = kvm_state; 4249 4250 /* We can't fill the features array here because we don't know yet if 4251 * "migratable" is true or false. 4252 */ 4253 cpu->max_features = true; 4254 4255 if (accel_uses_host_cpuid()) { 4256 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4257 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4258 int family, model, stepping; 4259 4260 host_vendor_fms(vendor, &family, &model, &stepping); 4261 cpu_x86_fill_model_id(model_id); 4262 4263 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 4264 object_property_set_int(OBJECT(cpu), "family", family, &error_abort); 4265 object_property_set_int(OBJECT(cpu), "model", model, &error_abort); 4266 object_property_set_int(OBJECT(cpu), "stepping", stepping, 4267 &error_abort); 4268 object_property_set_str(OBJECT(cpu), "model-id", model_id, 4269 &error_abort); 4270 4271 if (kvm_enabled()) { 4272 env->cpuid_min_level = 4273 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4274 env->cpuid_min_xlevel = 4275 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4276 env->cpuid_min_xlevel2 = 4277 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4278 } else { 4279 env->cpuid_min_level = 4280 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4281 env->cpuid_min_xlevel = 4282 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4283 env->cpuid_min_xlevel2 = 4284 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4285 } 4286 4287 if (lmce_supported()) { 4288 object_property_set_bool(OBJECT(cpu), "lmce", true, &error_abort); 4289 } 4290 } else { 4291 object_property_set_str(OBJECT(cpu), "vendor", CPUID_VENDOR_AMD, 4292 &error_abort); 4293 object_property_set_int(OBJECT(cpu), "family", 6, &error_abort); 4294 object_property_set_int(OBJECT(cpu), "model", 6, &error_abort); 4295 object_property_set_int(OBJECT(cpu), "stepping", 3, &error_abort); 4296 object_property_set_str(OBJECT(cpu), "model-id", 4297 "QEMU TCG CPU version " QEMU_HW_VERSION, 4298 &error_abort); 4299 } 4300 4301 object_property_set_bool(OBJECT(cpu), "pmu", true, &error_abort); 4302 } 4303 4304 static const TypeInfo max_x86_cpu_type_info = { 4305 .name = X86_CPU_TYPE_NAME("max"), 4306 .parent = TYPE_X86_CPU, 4307 .instance_init = max_x86_cpu_initfn, 4308 .class_init = max_x86_cpu_class_init, 4309 }; 4310 4311 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4312 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4313 { 
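    /*
     * Unlike "max", the "host" model requires a hardware accelerator
     * (host_cpuid_required is set below); ordering 8 also makes it sort
     * ahead of "max" (ordering 9) in x86_cpu_list() output.
     */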
4314 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4315 4316 xcc->host_cpuid_required = true; 4317 xcc->ordering = 8; 4318 4319 #if defined(CONFIG_KVM) 4320 xcc->model_description = 4321 "KVM processor with all supported host features "; 4322 #elif defined(CONFIG_HVF) 4323 xcc->model_description = 4324 "HVF processor with all supported host features "; 4325 #endif 4326 } 4327 4328 static const TypeInfo host_x86_cpu_type_info = { 4329 .name = X86_CPU_TYPE_NAME("host"), 4330 .parent = X86_CPU_TYPE_NAME("max"), 4331 .class_init = host_x86_cpu_class_init, 4332 }; 4333 4334 #endif 4335 4336 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4337 { 4338 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4339 4340 switch (f->type) { 4341 case CPUID_FEATURE_WORD: 4342 { 4343 const char *reg = get_register_name_32(f->cpuid.reg); 4344 assert(reg); 4345 return g_strdup_printf("CPUID.%02XH:%s", 4346 f->cpuid.eax, reg); 4347 } 4348 case MSR_FEATURE_WORD: 4349 return g_strdup_printf("MSR(%02XH)", 4350 f->msr.index); 4351 } 4352 4353 return NULL; 4354 } 4355 4356 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4357 { 4358 FeatureWord w; 4359 4360 for (w = 0; w < FEATURE_WORDS; w++) { 4361 if (cpu->filtered_features[w]) { 4362 return true; 4363 } 4364 } 4365 4366 return false; 4367 } 4368 4369 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4370 const char *verbose_prefix) 4371 { 4372 CPUX86State *env = &cpu->env; 4373 FeatureWordInfo *f = &feature_word_info[w]; 4374 int i; 4375 4376 if (!cpu->force_features) { 4377 env->features[w] &= ~mask; 4378 } 4379 cpu->filtered_features[w] |= mask; 4380 4381 if (!verbose_prefix) { 4382 return; 4383 } 4384 4385 for (i = 0; i < 64; ++i) { 4386 if ((1ULL << i) & mask) { 4387 g_autofree char *feat_word_str = feature_word_description(f, i); 4388 warn_report("%s: %s%s%s [bit %d]", 4389 verbose_prefix, 4390 feat_word_str, 4391 f->feat_names[i] ? "." : "", 4392 f->feat_names[i] ? f->feat_names[i] : "", i); 4393 } 4394 } 4395 } 4396 4397 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4398 const char *name, void *opaque, 4399 Error **errp) 4400 { 4401 X86CPU *cpu = X86_CPU(obj); 4402 CPUX86State *env = &cpu->env; 4403 int64_t value; 4404 4405 value = (env->cpuid_version >> 8) & 0xf; 4406 if (value == 0xf) { 4407 value += (env->cpuid_version >> 20) & 0xff; 4408 } 4409 visit_type_int(v, name, &value, errp); 4410 } 4411 4412 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4413 const char *name, void *opaque, 4414 Error **errp) 4415 { 4416 X86CPU *cpu = X86_CPU(obj); 4417 CPUX86State *env = &cpu->env; 4418 const int64_t min = 0; 4419 const int64_t max = 0xff + 0xf; 4420 int64_t value; 4421 4422 if (!visit_type_int(v, name, &value, errp)) { 4423 return; 4424 } 4425 if (value < min || value > max) { 4426 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4427 name ? 
name : "null", value, min, max); 4428 return; 4429 } 4430 4431 env->cpuid_version &= ~0xff00f00; 4432 if (value > 0x0f) { 4433 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4434 } else { 4435 env->cpuid_version |= value << 8; 4436 } 4437 } 4438 4439 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4440 const char *name, void *opaque, 4441 Error **errp) 4442 { 4443 X86CPU *cpu = X86_CPU(obj); 4444 CPUX86State *env = &cpu->env; 4445 int64_t value; 4446 4447 value = (env->cpuid_version >> 4) & 0xf; 4448 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4449 visit_type_int(v, name, &value, errp); 4450 } 4451 4452 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4453 const char *name, void *opaque, 4454 Error **errp) 4455 { 4456 X86CPU *cpu = X86_CPU(obj); 4457 CPUX86State *env = &cpu->env; 4458 const int64_t min = 0; 4459 const int64_t max = 0xff; 4460 int64_t value; 4461 4462 if (!visit_type_int(v, name, &value, errp)) { 4463 return; 4464 } 4465 if (value < min || value > max) { 4466 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4467 name ? name : "null", value, min, max); 4468 return; 4469 } 4470 4471 env->cpuid_version &= ~0xf00f0; 4472 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4473 } 4474 4475 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4476 const char *name, void *opaque, 4477 Error **errp) 4478 { 4479 X86CPU *cpu = X86_CPU(obj); 4480 CPUX86State *env = &cpu->env; 4481 int64_t value; 4482 4483 value = env->cpuid_version & 0xf; 4484 visit_type_int(v, name, &value, errp); 4485 } 4486 4487 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4488 const char *name, void *opaque, 4489 Error **errp) 4490 { 4491 X86CPU *cpu = X86_CPU(obj); 4492 CPUX86State *env = &cpu->env; 4493 const int64_t min = 0; 4494 const int64_t max = 0xf; 4495 int64_t value; 4496 4497 if (!visit_type_int(v, name, &value, errp)) { 4498 return; 4499 } 4500 if (value < min || value > max) { 4501 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4502 name ? 
name : "null", value, min, max); 4503 return; 4504 } 4505 4506 env->cpuid_version &= ~0xf; 4507 env->cpuid_version |= value & 0xf; 4508 } 4509 4510 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4511 { 4512 X86CPU *cpu = X86_CPU(obj); 4513 CPUX86State *env = &cpu->env; 4514 char *value; 4515 4516 value = g_malloc(CPUID_VENDOR_SZ + 1); 4517 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4518 env->cpuid_vendor3); 4519 return value; 4520 } 4521 4522 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4523 Error **errp) 4524 { 4525 X86CPU *cpu = X86_CPU(obj); 4526 CPUX86State *env = &cpu->env; 4527 int i; 4528 4529 if (strlen(value) != CPUID_VENDOR_SZ) { 4530 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4531 return; 4532 } 4533 4534 env->cpuid_vendor1 = 0; 4535 env->cpuid_vendor2 = 0; 4536 env->cpuid_vendor3 = 0; 4537 for (i = 0; i < 4; i++) { 4538 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4539 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4540 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4541 } 4542 } 4543 4544 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4545 { 4546 X86CPU *cpu = X86_CPU(obj); 4547 CPUX86State *env = &cpu->env; 4548 char *value; 4549 int i; 4550 4551 value = g_malloc(48 + 1); 4552 for (i = 0; i < 48; i++) { 4553 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4554 } 4555 value[48] = '\0'; 4556 return value; 4557 } 4558 4559 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4560 Error **errp) 4561 { 4562 X86CPU *cpu = X86_CPU(obj); 4563 CPUX86State *env = &cpu->env; 4564 int c, len, i; 4565 4566 if (model_id == NULL) { 4567 model_id = ""; 4568 } 4569 len = strlen(model_id); 4570 memset(env->cpuid_model, 0, 48); 4571 for (i = 0; i < 48; i++) { 4572 if (i >= len) { 4573 c = '\0'; 4574 } else { 4575 c = (uint8_t)model_id[i]; 4576 } 4577 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4578 } 4579 } 4580 4581 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4582 void *opaque, Error **errp) 4583 { 4584 X86CPU *cpu = X86_CPU(obj); 4585 int64_t value; 4586 4587 value = cpu->env.tsc_khz * 1000; 4588 visit_type_int(v, name, &value, errp); 4589 } 4590 4591 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4592 void *opaque, Error **errp) 4593 { 4594 X86CPU *cpu = X86_CPU(obj); 4595 const int64_t min = 0; 4596 const int64_t max = INT64_MAX; 4597 int64_t value; 4598 4599 if (!visit_type_int(v, name, &value, errp)) { 4600 return; 4601 } 4602 if (value < min || value > max) { 4603 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4604 name ? name : "null", value, min, max); 4605 return; 4606 } 4607 4608 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4609 } 4610 4611 /* Generic getter for "feature-words" and "filtered-features" properties */ 4612 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4613 const char *name, void *opaque, 4614 Error **errp) 4615 { 4616 uint64_t *array = (uint64_t *)opaque; 4617 FeatureWord w; 4618 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4619 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4620 X86CPUFeatureWordInfoList *list = NULL; 4621 4622 for (w = 0; w < FEATURE_WORDS; w++) { 4623 FeatureWordInfo *wi = &feature_word_info[w]; 4624 /* 4625 * We didn't have MSR features when "feature-words" was 4626 * introduced. Therefore skipped other type entries. 
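 * That is why MSR_FEATURE_WORD entries are skipped just below and only
 * CPUID-based feature words are reported.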
4627 */ 4628 if (wi->type != CPUID_FEATURE_WORD) { 4629 continue; 4630 } 4631 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4632 qwi->cpuid_input_eax = wi->cpuid.eax; 4633 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4634 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4635 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4636 qwi->features = array[w]; 4637 4638 /* List will be in reverse order, but order shouldn't matter */ 4639 list_entries[w].next = list; 4640 list_entries[w].value = &word_infos[w]; 4641 list = &list_entries[w]; 4642 } 4643 4644 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4645 } 4646 4647 /* Convert all '_' in a feature string option name to '-', to make feature 4648 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4649 */ 4650 static inline void feat2prop(char *s) 4651 { 4652 while ((s = strchr(s, '_'))) { 4653 *s = '-'; 4654 } 4655 } 4656 4657 /* Return the feature property name for a feature flag bit */ 4658 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4659 { 4660 const char *name; 4661 /* XSAVE components are automatically enabled by other features, 4662 * so return the original feature name instead 4663 */ 4664 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4665 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4666 4667 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4668 x86_ext_save_areas[comp].bits) { 4669 w = x86_ext_save_areas[comp].feature; 4670 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4671 } 4672 } 4673 4674 assert(bitnr < 64); 4675 assert(w < FEATURE_WORDS); 4676 name = feature_word_info[w].feat_names[bitnr]; 4677 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4678 return name; 4679 } 4680 4681 /* Compatibily hack to maintain legacy +-feat semantic, 4682 * where +-feat overwrites any feature set by 4683 * feat=on|feat even if the later is parsed after +-feat 4684 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4685 */ 4686 static GList *plus_features, *minus_features; 4687 4688 static gint compare_string(gconstpointer a, gconstpointer b) 4689 { 4690 return g_strcmp0(a, b); 4691 } 4692 4693 /* Parse "+feature,-feature,feature=foo" CPU feature string 4694 */ 4695 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4696 Error **errp) 4697 { 4698 char *featurestr; /* Single 'key=value" string being parsed */ 4699 static bool cpu_globals_initialized; 4700 bool ambiguous = false; 4701 4702 if (cpu_globals_initialized) { 4703 return; 4704 } 4705 cpu_globals_initialized = true; 4706 4707 if (!features) { 4708 return; 4709 } 4710 4711 for (featurestr = strtok(features, ","); 4712 featurestr; 4713 featurestr = strtok(NULL, ",")) { 4714 const char *name; 4715 const char *val = NULL; 4716 char *eq = NULL; 4717 char num[32]; 4718 GlobalProperty *prop; 4719 4720 /* Compatibility syntax: */ 4721 if (featurestr[0] == '+') { 4722 plus_features = g_list_append(plus_features, 4723 g_strdup(featurestr + 1)); 4724 continue; 4725 } else if (featurestr[0] == '-') { 4726 minus_features = g_list_append(minus_features, 4727 g_strdup(featurestr + 1)); 4728 continue; 4729 } 4730 4731 eq = strchr(featurestr, '='); 4732 if (eq) { 4733 *eq++ = 0; 4734 val = eq; 4735 } else { 4736 val = "on"; 4737 } 4738 4739 feat2prop(featurestr); 4740 name = featurestr; 4741 4742 if (g_list_find_custom(plus_features, name, compare_string)) { 4743 warn_report("Ambiguous CPU model string. 
" 4744 "Don't mix both \"+%s\" and \"%s=%s\"", 4745 name, name, val); 4746 ambiguous = true; 4747 } 4748 if (g_list_find_custom(minus_features, name, compare_string)) { 4749 warn_report("Ambiguous CPU model string. " 4750 "Don't mix both \"-%s\" and \"%s=%s\"", 4751 name, name, val); 4752 ambiguous = true; 4753 } 4754 4755 /* Special case: */ 4756 if (!strcmp(name, "tsc-freq")) { 4757 int ret; 4758 uint64_t tsc_freq; 4759 4760 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4761 if (ret < 0 || tsc_freq > INT64_MAX) { 4762 error_setg(errp, "bad numerical value %s", val); 4763 return; 4764 } 4765 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4766 val = num; 4767 name = "tsc-frequency"; 4768 } 4769 4770 prop = g_new0(typeof(*prop), 1); 4771 prop->driver = typename; 4772 prop->property = g_strdup(name); 4773 prop->value = g_strdup(val); 4774 qdev_prop_register_global(prop); 4775 } 4776 4777 if (ambiguous) { 4778 warn_report("Compatibility of ambiguous CPU model " 4779 "strings won't be kept on future QEMU versions"); 4780 } 4781 } 4782 4783 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4784 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4785 4786 /* Build a list with the name of all features on a feature word array */ 4787 static void x86_cpu_list_feature_names(FeatureWordArray features, 4788 strList **feat_names) 4789 { 4790 FeatureWord w; 4791 strList **next = feat_names; 4792 4793 for (w = 0; w < FEATURE_WORDS; w++) { 4794 uint64_t filtered = features[w]; 4795 int i; 4796 for (i = 0; i < 64; i++) { 4797 if (filtered & (1ULL << i)) { 4798 strList *new = g_new0(strList, 1); 4799 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4800 *next = new; 4801 next = &new->next; 4802 } 4803 } 4804 } 4805 } 4806 4807 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4808 const char *name, void *opaque, 4809 Error **errp) 4810 { 4811 X86CPU *xc = X86_CPU(obj); 4812 strList *result = NULL; 4813 4814 x86_cpu_list_feature_names(xc->filtered_features, &result); 4815 visit_type_strList(v, "unavailable-features", &result, errp); 4816 } 4817 4818 /* Check for missing features that may prevent the CPU class from 4819 * running using the current machine and accelerator. 4820 */ 4821 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4822 strList **missing_feats) 4823 { 4824 X86CPU *xc; 4825 Error *err = NULL; 4826 strList **next = missing_feats; 4827 4828 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4829 strList *new = g_new0(strList, 1); 4830 new->value = g_strdup("kvm"); 4831 *missing_feats = new; 4832 return; 4833 } 4834 4835 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4836 4837 x86_cpu_expand_features(xc, &err); 4838 if (err) { 4839 /* Errors at x86_cpu_expand_features should never happen, 4840 * but in case it does, just report the model as not 4841 * runnable at all using the "type" property. 
4842 */ 4843 strList *new = g_new0(strList, 1); 4844 new->value = g_strdup("type"); 4845 *next = new; 4846 next = &new->next; 4847 } 4848 4849 x86_cpu_filter_features(xc, false); 4850 4851 x86_cpu_list_feature_names(xc->filtered_features, next); 4852 4853 object_unref(OBJECT(xc)); 4854 } 4855 4856 /* Print all cpuid feature names in featureset 4857 */ 4858 static void listflags(GList *features) 4859 { 4860 size_t len = 0; 4861 GList *tmp; 4862 4863 for (tmp = features; tmp; tmp = tmp->next) { 4864 const char *name = tmp->data; 4865 if ((len + strlen(name) + 1) >= 75) { 4866 qemu_printf("\n"); 4867 len = 0; 4868 } 4869 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4870 len += strlen(name) + 1; 4871 } 4872 qemu_printf("\n"); 4873 } 4874 4875 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4876 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4877 { 4878 ObjectClass *class_a = (ObjectClass *)a; 4879 ObjectClass *class_b = (ObjectClass *)b; 4880 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4881 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4882 int ret; 4883 4884 if (cc_a->ordering != cc_b->ordering) { 4885 ret = cc_a->ordering - cc_b->ordering; 4886 } else { 4887 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4888 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4889 ret = strcmp(name_a, name_b); 4890 } 4891 return ret; 4892 } 4893 4894 static GSList *get_sorted_cpu_model_list(void) 4895 { 4896 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4897 list = g_slist_sort(list, x86_cpu_list_compare); 4898 return list; 4899 } 4900 4901 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4902 { 4903 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4904 char *r = object_property_get_str(obj, "model-id", &error_abort); 4905 object_unref(obj); 4906 return r; 4907 } 4908 4909 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4910 { 4911 X86CPUVersion version; 4912 4913 if (!cc->model || !cc->model->is_alias) { 4914 return NULL; 4915 } 4916 version = x86_cpu_model_resolve_version(cc->model); 4917 if (version <= 0) { 4918 return NULL; 4919 } 4920 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4921 } 4922 4923 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4924 { 4925 ObjectClass *oc = data; 4926 X86CPUClass *cc = X86_CPU_CLASS(oc); 4927 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4928 g_autofree char *desc = g_strdup(cc->model_description); 4929 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4930 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4931 4932 if (!desc && alias_of) { 4933 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4934 desc = g_strdup("(alias configured by machine type)"); 4935 } else { 4936 desc = g_strdup_printf("(alias of %s)", alias_of); 4937 } 4938 } 4939 if (!desc && cc->model && cc->model->note) { 4940 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4941 } 4942 if (!desc) { 4943 desc = g_strdup_printf("%s", model_id); 4944 } 4945 4946 qemu_printf("x86 %-20s %-58s\n", name, desc); 4947 } 4948 4949 /* list available CPU models and flags */ 4950 void x86_cpu_list(void) 4951 { 4952 int i, j; 4953 GSList *list; 4954 GList *names = NULL; 4955 4956 qemu_printf("Available CPUs:\n"); 4957 list = get_sorted_cpu_model_list(); 4958 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4959 g_slist_free(list); 4960 4961 names = NULL; 4962 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 4963 
FeatureWordInfo *fw = &feature_word_info[i]; 4964 for (j = 0; j < 64; j++) { 4965 if (fw->feat_names[j]) { 4966 names = g_list_append(names, (gpointer)fw->feat_names[j]); 4967 } 4968 } 4969 } 4970 4971 names = g_list_sort(names, (GCompareFunc)strcmp); 4972 4973 qemu_printf("\nRecognized CPUID flags:\n"); 4974 listflags(names); 4975 qemu_printf("\n"); 4976 g_list_free(names); 4977 } 4978 4979 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 4980 { 4981 ObjectClass *oc = data; 4982 X86CPUClass *cc = X86_CPU_CLASS(oc); 4983 CpuDefinitionInfoList **cpu_list = user_data; 4984 CpuDefinitionInfoList *entry; 4985 CpuDefinitionInfo *info; 4986 4987 info = g_malloc0(sizeof(*info)); 4988 info->name = x86_cpu_class_get_model_name(cc); 4989 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 4990 info->has_unavailable_features = true; 4991 info->q_typename = g_strdup(object_class_get_name(oc)); 4992 info->migration_safe = cc->migration_safe; 4993 info->has_migration_safe = true; 4994 info->q_static = cc->static_model; 4995 /* 4996 * Old machine types won't report aliases, so that alias translation 4997 * doesn't break compatibility with previous QEMU versions. 4998 */ 4999 if (default_cpu_version != CPU_VERSION_LEGACY) { 5000 info->alias_of = x86_cpu_class_get_alias_of(cc); 5001 info->has_alias_of = !!info->alias_of; 5002 } 5003 5004 entry = g_malloc0(sizeof(*entry)); 5005 entry->value = info; 5006 entry->next = *cpu_list; 5007 *cpu_list = entry; 5008 } 5009 5010 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 5011 { 5012 CpuDefinitionInfoList *cpu_list = NULL; 5013 GSList *list = get_sorted_cpu_model_list(); 5014 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 5015 g_slist_free(list); 5016 return cpu_list; 5017 } 5018 5019 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5020 bool migratable_only) 5021 { 5022 FeatureWordInfo *wi = &feature_word_info[w]; 5023 uint64_t r = 0; 5024 5025 if (kvm_enabled()) { 5026 switch (wi->type) { 5027 case CPUID_FEATURE_WORD: 5028 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5029 wi->cpuid.ecx, 5030 wi->cpuid.reg); 5031 break; 5032 case MSR_FEATURE_WORD: 5033 r = kvm_arch_get_supported_msr_feature(kvm_state, 5034 wi->msr.index); 5035 break; 5036 } 5037 } else if (hvf_enabled()) { 5038 if (wi->type != CPUID_FEATURE_WORD) { 5039 return 0; 5040 } 5041 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5042 wi->cpuid.ecx, 5043 wi->cpuid.reg); 5044 } else if (tcg_enabled()) { 5045 r = wi->tcg_features; 5046 } else { 5047 return ~0; 5048 } 5049 if (migratable_only) { 5050 r &= x86_cpu_get_migratable_flags(w); 5051 } 5052 return r; 5053 } 5054 5055 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5056 { 5057 PropValue *pv; 5058 for (pv = props; pv->prop; pv++) { 5059 if (!pv->value) { 5060 continue; 5061 } 5062 object_property_parse(OBJECT(cpu), pv->prop, pv->value, 5063 &error_abort); 5064 } 5065 } 5066 5067 /* Apply properties for the CPU model version specified in model */ 5068 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5069 { 5070 const X86CPUVersionDefinition *vdef; 5071 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5072 5073 if (version == CPU_VERSION_LEGACY) { 5074 return; 5075 } 5076 5077 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5078 PropValue *p; 5079 5080 for (p = vdef->props; p && p->prop; p++) { 5081 object_property_parse(OBJECT(cpu), p->prop, p->value, 5082 &error_abort); 5083 
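/* Version properties accumulate: each version's overrides are applied on
 * top of the previous ones until the requested version is reached (checked
 * just below). */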
} 5084 5085 if (vdef->version == version) { 5086 break; 5087 } 5088 } 5089 5090 /* 5091 * If we reached the end of the list, version number was invalid 5092 */ 5093 assert(vdef->version == version); 5094 } 5095 5096 /* Load data from X86CPUDefinition into a X86CPU object 5097 */ 5098 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) 5099 { 5100 X86CPUDefinition *def = model->cpudef; 5101 CPUX86State *env = &cpu->env; 5102 const char *vendor; 5103 char host_vendor[CPUID_VENDOR_SZ + 1]; 5104 FeatureWord w; 5105 5106 /*NOTE: any property set by this function should be returned by 5107 * x86_cpu_static_props(), so static expansion of 5108 * query-cpu-model-expansion is always complete. 5109 */ 5110 5111 /* CPU models only set _minimum_ values for level/xlevel: */ 5112 object_property_set_uint(OBJECT(cpu), "min-level", def->level, 5113 &error_abort); 5114 object_property_set_uint(OBJECT(cpu), "min-xlevel", def->xlevel, 5115 &error_abort); 5116 5117 object_property_set_int(OBJECT(cpu), "family", def->family, &error_abort); 5118 object_property_set_int(OBJECT(cpu), "model", def->model, &error_abort); 5119 object_property_set_int(OBJECT(cpu), "stepping", def->stepping, 5120 &error_abort); 5121 object_property_set_str(OBJECT(cpu), "model-id", def->model_id, 5122 &error_abort); 5123 for (w = 0; w < FEATURE_WORDS; w++) { 5124 env->features[w] = def->features[w]; 5125 } 5126 5127 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5128 cpu->legacy_cache = !def->cache_info; 5129 5130 /* Special cases not set in the X86CPUDefinition structs: */ 5131 /* TODO: in-kernel irqchip for hvf */ 5132 if (kvm_enabled()) { 5133 if (!kvm_irqchip_in_kernel()) { 5134 x86_cpu_change_kvm_default("x2apic", "off"); 5135 } 5136 5137 x86_cpu_apply_props(cpu, kvm_default_props); 5138 } else if (tcg_enabled()) { 5139 x86_cpu_apply_props(cpu, tcg_default_props); 5140 } 5141 5142 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5143 5144 /* sysenter isn't supported in compatibility mode on AMD, 5145 * syscall isn't supported in compatibility mode on Intel. 5146 * Normally we advertise the actual CPU vendor, but you can 5147 * override this using the 'vendor' property if you want to use 5148 * KVM's sysenter/syscall emulation in compatibility mode and 5149 * when doing cross vendor migration 5150 */ 5151 vendor = def->vendor; 5152 if (accel_uses_host_cpuid()) { 5153 uint32_t ebx = 0, ecx = 0, edx = 0; 5154 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5155 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5156 vendor = host_vendor; 5157 } 5158 5159 object_property_set_str(OBJECT(cpu), "vendor", vendor, &error_abort); 5160 5161 x86_cpu_apply_version_props(cpu, model); 5162 } 5163 5164 #ifndef CONFIG_USER_ONLY 5165 /* Return a QDict containing keys for all properties that can be included 5166 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 5167 * must be included in the dictionary. 
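 * In practice that means the fixed names listed in props[] below plus one
 * key per named feature flag (e.g. "avx" or "xsaveopt").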
5168 */ 5169 static QDict *x86_cpu_static_props(void) 5170 { 5171 FeatureWord w; 5172 int i; 5173 static const char *props[] = { 5174 "min-level", 5175 "min-xlevel", 5176 "family", 5177 "model", 5178 "stepping", 5179 "model-id", 5180 "vendor", 5181 "lmce", 5182 NULL, 5183 }; 5184 static QDict *d; 5185 5186 if (d) { 5187 return d; 5188 } 5189 5190 d = qdict_new(); 5191 for (i = 0; props[i]; i++) { 5192 qdict_put_null(d, props[i]); 5193 } 5194 5195 for (w = 0; w < FEATURE_WORDS; w++) { 5196 FeatureWordInfo *fi = &feature_word_info[w]; 5197 int bit; 5198 for (bit = 0; bit < 64; bit++) { 5199 if (!fi->feat_names[bit]) { 5200 continue; 5201 } 5202 qdict_put_null(d, fi->feat_names[bit]); 5203 } 5204 } 5205 5206 return d; 5207 } 5208 5209 /* Add an entry to @props dict, with the value for property. */ 5210 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5211 { 5212 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5213 &error_abort); 5214 5215 qdict_put_obj(props, prop, value); 5216 } 5217 5218 /* Convert CPU model data from X86CPU object to a property dictionary 5219 * that can recreate exactly the same CPU model. 5220 */ 5221 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5222 { 5223 QDict *sprops = x86_cpu_static_props(); 5224 const QDictEntry *e; 5225 5226 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5227 const char *prop = qdict_entry_key(e); 5228 x86_cpu_expand_prop(cpu, props, prop); 5229 } 5230 } 5231 5232 /* Convert CPU model data from X86CPU object to a property dictionary 5233 * that can recreate exactly the same CPU model, including every 5234 * writeable QOM property. 5235 */ 5236 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5237 { 5238 ObjectPropertyIterator iter; 5239 ObjectProperty *prop; 5240 5241 object_property_iter_init(&iter, OBJECT(cpu)); 5242 while ((prop = object_property_iter_next(&iter))) { 5243 /* skip read-only or write-only properties */ 5244 if (!prop->get || !prop->set) { 5245 continue; 5246 } 5247 5248 /* "hotplugged" is the only property that is configurable 5249 * on the command-line but will be set differently on CPUs 5250 * created using "-cpu ... -smp ..." and by CPUs created 5251 * on the fly by x86_cpu_from_model() for querying. Skip it. 
5252 */ 5253 if (!strcmp(prop->name, "hotplugged")) { 5254 continue; 5255 } 5256 x86_cpu_expand_prop(cpu, props, prop->name); 5257 } 5258 } 5259 5260 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5261 { 5262 const QDictEntry *prop; 5263 Error *err = NULL; 5264 5265 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5266 if (!object_property_set_qobject(obj, qdict_entry_key(prop), 5267 qdict_entry_value(prop), &err)) { 5268 break; 5269 } 5270 } 5271 5272 error_propagate(errp, err); 5273 } 5274 5275 /* Create X86CPU object according to model+props specification */ 5276 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5277 { 5278 X86CPU *xc = NULL; 5279 X86CPUClass *xcc; 5280 Error *err = NULL; 5281 5282 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5283 if (xcc == NULL) { 5284 error_setg(&err, "CPU model '%s' not found", model); 5285 goto out; 5286 } 5287 5288 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5289 if (props) { 5290 object_apply_props(OBJECT(xc), props, &err); 5291 if (err) { 5292 goto out; 5293 } 5294 } 5295 5296 x86_cpu_expand_features(xc, &err); 5297 if (err) { 5298 goto out; 5299 } 5300 5301 out: 5302 if (err) { 5303 error_propagate(errp, err); 5304 object_unref(OBJECT(xc)); 5305 xc = NULL; 5306 } 5307 return xc; 5308 } 5309 5310 CpuModelExpansionInfo * 5311 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5312 CpuModelInfo *model, 5313 Error **errp) 5314 { 5315 X86CPU *xc = NULL; 5316 Error *err = NULL; 5317 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5318 QDict *props = NULL; 5319 const char *base_name; 5320 5321 xc = x86_cpu_from_model(model->name, 5322 model->has_props ? 5323 qobject_to(QDict, model->props) : 5324 NULL, &err); 5325 if (err) { 5326 goto out; 5327 } 5328 5329 props = qdict_new(); 5330 ret->model = g_new0(CpuModelInfo, 1); 5331 ret->model->props = QOBJECT(props); 5332 ret->model->has_props = true; 5333 5334 switch (type) { 5335 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5336 /* Static expansion will be based on "base" only */ 5337 base_name = "base"; 5338 x86_cpu_to_dict(xc, props); 5339 break; 5340 case CPU_MODEL_EXPANSION_TYPE_FULL: 5341 /* As we don't return every single property, full expansion needs 5342 * to keep the original model name+props, and add extra 5343 * properties on top of that. 
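 * (A static expansion, by contrast, is reported against the "base" model,
 * with every property from x86_cpu_static_props() spelled out explicitly.)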
5344 */ 5345 base_name = model->name; 5346 x86_cpu_to_dict_full(xc, props); 5347 break; 5348 default: 5349 error_setg(&err, "Unsupported expansion type"); 5350 goto out; 5351 } 5352 5353 x86_cpu_to_dict(xc, props); 5354 5355 ret->model->name = g_strdup(base_name); 5356 5357 out: 5358 object_unref(OBJECT(xc)); 5359 if (err) { 5360 error_propagate(errp, err); 5361 qapi_free_CpuModelExpansionInfo(ret); 5362 ret = NULL; 5363 } 5364 return ret; 5365 } 5366 #endif /* !CONFIG_USER_ONLY */ 5367 5368 static gchar *x86_gdb_arch_name(CPUState *cs) 5369 { 5370 #ifdef TARGET_X86_64 5371 return g_strdup("i386:x86-64"); 5372 #else 5373 return g_strdup("i386"); 5374 #endif 5375 } 5376 5377 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5378 { 5379 X86CPUModel *model = data; 5380 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5381 5382 xcc->model = model; 5383 xcc->migration_safe = true; 5384 } 5385 5386 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5387 { 5388 g_autofree char *typename = x86_cpu_type_name(name); 5389 TypeInfo ti = { 5390 .name = typename, 5391 .parent = TYPE_X86_CPU, 5392 .class_init = x86_cpu_cpudef_class_init, 5393 .class_data = model, 5394 }; 5395 5396 type_register(&ti); 5397 } 5398 5399 static void x86_register_cpudef_types(X86CPUDefinition *def) 5400 { 5401 X86CPUModel *m; 5402 const X86CPUVersionDefinition *vdef; 5403 5404 /* AMD aliases are handled at runtime based on CPUID vendor, so 5405 * they shouldn't be set on the CPU model table. 5406 */ 5407 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5408 /* catch mistakes instead of silently truncating model_id when too long */ 5409 assert(def->model_id && strlen(def->model_id) <= 48); 5410 5411 /* Unversioned model: */ 5412 m = g_new0(X86CPUModel, 1); 5413 m->cpudef = def; 5414 m->version = CPU_VERSION_AUTO; 5415 m->is_alias = true; 5416 x86_register_cpu_model_type(def->name, m); 5417 5418 /* Versioned models: */ 5419 5420 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5421 X86CPUModel *m = g_new0(X86CPUModel, 1); 5422 g_autofree char *name = 5423 x86_cpu_versioned_model_name(def, vdef->version); 5424 m->cpudef = def; 5425 m->version = vdef->version; 5426 m->note = vdef->note; 5427 x86_register_cpu_model_type(name, m); 5428 5429 if (vdef->alias) { 5430 X86CPUModel *am = g_new0(X86CPUModel, 1); 5431 am->cpudef = def; 5432 am->version = vdef->version; 5433 am->is_alias = true; 5434 x86_register_cpu_model_type(vdef->alias, am); 5435 } 5436 } 5437 5438 } 5439 5440 #if !defined(CONFIG_USER_ONLY) 5441 5442 void cpu_clear_apic_feature(CPUX86State *env) 5443 { 5444 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5445 } 5446 5447 #endif /* !CONFIG_USER_ONLY */ 5448 5449 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5450 uint32_t *eax, uint32_t *ebx, 5451 uint32_t *ecx, uint32_t *edx) 5452 { 5453 X86CPU *cpu = env_archcpu(env); 5454 CPUState *cs = env_cpu(env); 5455 uint32_t die_offset; 5456 uint32_t limit; 5457 uint32_t signature[3]; 5458 X86CPUTopoInfo topo_info; 5459 5460 topo_info.nodes_per_pkg = env->nr_nodes; 5461 topo_info.dies_per_pkg = env->nr_dies; 5462 topo_info.cores_per_die = cs->nr_cores; 5463 topo_info.threads_per_core = cs->nr_threads; 5464 5465 /* Calculate & apply limits for different index ranges */ 5466 if (index >= 0xC0000000) { 5467 limit = env->cpuid_xlevel2; 5468 } else if (index >= 0x80000000) { 5469 limit = env->cpuid_xlevel; 5470 } else if (index >= 0x40000000) { 5471 limit = 0x40000001; 5472 } else { 5473 limit = 
env->cpuid_level; 5474 } 5475 5476 if (index > limit) { 5477 /* Intel documentation states that invalid EAX input will 5478 * return the same information as EAX=cpuid_level 5479 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 5480 */ 5481 index = env->cpuid_level; 5482 } 5483 5484 switch(index) { 5485 case 0: 5486 *eax = env->cpuid_level; 5487 *ebx = env->cpuid_vendor1; 5488 *edx = env->cpuid_vendor2; 5489 *ecx = env->cpuid_vendor3; 5490 break; 5491 case 1: 5492 *eax = env->cpuid_version; 5493 *ebx = (cpu->apic_id << 24) | 5494 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5495 *ecx = env->features[FEAT_1_ECX]; 5496 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5497 *ecx |= CPUID_EXT_OSXSAVE; 5498 } 5499 *edx = env->features[FEAT_1_EDX]; 5500 if (cs->nr_cores * cs->nr_threads > 1) { 5501 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5502 *edx |= CPUID_HT; 5503 } 5504 if (!cpu->enable_pmu) { 5505 *ecx &= ~CPUID_EXT_PDCM; 5506 } 5507 break; 5508 case 2: 5509 /* cache info: needed for Pentium Pro compatibility */ 5510 if (cpu->cache_info_passthrough) { 5511 host_cpuid(index, 0, eax, ebx, ecx, edx); 5512 break; 5513 } 5514 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5515 *ebx = 0; 5516 if (!cpu->enable_l3_cache) { 5517 *ecx = 0; 5518 } else { 5519 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5520 } 5521 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5522 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5523 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5524 break; 5525 case 4: 5526 /* cache info: needed for Core compatibility */ 5527 if (cpu->cache_info_passthrough) { 5528 host_cpuid(index, count, eax, ebx, ecx, edx); 5529 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
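 * (CPUID[4].EAX bits 31..26 hold the maximum number of addressable core IDs
 * in the physical package, minus one, so the field is rebuilt below from
 * QEMU's own core count.)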
*/ 5530 *eax &= ~0xFC000000; 5531 if ((*eax & 31) && cs->nr_cores > 1) { 5532 *eax |= (cs->nr_cores - 1) << 26; 5533 } 5534 } else { 5535 *eax = 0; 5536 switch (count) { 5537 case 0: /* L1 dcache info */ 5538 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5539 1, cs->nr_cores, 5540 eax, ebx, ecx, edx); 5541 break; 5542 case 1: /* L1 icache info */ 5543 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5544 1, cs->nr_cores, 5545 eax, ebx, ecx, edx); 5546 break; 5547 case 2: /* L2 cache info */ 5548 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5549 cs->nr_threads, cs->nr_cores, 5550 eax, ebx, ecx, edx); 5551 break; 5552 case 3: /* L3 cache info */ 5553 die_offset = apicid_die_offset(&topo_info); 5554 if (cpu->enable_l3_cache) { 5555 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5556 (1 << die_offset), cs->nr_cores, 5557 eax, ebx, ecx, edx); 5558 break; 5559 } 5560 /* fall through */ 5561 default: /* end of info */ 5562 *eax = *ebx = *ecx = *edx = 0; 5563 break; 5564 } 5565 } 5566 break; 5567 case 5: 5568 /* MONITOR/MWAIT Leaf */ 5569 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5570 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5571 *ecx = cpu->mwait.ecx; /* flags */ 5572 *edx = cpu->mwait.edx; /* mwait substates */ 5573 break; 5574 case 6: 5575 /* Thermal and Power Leaf */ 5576 *eax = env->features[FEAT_6_EAX]; 5577 *ebx = 0; 5578 *ecx = 0; 5579 *edx = 0; 5580 break; 5581 case 7: 5582 /* Structured Extended Feature Flags Enumeration Leaf */ 5583 if (count == 0) { 5584 /* Maximum ECX value for sub-leaves */ 5585 *eax = env->cpuid_level_func7; 5586 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5587 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5588 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5589 *ecx |= CPUID_7_0_ECX_OSPKE; 5590 } 5591 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5592 } else if (count == 1) { 5593 *eax = env->features[FEAT_7_1_EAX]; 5594 *ebx = 0; 5595 *ecx = 0; 5596 *edx = 0; 5597 } else { 5598 *eax = 0; 5599 *ebx = 0; 5600 *ecx = 0; 5601 *edx = 0; 5602 } 5603 break; 5604 case 9: 5605 /* Direct Cache Access Information Leaf */ 5606 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5607 *ebx = 0; 5608 *ecx = 0; 5609 *edx = 0; 5610 break; 5611 case 0xA: 5612 /* Architectural Performance Monitoring Leaf */ 5613 if (kvm_enabled() && cpu->enable_pmu) { 5614 KVMState *s = cs->kvm_state; 5615 5616 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5617 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5618 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5619 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5620 } else if (hvf_enabled() && cpu->enable_pmu) { 5621 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5622 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5623 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5624 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5625 } else { 5626 *eax = 0; 5627 *ebx = 0; 5628 *ecx = 0; 5629 *edx = 0; 5630 } 5631 break; 5632 case 0xB: 5633 /* Extended Topology Enumeration Leaf */ 5634 if (!cpu->enable_cpuid_0xb) { 5635 *eax = *ebx = *ecx = *edx = 0; 5636 break; 5637 } 5638 5639 *ecx = count & 0xff; 5640 *edx = cpu->apic_id; 5641 5642 switch (count) { 5643 case 0: 5644 *eax = apicid_core_offset(&topo_info); 5645 *ebx = cs->nr_threads; 5646 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5647 break; 5648 case 1: 5649 *eax = env->pkg_offset; 5650 *ebx = cs->nr_cores * cs->nr_threads; 5651 *ecx |= 
CPUID_TOPOLOGY_LEVEL_CORE; 5652 break; 5653 default: 5654 *eax = 0; 5655 *ebx = 0; 5656 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5657 } 5658 5659 assert(!(*eax & ~0x1f)); 5660 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5661 break; 5662 case 0x1F: 5663 /* V2 Extended Topology Enumeration Leaf */ 5664 if (env->nr_dies < 2) { 5665 *eax = *ebx = *ecx = *edx = 0; 5666 break; 5667 } 5668 5669 *ecx = count & 0xff; 5670 *edx = cpu->apic_id; 5671 switch (count) { 5672 case 0: 5673 *eax = apicid_core_offset(&topo_info); 5674 *ebx = cs->nr_threads; 5675 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5676 break; 5677 case 1: 5678 *eax = apicid_die_offset(&topo_info); 5679 *ebx = cs->nr_cores * cs->nr_threads; 5680 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5681 break; 5682 case 2: 5683 *eax = env->pkg_offset; 5684 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5685 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5686 break; 5687 default: 5688 *eax = 0; 5689 *ebx = 0; 5690 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5691 } 5692 assert(!(*eax & ~0x1f)); 5693 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5694 break; 5695 case 0xD: { 5696 /* Processor Extended State */ 5697 *eax = 0; 5698 *ebx = 0; 5699 *ecx = 0; 5700 *edx = 0; 5701 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5702 break; 5703 } 5704 5705 if (count == 0) { 5706 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5707 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5708 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5709 /* 5710 * The initial values of xcr0 and ebx are both 0. On hosts whose kernel 5711 * lacks KVM commit 412a3c41 (e.g., CentOS 6), ebx stays 0 even after 5712 * the guest updates xcr0, which crashes some legacy guests 5713 * (e.g., CentOS 6). So set ebx == ecx to work around it. 5714 */ 5715 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5716 } else if (count == 1) { 5717 *eax = env->features[FEAT_XSAVE]; 5718 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5719 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5720 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5721 *eax = esa->size; 5722 *ebx = esa->offset; 5723 } 5724 } 5725 break; 5726 } 5727 case 0x14: { 5728 /* Intel Processor Trace Enumeration */ 5729 *eax = 0; 5730 *ebx = 0; 5731 *ecx = 0; 5732 *edx = 0; 5733 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5734 !kvm_enabled()) { 5735 break; 5736 } 5737 5738 if (count == 0) { 5739 *eax = INTEL_PT_MAX_SUBLEAF; 5740 *ebx = INTEL_PT_MINIMAL_EBX; 5741 *ecx = INTEL_PT_MINIMAL_ECX; 5742 } else if (count == 1) { 5743 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5744 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5745 } 5746 break; 5747 } 5748 case 0x40000000: 5749 /* 5750 * CPUID code in kvm_arch_init_vcpu() ignores what is 5751 * set here, but we restrict this to TCG nonetheless.
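 * Leaf 0x40000000 is the start of the hypervisor-reserved CPUID range:
 * EAX returns the highest supported hypervisor leaf (0x40000001 here) and
 * EBX/ECX/EDX carry a 12-byte signature, "TCGTCGTCGTCG" in this case.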
5752 */ 5753 if (tcg_enabled() && cpu->expose_tcg) { 5754 memcpy(signature, "TCGTCGTCGTCG", 12); 5755 *eax = 0x40000001; 5756 *ebx = signature[0]; 5757 *ecx = signature[1]; 5758 *edx = signature[2]; 5759 } else { 5760 *eax = 0; 5761 *ebx = 0; 5762 *ecx = 0; 5763 *edx = 0; 5764 } 5765 break; 5766 case 0x40000001: 5767 *eax = 0; 5768 *ebx = 0; 5769 *ecx = 0; 5770 *edx = 0; 5771 break; 5772 case 0x80000000: 5773 *eax = env->cpuid_xlevel; 5774 *ebx = env->cpuid_vendor1; 5775 *edx = env->cpuid_vendor2; 5776 *ecx = env->cpuid_vendor3; 5777 break; 5778 case 0x80000001: 5779 *eax = env->cpuid_version; 5780 *ebx = 0; 5781 *ecx = env->features[FEAT_8000_0001_ECX]; 5782 *edx = env->features[FEAT_8000_0001_EDX]; 5783 5784 /* The Linux kernel checks for the CMPLegacy bit and 5785 * discards multiple thread information if it is set. 5786 * So don't set it here for Intel to make Linux guests happy. 5787 */ 5788 if (cs->nr_cores * cs->nr_threads > 1) { 5789 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5790 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5791 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5792 *ecx |= 1 << 1; /* CmpLegacy bit */ 5793 } 5794 } 5795 break; 5796 case 0x80000002: 5797 case 0x80000003: 5798 case 0x80000004: 5799 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5800 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5801 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5802 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5803 break; 5804 case 0x80000005: 5805 /* cache info (L1 cache) */ 5806 if (cpu->cache_info_passthrough) { 5807 host_cpuid(index, 0, eax, ebx, ecx, edx); 5808 break; 5809 } 5810 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | 5811 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5812 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | 5813 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5814 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5815 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5816 break; 5817 case 0x80000006: 5818 /* cache info (L2 cache) */ 5819 if (cpu->cache_info_passthrough) { 5820 host_cpuid(index, 0, eax, ebx, ecx, edx); 5821 break; 5822 } 5823 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | 5824 (L2_DTLB_2M_ENTRIES << 16) | 5825 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | 5826 (L2_ITLB_2M_ENTRIES); 5827 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | 5828 (L2_DTLB_4K_ENTRIES << 16) | 5829 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | 5830 (L2_ITLB_4K_ENTRIES); 5831 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5832 cpu->enable_l3_cache ? 5833 env->cache_info_amd.l3_cache : NULL, 5834 ecx, edx); 5835 break; 5836 case 0x80000007: 5837 *eax = 0; 5838 *ebx = 0; 5839 *ecx = 0; 5840 *edx = env->features[FEAT_8000_0007_EDX]; 5841 break; 5842 case 0x80000008: 5843 /* virtual & phys address size in low 2 bytes. */ 5844 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5845 /* 64 bit processor */ 5846 *eax = cpu->phys_bits; /* configurable physical bits */ 5847 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5848 *eax |= 0x00003900; /* 57 bits virtual */ 5849 } else { 5850 *eax |= 0x00003000; /* 48 bits virtual */ 5851 } 5852 } else { 5853 *eax = cpu->phys_bits; 5854 } 5855 *ebx = env->features[FEAT_8000_0008_EBX]; 5856 if (cs->nr_cores * cs->nr_threads > 1) { 5857 /* 5858 * Bits 15:12 is "The number of bits in the initial 5859 * Core::X86::Apic::ApicId[ApicId] value that indicate 5860 * thread ID within a package". 
This is already stored at 5861 * CPUX86State::pkg_offset. 5862 * Bits 7:0 is "The number of threads in the package is NC+1" 5863 */ 5864 *ecx = (env->pkg_offset << 12) | 5865 ((cs->nr_cores * cs->nr_threads) - 1); 5866 } else { 5867 *ecx = 0; 5868 } 5869 *edx = 0; 5870 break; 5871 case 0x8000000A: 5872 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5873 *eax = 0x00000001; /* SVM Revision */ 5874 *ebx = 0x00000010; /* nr of ASIDs */ 5875 *ecx = 0; 5876 *edx = env->features[FEAT_SVM]; /* optional features */ 5877 } else { 5878 *eax = 0; 5879 *ebx = 0; 5880 *ecx = 0; 5881 *edx = 0; 5882 } 5883 break; 5884 case 0x8000001D: 5885 *eax = 0; 5886 if (cpu->cache_info_passthrough) { 5887 host_cpuid(index, count, eax, ebx, ecx, edx); 5888 break; 5889 } 5890 switch (count) { 5891 case 0: /* L1 dcache info */ 5892 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, 5893 &topo_info, eax, ebx, ecx, edx); 5894 break; 5895 case 1: /* L1 icache info */ 5896 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, 5897 &topo_info, eax, ebx, ecx, edx); 5898 break; 5899 case 2: /* L2 cache info */ 5900 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, 5901 &topo_info, eax, ebx, ecx, edx); 5902 break; 5903 case 3: /* L3 cache info */ 5904 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, 5905 &topo_info, eax, ebx, ecx, edx); 5906 break; 5907 default: /* end of info */ 5908 *eax = *ebx = *ecx = *edx = 0; 5909 break; 5910 } 5911 break; 5912 case 0x8000001E: 5913 assert(cpu->core_id <= 255); 5914 encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx); 5915 break; 5916 case 0xC0000000: 5917 *eax = env->cpuid_xlevel2; 5918 *ebx = 0; 5919 *ecx = 0; 5920 *edx = 0; 5921 break; 5922 case 0xC0000001: 5923 /* Support for VIA CPU's CPUID instruction */ 5924 *eax = env->cpuid_version; 5925 *ebx = 0; 5926 *ecx = 0; 5927 *edx = env->features[FEAT_C000_0001_EDX]; 5928 break; 5929 case 0xC0000002: 5930 case 0xC0000003: 5931 case 0xC0000004: 5932 /* Reserved for the future, and now filled with zero */ 5933 *eax = 0; 5934 *ebx = 0; 5935 *ecx = 0; 5936 *edx = 0; 5937 break; 5938 case 0x8000001F: 5939 *eax = sev_enabled() ? 
0x2 : 0; 5940 *ebx = sev_get_cbit_position(); 5941 *ebx |= sev_get_reduced_phys_bits() << 6; 5942 *ecx = 0; 5943 *edx = 0; 5944 break; 5945 default: 5946 /* reserved values: zero */ 5947 *eax = 0; 5948 *ebx = 0; 5949 *ecx = 0; 5950 *edx = 0; 5951 break; 5952 } 5953 } 5954 5955 static void x86_cpu_reset(DeviceState *dev) 5956 { 5957 CPUState *s = CPU(dev); 5958 X86CPU *cpu = X86_CPU(s); 5959 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 5960 CPUX86State *env = &cpu->env; 5961 target_ulong cr4; 5962 uint64_t xcr0; 5963 int i; 5964 5965 xcc->parent_reset(dev); 5966 5967 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 5968 5969 env->old_exception = -1; 5970 5971 /* init to reset state */ 5972 5973 env->hflags2 |= HF2_GIF_MASK; 5974 5975 cpu_x86_update_cr0(env, 0x60000010); 5976 env->a20_mask = ~0x0; 5977 env->smbase = 0x30000; 5978 env->msr_smi_count = 0; 5979 5980 env->idt.limit = 0xffff; 5981 env->gdt.limit = 0xffff; 5982 env->ldt.limit = 0xffff; 5983 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 5984 env->tr.limit = 0xffff; 5985 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 5986 5987 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 5988 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 5989 DESC_R_MASK | DESC_A_MASK); 5990 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 5991 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5992 DESC_A_MASK); 5993 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 5994 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5995 DESC_A_MASK); 5996 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 5997 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5998 DESC_A_MASK); 5999 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 6000 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6001 DESC_A_MASK); 6002 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 6003 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6004 DESC_A_MASK); 6005 6006 env->eip = 0xfff0; 6007 env->regs[R_EDX] = env->cpuid_version; 6008 6009 env->eflags = 0x2; 6010 6011 /* FPU init */ 6012 for (i = 0; i < 8; i++) { 6013 env->fptags[i] = 1; 6014 } 6015 cpu_set_fpuc(env, 0x37f); 6016 6017 env->mxcsr = 0x1f80; 6018 /* All units are in INIT state. */ 6019 env->xstate_bv = 0; 6020 6021 env->pat = 0x0007040600070406ULL; 6022 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6023 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6024 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6025 } 6026 6027 memset(env->dr, 0, sizeof(env->dr)); 6028 env->dr[6] = DR6_FIXED_1; 6029 env->dr[7] = DR7_FIXED_1; 6030 cpu_breakpoint_remove_all(s, BP_CPU); 6031 cpu_watchpoint_remove_all(s, BP_CPU); 6032 6033 cr4 = 0; 6034 xcr0 = XSTATE_FP_MASK; 6035 6036 #ifdef CONFIG_USER_ONLY 6037 /* Enable all the features for user-mode. */ 6038 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6039 xcr0 |= XSTATE_SSE_MASK; 6040 } 6041 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6042 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6043 if (env->features[esa->feature] & esa->bits) { 6044 xcr0 |= 1ull << i; 6045 } 6046 } 6047 6048 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6049 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6050 } 6051 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6052 cr4 |= CR4_FSGSBASE_MASK; 6053 } 6054 #endif 6055 6056 env->xcr0 = xcr0; 6057 cpu_x86_update_cr4(env, cr4); 6058 6059 /* 6060 * SDM 11.11.5 requires: 6061 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6062 * - IA32_MTRR_PHYSMASKn.V = 0 6063 * All other bits are undefined. For simplification, zero it all. 
6064 */ 6065 env->mtrr_deftype = 0; 6066 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6067 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6068 6069 env->interrupt_injected = -1; 6070 env->exception_nr = -1; 6071 env->exception_pending = 0; 6072 env->exception_injected = 0; 6073 env->exception_has_payload = false; 6074 env->exception_payload = 0; 6075 env->nmi_injected = false; 6076 #if !defined(CONFIG_USER_ONLY) 6077 /* We hard-wire the BSP to the first CPU. */ 6078 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6079 6080 s->halted = !cpu_is_bsp(cpu); 6081 6082 if (kvm_enabled()) { 6083 kvm_arch_reset_vcpu(cpu); 6084 } 6085 else if (hvf_enabled()) { 6086 hvf_reset_vcpu(s); 6087 } 6088 #endif 6089 } 6090 6091 #ifndef CONFIG_USER_ONLY 6092 bool cpu_is_bsp(X86CPU *cpu) 6093 { 6094 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6095 } 6096 6097 /* TODO: remove me, when reset over QOM tree is implemented */ 6098 static void x86_cpu_machine_reset_cb(void *opaque) 6099 { 6100 X86CPU *cpu = opaque; 6101 cpu_reset(CPU(cpu)); 6102 } 6103 #endif 6104 6105 static void mce_init(X86CPU *cpu) 6106 { 6107 CPUX86State *cenv = &cpu->env; 6108 unsigned int bank; 6109 6110 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6111 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6112 (CPUID_MCE | CPUID_MCA)) { 6113 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6114 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6115 cenv->mcg_ctl = ~(uint64_t)0; 6116 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6117 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6118 } 6119 } 6120 } 6121 6122 #ifndef CONFIG_USER_ONLY 6123 APICCommonClass *apic_get_class(void) 6124 { 6125 const char *apic_type = "apic"; 6126 6127 /* TODO: in-kernel irqchip for hvf */ 6128 if (kvm_apic_in_kernel()) { 6129 apic_type = "kvm-apic"; 6130 } else if (xen_enabled()) { 6131 apic_type = "xen-apic"; 6132 } 6133 6134 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6135 } 6136 6137 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6138 { 6139 APICCommonState *apic; 6140 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6141 6142 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6143 6144 object_property_add_child(OBJECT(cpu), "lapic", 6145 OBJECT(cpu->apic_state)); 6146 object_unref(OBJECT(cpu->apic_state)); 6147 6148 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6149 /* TODO: convert to link<> */ 6150 apic = APIC_COMMON(cpu->apic_state); 6151 apic->cpu = cpu; 6152 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6153 } 6154 6155 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6156 { 6157 APICCommonState *apic; 6158 static bool apic_mmio_map_once; 6159 6160 if (cpu->apic_state == NULL) { 6161 return; 6162 } 6163 qdev_realize(DEVICE(cpu->apic_state), NULL, errp); 6164 6165 /* Map APIC MMIO area */ 6166 apic = APIC_COMMON(cpu->apic_state); 6167 if (!apic_mmio_map_once) { 6168 memory_region_add_subregion_overlap(get_system_memory(), 6169 apic->apicbase & 6170 MSR_IA32_APICBASE_BASE, 6171 &apic->io_memory, 6172 0x1000); 6173 apic_mmio_map_once = true; 6174 } 6175 } 6176 6177 static void x86_cpu_machine_done(Notifier *n, void *unused) 6178 { 6179 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6180 MemoryRegion *smram = 6181 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6182 6183 if (smram) { 6184 cpu->smram = g_new(MemoryRegion, 1); 6185 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 6186 smram, 0, 4 * GiB); 6187 
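/* The alias covers the first 4 GiB of the machine's SMRAM region; below it
 * is enabled and layered above normal memory in the SMM address space with
 * priority 1. */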
memory_region_set_enabled(cpu->smram, true); 6188 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6189 } 6190 } 6191 #else 6192 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6193 { 6194 } 6195 #endif 6196 6197 /* Note: Only safe for use on x86(-64) hosts */ 6198 static uint32_t x86_host_phys_bits(void) 6199 { 6200 uint32_t eax; 6201 uint32_t host_phys_bits; 6202 6203 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6204 if (eax >= 0x80000008) { 6205 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6206 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6207 * at 23:16 that can specify a maximum physical address bits for 6208 * the guest that can override this value; but I've not seen 6209 * anything with that set. 6210 */ 6211 host_phys_bits = eax & 0xff; 6212 } else { 6213 /* It's an odd 64 bit machine that doesn't have the leaf for 6214 * physical address bits; fall back to 36 that's most older 6215 * Intel. 6216 */ 6217 host_phys_bits = 36; 6218 } 6219 6220 return host_phys_bits; 6221 } 6222 6223 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6224 { 6225 if (*min < value) { 6226 *min = value; 6227 } 6228 } 6229 6230 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6231 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6232 { 6233 CPUX86State *env = &cpu->env; 6234 FeatureWordInfo *fi = &feature_word_info[w]; 6235 uint32_t eax = fi->cpuid.eax; 6236 uint32_t region = eax & 0xF0000000; 6237 6238 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6239 if (!env->features[w]) { 6240 return; 6241 } 6242 6243 switch (region) { 6244 case 0x00000000: 6245 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6246 break; 6247 case 0x80000000: 6248 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6249 break; 6250 case 0xC0000000: 6251 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6252 break; 6253 } 6254 6255 if (eax == 7) { 6256 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6257 fi->cpuid.ecx); 6258 } 6259 } 6260 6261 /* Calculate XSAVE components based on the configured CPU feature flags */ 6262 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6263 { 6264 CPUX86State *env = &cpu->env; 6265 int i; 6266 uint64_t mask; 6267 6268 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6269 return; 6270 } 6271 6272 mask = 0; 6273 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6274 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6275 if (env->features[esa->feature] & esa->bits) { 6276 mask |= (1ULL << i); 6277 } 6278 } 6279 6280 env->features[FEAT_XSAVE_COMP_LO] = mask; 6281 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6282 } 6283 6284 /***** Steps involved on loading and filtering CPUID data 6285 * 6286 * When initializing and realizing a CPU object, the steps 6287 * involved in setting up CPUID data are: 6288 * 6289 * 1) Loading CPU model definition (X86CPUDefinition). This is 6290 * implemented by x86_cpu_load_model() and should be completely 6291 * transparent, as it is done automatically by instance_init. 6292 * No code should need to look at X86CPUDefinition structs 6293 * outside instance_init. 6294 * 6295 * 2) CPU expansion. This is done by realize before CPUID 6296 * filtering, and will make sure host/accelerator data is 6297 * loaded for CPU models that depend on host capabilities 6298 * (e.g. "host"). Done by x86_cpu_expand_features(). 6299 * 6300 * 3) CPUID filtering. 
This initializes extra data related to 6301 * CPUID, and checks if the host supports all capabilities 6302 * required by the CPU. Runnability of a CPU model is 6303 * determined at this step. Done by x86_cpu_filter_features(). 6304 * 6305 * Some operations don't require all steps to be performed. 6306 * More precisely: 6307 * 6308 * - CPU instance creation (instance_init) will run only CPU 6309 * model loading. CPU expansion can't run at instance_init-time 6310 * because host/accelerator data may be not available yet. 6311 * - CPU realization will perform both CPU model expansion and CPUID 6312 * filtering, and return an error in case one of them fails. 6313 * - query-cpu-definitions needs to run all 3 steps. It needs 6314 * to run CPUID filtering, as the 'unavailable-features' 6315 * field is set based on the filtering results. 6316 * - The query-cpu-model-expansion QMP command only needs to run 6317 * CPU model loading and CPU expansion. It should not filter 6318 * any CPUID data based on host capabilities. 6319 */ 6320 6321 /* Expand CPU configuration data, based on configured features 6322 * and host/accelerator capabilities when appropriate. 6323 */ 6324 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6325 { 6326 CPUX86State *env = &cpu->env; 6327 FeatureWord w; 6328 int i; 6329 GList *l; 6330 Error *local_err = NULL; 6331 6332 for (l = plus_features; l; l = l->next) { 6333 const char *prop = l->data; 6334 if (!object_property_set_bool(OBJECT(cpu), prop, true, &local_err)) { 6335 goto out; 6336 } 6337 } 6338 6339 for (l = minus_features; l; l = l->next) { 6340 const char *prop = l->data; 6341 if (!object_property_set_bool(OBJECT(cpu), prop, false, &local_err)) { 6342 goto out; 6343 } 6344 } 6345 6346 /*TODO: Now cpu->max_features doesn't overwrite features 6347 * set using QOM properties, and we can convert 6348 * plus_features & minus_features to global properties 6349 * inside x86_cpu_parse_featurestr() too. 6350 */ 6351 if (cpu->max_features) { 6352 for (w = 0; w < FEATURE_WORDS; w++) { 6353 /* Override only features that weren't set explicitly 6354 * by the user. 6355 */ 6356 env->features[w] |= 6357 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6358 ~env->user_features[w] & 6359 ~feature_word_info[w].no_autoenable_flags; 6360 } 6361 } 6362 6363 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6364 FeatureDep *d = &feature_dependencies[i]; 6365 if (!(env->features[d->from.index] & d->from.mask)) { 6366 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6367 6368 /* Not an error unless the dependent feature was added explicitly. 
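 * Only bits the user explicitly requested (tracked in user_features) are
 * passed to mark_unavailable_features() and warned about; the remaining
 * dependent bits are cleared silently.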
*/ 6369 mark_unavailable_features(cpu, d->to.index, 6370 unavailable_features & env->user_features[d->to.index], 6371 "This feature depends on other features that were not requested"); 6372 6373 env->user_features[d->to.index] |= unavailable_features; 6374 env->features[d->to.index] &= ~unavailable_features; 6375 } 6376 } 6377 6378 if (!kvm_enabled() || !cpu->expose_kvm) { 6379 env->features[FEAT_KVM] = 0; 6380 } 6381 6382 x86_cpu_enable_xsave_components(cpu); 6383 6384 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 6385 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6386 if (cpu->full_cpuid_auto_level) { 6387 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6388 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6389 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6390 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6391 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6392 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6393 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6394 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6395 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6396 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6397 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6398 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6399 6400 /* Intel Processor Trace requires CPUID[0x14] */ 6401 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { 6402 if (cpu->intel_pt_auto_level) { 6403 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6404 } else if (cpu->env.cpuid_min_level < 0x14) { 6405 mark_unavailable_features(cpu, FEAT_7_0_EBX, 6406 CPUID_7_0_EBX_INTEL_PT, 6407 "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\""); 6408 } 6409 } 6410 6411 /* CPU topology with multi-dies support requires CPUID[0x1F] */ 6412 if (env->nr_dies > 1) { 6413 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6414 } 6415 6416 /* SVM requires CPUID[0x8000000A] */ 6417 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6418 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6419 } 6420 6421 /* SEV requires CPUID[0x8000001F] */ 6422 if (sev_enabled()) { 6423 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6424 } 6425 } 6426 6427 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6428 if (env->cpuid_level_func7 == UINT32_MAX) { 6429 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6430 } 6431 if (env->cpuid_level == UINT32_MAX) { 6432 env->cpuid_level = env->cpuid_min_level; 6433 } 6434 if (env->cpuid_xlevel == UINT32_MAX) { 6435 env->cpuid_xlevel = env->cpuid_min_xlevel; 6436 } 6437 if (env->cpuid_xlevel2 == UINT32_MAX) { 6438 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6439 } 6440 6441 out: 6442 if (local_err != NULL) { 6443 error_propagate(errp, local_err); 6444 } 6445 } 6446 6447 /* 6448 * Finishes initialization of CPUID data, filters CPU feature 6449 * words based on host availability of each feature. 6450 * 6451 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 6452 */ 6453 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6454 { 6455 CPUX86State *env = &cpu->env; 6456 FeatureWord w; 6457 const char *prefix = NULL; 6458 6459 if (verbose) { 6460 prefix = accel_uses_host_cpuid() 6461 ? 
"host doesn't support requested feature" 6462 : "TCG doesn't support requested feature"; 6463 } 6464 6465 for (w = 0; w < FEATURE_WORDS; w++) { 6466 uint64_t host_feat = 6467 x86_cpu_get_supported_feature_word(w, false); 6468 uint64_t requested_features = env->features[w]; 6469 uint64_t unavailable_features = requested_features & ~host_feat; 6470 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6471 } 6472 6473 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6474 kvm_enabled()) { 6475 KVMState *s = CPU(cpu)->kvm_state; 6476 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6477 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6478 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6479 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6480 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6481 6482 if (!eax_0 || 6483 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6484 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6485 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6486 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6487 INTEL_PT_ADDR_RANGES_NUM) || 6488 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6489 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6490 (ecx_0 & INTEL_PT_IP_LIP)) { 6491 /* 6492 * Processor Trace capabilities aren't configurable, so if the 6493 * host can't emulate the capabilities we report on 6494 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6495 */ 6496 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6497 } 6498 } 6499 } 6500 6501 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6502 { 6503 CPUState *cs = CPU(dev); 6504 X86CPU *cpu = X86_CPU(dev); 6505 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6506 CPUX86State *env = &cpu->env; 6507 Error *local_err = NULL; 6508 static bool ht_warned; 6509 6510 if (xcc->host_cpuid_required) { 6511 if (!accel_uses_host_cpuid()) { 6512 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6513 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6514 goto out; 6515 } 6516 } 6517 6518 if (cpu->max_features && accel_uses_host_cpuid()) { 6519 if (enable_cpu_pm) { 6520 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6521 &cpu->mwait.ecx, &cpu->mwait.edx); 6522 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6523 } 6524 if (kvm_enabled() && cpu->ucode_rev == 0) { 6525 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, 6526 MSR_IA32_UCODE_REV); 6527 } 6528 } 6529 6530 if (cpu->ucode_rev == 0) { 6531 /* The default is the same as KVM's. */ 6532 if (IS_AMD_CPU(env)) { 6533 cpu->ucode_rev = 0x01000065; 6534 } else { 6535 cpu->ucode_rev = 0x100000000ULL; 6536 } 6537 } 6538 6539 /* mwait extended info: needed for Core compatibility */ 6540 /* We always wake on interrupt even if host does not have the capability */ 6541 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 6542 6543 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 6544 error_setg(errp, "apic-id property was not initialized properly"); 6545 return; 6546 } 6547 6548 x86_cpu_expand_features(cpu, &local_err); 6549 if (local_err) { 6550 goto out; 6551 } 6552 6553 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); 6554 6555 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { 6556 error_setg(&local_err, 6557 accel_uses_host_cpuid() ? 
6558 "Host doesn't support requested features" : 6559 "TCG doesn't support requested features"); 6560 goto out; 6561 } 6562 6563 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 6564 * CPUID[1].EDX. 6565 */ 6566 if (IS_AMD_CPU(env)) { 6567 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 6568 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 6569 & CPUID_EXT2_AMD_ALIASES); 6570 } 6571 6572 /* For 64-bit systems, decide how many physical address bits to present. 6573 * Ideally this should match the host; anything other than the host value 6574 * can cause incorrect guest behaviour. 6575 * QEMU used to pick the magic value of 40 bits, which corresponds to 6576 * consumer AMD devices but nothing else. 6577 */ 6578 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 6579 if (accel_uses_host_cpuid()) { 6580 uint32_t host_phys_bits = x86_host_phys_bits(); 6581 static bool warned; 6582 6583 /* Print a warning if the user set it to a value that's not the 6584 * host value. 6585 */ 6586 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 6587 !warned) { 6588 warn_report("Host physical bits (%u)" 6589 " does not match phys-bits property (%u)", 6590 host_phys_bits, cpu->phys_bits); 6591 warned = true; 6592 } 6593 6594 if (cpu->host_phys_bits) { 6595 /* The user asked for us to use the host physical bits */ 6596 cpu->phys_bits = host_phys_bits; 6597 if (cpu->host_phys_bits_limit && 6598 cpu->phys_bits > cpu->host_phys_bits_limit) { 6599 cpu->phys_bits = cpu->host_phys_bits_limit; 6600 } 6601 } 6602 6603 if (cpu->phys_bits && 6604 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 6605 cpu->phys_bits < 32)) { 6606 error_setg(errp, "phys-bits should be between 32 and %u " 6607 " (but is %u)", 6608 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 6609 return; 6610 } 6611 } else { 6612 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 6613 error_setg(errp, "TCG only supports phys-bits=%u", 6614 TCG_PHYS_ADDR_BITS); 6615 return; 6616 } 6617 } 6618 /* 0 means it was not explicitly set by the user (or by machine 6619 * compat_props or by the host code above). In this case, the default 6620 * is the value used by TCG (40). 6621 */ 6622 if (cpu->phys_bits == 0) { 6623 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 6624 } 6625 } else { 6626 /* For 32-bit systems, don't use the user-set value, but keep 6627 * phys_bits consistent with what we tell the guest.
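 * (A guest with PSE36 gets 36 physical bits, anything else 32, matching
 * what cpu_x86_cpuid() reports for leaf 0x80000008.)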
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32-bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0,
                                            cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * compensates by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on the inputs (sockets, cores, threads), it is still better to
     * give users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the check would be incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading (%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling the topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev);
}

typedef struct BitProperty {
    FeatureWord w;
    uint64_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint64_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

/* Register a boolean property to get/set a single bit in a feature word.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint64_t mask = (1ULL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias().
     */
    assert(!strchr(name, '_'));
    /* Aliases don't use "|" delimiters anymore; they are registered
     * manually using object_property_add_alias().
     */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}

#if !defined(CONFIG_USER_ONLY)
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}

static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}
#endif /* !CONFIG_USER_ONLY */

static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    env->nr_dies = 1;
    env->nr_nodes = 1;
    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: it lists the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL);

#if !defined(CONFIG_USER_ONLY)
    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL);
#endif

    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 64; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    object_property_add_alias(obj, "sse3", obj, "pni");
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq");
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1");
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2");
    object_property_add_alias(obj, "xd", obj, "nx");
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt");
    object_property_add_alias(obj, "i64", obj, "lm");

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl");
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust");
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt");
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm");
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy");
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr");
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core");
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb");
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay");
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu");
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf");
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time");
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi");
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt");
    object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control");
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock");
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save");
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale");
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean");
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter");
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1");
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2");

    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model);
    }
}

static int64_t x86_cpu_get_arch_id(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->apic_id;
}

static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }
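
    /*
     * With the Global Interrupt Flag set, the remaining request bits are
     * checked below in decreasing priority: SMI (unless already in SMM),
     * NMI (unless NMIs are currently blocked), MCE, external interrupts
     * (subject to virtual-interrupt masking and EFLAGS.IF), and finally
     * virtual interrupts (CPU_INTERRUPT_VIRQ).
     */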
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
7154 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0), 7155 #else 7156 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID), 7157 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1), 7158 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1), 7159 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1), 7160 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1), 7161 #endif 7162 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID), 7163 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false), 7164 7165 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts, 7166 HYPERV_SPINLOCK_NEVER_RETRY), 7167 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features, 7168 HYPERV_FEAT_RELAXED, 0), 7169 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features, 7170 HYPERV_FEAT_VAPIC, 0), 7171 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features, 7172 HYPERV_FEAT_TIME, 0), 7173 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features, 7174 HYPERV_FEAT_CRASH, 0), 7175 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features, 7176 HYPERV_FEAT_RESET, 0), 7177 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features, 7178 HYPERV_FEAT_VPINDEX, 0), 7179 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features, 7180 HYPERV_FEAT_RUNTIME, 0), 7181 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features, 7182 HYPERV_FEAT_SYNIC, 0), 7183 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features, 7184 HYPERV_FEAT_STIMER, 0), 7185 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features, 7186 HYPERV_FEAT_FREQUENCIES, 0), 7187 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7188 HYPERV_FEAT_REENLIGHTENMENT, 0), 7189 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7190 HYPERV_FEAT_TLBFLUSH, 0), 7191 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7192 HYPERV_FEAT_EVMCS, 0), 7193 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7194 HYPERV_FEAT_IPI, 0), 7195 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7196 HYPERV_FEAT_STIMER_DIRECT, 0), 7197 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7198 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7199 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7200 7201 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7202 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7203 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7204 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7205 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7206 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7207 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7208 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7209 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7210 UINT32_MAX), 7211 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7212 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7213 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7214 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7215 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7216 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7217 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7218 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7219 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 7220 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7221 
DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7222 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7223 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7224 false), 7225 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7226 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7227 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7228 true), 7229 /* 7230 * lecacy_cache defaults to true unless the CPU model provides its 7231 * own cache information (see x86_cpu_load_def()). 7232 */ 7233 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7234 7235 /* 7236 * From "Requirements for Implementing the Microsoft 7237 * Hypervisor Interface": 7238 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7239 * 7240 * "Starting with Windows Server 2012 and Windows 8, if 7241 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7242 * the hypervisor imposes no specific limit to the number of VPs. 7243 * In this case, Windows Server 2012 guest VMs may use more than 7244 * 64 VPs, up to the maximum supported number of processors applicable 7245 * to the specific Windows version being used." 7246 */ 7247 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 7248 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only, 7249 false), 7250 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, 7251 true), 7252 DEFINE_PROP_END_OF_LIST() 7253 }; 7254 7255 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 7256 { 7257 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7258 CPUClass *cc = CPU_CLASS(oc); 7259 DeviceClass *dc = DEVICE_CLASS(oc); 7260 7261 device_class_set_parent_realize(dc, x86_cpu_realizefn, 7262 &xcc->parent_realize); 7263 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 7264 &xcc->parent_unrealize); 7265 device_class_set_props(dc, x86_cpu_properties); 7266 7267 device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset); 7268 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 7269 7270 cc->class_by_name = x86_cpu_class_by_name; 7271 cc->parse_features = x86_cpu_parse_featurestr; 7272 cc->has_work = x86_cpu_has_work; 7273 #ifdef CONFIG_TCG 7274 cc->do_interrupt = x86_cpu_do_interrupt; 7275 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; 7276 #endif 7277 cc->dump_state = x86_cpu_dump_state; 7278 cc->set_pc = x86_cpu_set_pc; 7279 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; 7280 cc->gdb_read_register = x86_cpu_gdb_read_register; 7281 cc->gdb_write_register = x86_cpu_gdb_write_register; 7282 cc->get_arch_id = x86_cpu_get_arch_id; 7283 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 7284 #ifndef CONFIG_USER_ONLY 7285 cc->asidx_from_attrs = x86_asidx_from_attrs; 7286 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 7287 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug; 7288 cc->get_crash_info = x86_cpu_get_crash_info; 7289 cc->write_elf64_note = x86_cpu_write_elf64_note; 7290 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 7291 cc->write_elf32_note = x86_cpu_write_elf32_note; 7292 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 7293 cc->vmsd = &vmstate_x86_cpu; 7294 #endif 7295 cc->gdb_arch_name = x86_gdb_arch_name; 7296 #ifdef TARGET_X86_64 7297 cc->gdb_core_xml_file = "i386-64bit.xml"; 7298 cc->gdb_num_core_regs = 66; 7299 #else 7300 cc->gdb_core_xml_file = "i386-32bit.xml"; 7301 cc->gdb_num_core_regs = 50; 7302 #endif 7303 #if defined(CONFIG_TCG) 
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};

static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)