1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/reset.h" 30 #include "sysemu/hvf.h" 31 #include "sysemu/cpus.h" 32 #include "sysemu/xen.h" 33 #include "kvm_i386.h" 34 #include "sev_i386.h" 35 36 #include "qemu/error-report.h" 37 #include "qemu/module.h" 38 #include "qemu/option.h" 39 #include "qemu/config-file.h" 40 #include "qapi/error.h" 41 #include "qapi/qapi-visit-machine.h" 42 #include "qapi/qapi-visit-run-state.h" 43 #include "qapi/qmp/qdict.h" 44 #include "qapi/qmp/qerror.h" 45 #include "qapi/visitor.h" 46 #include "qom/qom-qobject.h" 47 #include "sysemu/arch_init.h" 48 #include "qapi/qapi-commands-machine-target.h" 49 50 #include "standard-headers/asm-x86/kvm_para.h" 51 52 #include "sysemu/sysemu.h" 53 #include "sysemu/tcg.h" 54 #include "hw/qdev-properties.h" 55 #include "hw/i386/topology.h" 56 #ifndef CONFIG_USER_ONLY 57 #include "exec/address-spaces.h" 58 #include "hw/i386/apic_internal.h" 59 #include "hw/boards.h" 60 #endif 61 62 #include "disas/capstone.h" 63 64 /* Helpers for building CPUID[2] descriptors: */ 65 66 struct CPUID2CacheDescriptorInfo { 67 enum CacheType type; 68 int level; 69 int size; 70 int line_size; 71 int associativity; 72 }; 73 74 /* 75 * Known CPUID 2 cache descriptors. 
76 * From Intel SDM Volume 2A, CPUID instruction 77 */ 78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 80 .associativity = 4, .line_size = 32, }, 81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 82 .associativity = 4, .line_size = 32, }, 83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 84 .associativity = 4, .line_size = 64, }, 85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 86 .associativity = 2, .line_size = 32, }, 87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 88 .associativity = 4, .line_size = 32, }, 89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 90 .associativity = 4, .line_size = 64, }, 91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 92 .associativity = 6, .line_size = 64, }, 93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 94 .associativity = 2, .line_size = 64, }, 95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 96 .associativity = 8, .line_size = 64, }, 97 /* lines per sector is not supported cpuid2_cache_descriptor(), 98 * so descriptors 0x22, 0x23 are not included 99 */ 100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 101 .associativity = 16, .line_size = 64, }, 102 /* lines per sector is not supported cpuid2_cache_descriptor(), 103 * so descriptors 0x25, 0x20 are not included 104 */ 105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 106 .associativity = 8, .line_size = 64, }, 107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 108 .associativity = 8, .line_size = 64, }, 109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 110 .associativity = 4, .line_size = 32, }, 111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 112 .associativity = 4, .line_size = 32, }, 113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 114 .associativity = 4, .line_size = 32, }, 115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 116 .associativity = 4, .line_size = 32, }, 117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 118 .associativity = 4, .line_size = 32, }, 119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 120 .associativity = 4, .line_size = 64, }, 121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 122 .associativity = 8, .line_size = 64, }, 123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 124 .associativity = 12, .line_size = 64, }, 125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 127 .associativity = 12, .line_size = 64, }, 128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 129 .associativity = 16, .line_size = 64, }, 130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 131 .associativity = 12, .line_size = 64, }, 132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 133 .associativity = 16, .line_size = 64, }, 134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 135 .associativity = 24, .line_size = 64, }, 136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 137 .associativity = 8, .line_size = 64, }, 138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 139 .associativity = 4, .line_size = 64, }, 140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 141 .associativity = 4, .line_size = 64, }, 
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 143 .associativity = 4, .line_size = 64, }, 144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 145 .associativity = 4, .line_size = 64, }, 146 /* lines per sector is not supported cpuid2_cache_descriptor(), 147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 148 */ 149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 150 .associativity = 8, .line_size = 64, }, 151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 152 .associativity = 2, .line_size = 64, }, 153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 154 .associativity = 8, .line_size = 64, }, 155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 156 .associativity = 8, .line_size = 32, }, 157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 158 .associativity = 8, .line_size = 32, }, 159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 160 .associativity = 8, .line_size = 32, }, 161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 162 .associativity = 8, .line_size = 32, }, 163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 164 .associativity = 4, .line_size = 64, }, 165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 166 .associativity = 8, .line_size = 64, }, 167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 168 .associativity = 4, .line_size = 64, }, 169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 170 .associativity = 4, .line_size = 64, }, 171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 172 .associativity = 4, .line_size = 64, }, 173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 174 .associativity = 8, .line_size = 64, }, 175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 176 .associativity = 8, .line_size = 64, }, 177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 178 .associativity = 8, .line_size = 64, }, 179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 180 .associativity = 12, .line_size = 64, }, 181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 182 .associativity = 12, .line_size = 64, }, 183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 184 .associativity = 12, .line_size = 64, }, 185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 186 .associativity = 16, .line_size = 64, }, 187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 188 .associativity = 16, .line_size = 64, }, 189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 190 .associativity = 16, .line_size = 64, }, 191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 192 .associativity = 24, .line_size = 64, }, 193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 194 .associativity = 24, .line_size = 64, }, 195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 196 .associativity = 24, .line_size = 64, }, 197 }; 198 199 /* 200 * "CPUID leaf 2 does not report cache descriptor information, 201 * use CPUID leaf 4 to query cache parameters" 202 */ 203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 204 205 /* 206 * Return a CPUID 2 cache descriptor for a given cache. 
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 208 */ 209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 210 { 211 int i; 212 213 assert(cache->size > 0); 214 assert(cache->level > 0); 215 assert(cache->line_size > 0); 216 assert(cache->associativity > 0); 217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 219 if (d->level == cache->level && d->type == cache->type && 220 d->size == cache->size && d->line_size == cache->line_size && 221 d->associativity == cache->associativity) { 222 return i; 223 } 224 } 225 226 return CACHE_DESCRIPTOR_UNAVAILABLE; 227 } 228 229 /* CPUID Leaf 4 constants: */ 230 231 /* EAX: */ 232 #define CACHE_TYPE_D 1 233 #define CACHE_TYPE_I 2 234 #define CACHE_TYPE_UNIFIED 3 235 236 #define CACHE_LEVEL(l) (l << 5) 237 238 #define CACHE_SELF_INIT_LEVEL (1 << 8) 239 240 /* EDX: */ 241 #define CACHE_NO_INVD_SHARING (1 << 0) 242 #define CACHE_INCLUSIVE (1 << 1) 243 #define CACHE_COMPLEX_IDX (1 << 2) 244 245 /* Encode CacheType for CPUID[4].EAX */ 246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 249 0 /* Invalid value */) 250 251 252 /* Encode cache info for CPUID[4] */ 253 static void encode_cache_cpuid4(CPUCacheInfo *cache, 254 int num_apic_ids, int num_cores, 255 uint32_t *eax, uint32_t *ebx, 256 uint32_t *ecx, uint32_t *edx) 257 { 258 assert(cache->size == cache->line_size * cache->associativity * 259 cache->partitions * cache->sets); 260 261 assert(num_apic_ids > 0); 262 *eax = CACHE_TYPE(cache->type) | 263 CACHE_LEVEL(cache->level) | 264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | 265 ((num_cores - 1) << 26) | 266 ((num_apic_ids - 1) << 14); 267 268 assert(cache->line_size > 0); 269 assert(cache->partitions > 0); 270 assert(cache->associativity > 0); 271 /* We don't implement fully-associative caches */ 272 assert(cache->associativity < cache->sets); 273 *ebx = (cache->line_size - 1) | 274 ((cache->partitions - 1) << 12) | 275 ((cache->associativity - 1) << 22); 276 277 assert(cache->sets > 0); 278 *ecx = cache->sets - 1; 279 280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 281 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 283 } 284 285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 287 { 288 assert(cache->size % 1024 == 0); 289 assert(cache->lines_per_tag > 0); 290 assert(cache->associativity > 0); 291 assert(cache->line_size > 0); 292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 293 (cache->lines_per_tag << 8) | (cache->line_size); 294 } 295 296 #define ASSOC_FULL 0xFF 297 298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 300 a == 2 ? 0x2 : \ 301 a == 4 ? 0x4 : \ 302 a == 8 ? 0x6 : \ 303 a == 16 ? 0x8 : \ 304 a == 32 ? 0xA : \ 305 a == 48 ? 0xB : \ 306 a == 64 ? 0xC : \ 307 a == 96 ? 0xD : \ 308 a == 128 ? 0xE : \ 309 a == ASSOC_FULL ? 0xF : \ 310 0 /* invalid value */) 311 312 /* 313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 314 * @l3 can be NULL. 
315 */ 316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 317 CPUCacheInfo *l3, 318 uint32_t *ecx, uint32_t *edx) 319 { 320 assert(l2->size % 1024 == 0); 321 assert(l2->associativity > 0); 322 assert(l2->lines_per_tag > 0); 323 assert(l2->line_size > 0); 324 *ecx = ((l2->size / 1024) << 16) | 325 (AMD_ENC_ASSOC(l2->associativity) << 12) | 326 (l2->lines_per_tag << 8) | (l2->line_size); 327 328 if (l3) { 329 assert(l3->size % (512 * 1024) == 0); 330 assert(l3->associativity > 0); 331 assert(l3->lines_per_tag > 0); 332 assert(l3->line_size > 0); 333 *edx = ((l3->size / (512 * 1024)) << 18) | 334 (AMD_ENC_ASSOC(l3->associativity) << 12) | 335 (l3->lines_per_tag << 8) | (l3->line_size); 336 } else { 337 *edx = 0; 338 } 339 } 340 341 /* Encode cache info for CPUID[8000001D] */ 342 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, 343 X86CPUTopoInfo *topo_info, 344 uint32_t *eax, uint32_t *ebx, 345 uint32_t *ecx, uint32_t *edx) 346 { 347 uint32_t l3_cores; 348 unsigned nodes = MAX(topo_info->nodes_per_pkg, 1); 349 350 assert(cache->size == cache->line_size * cache->associativity * 351 cache->partitions * cache->sets); 352 353 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 354 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); 355 356 /* L3 is shared among multiple cores */ 357 if (cache->level == 3) { 358 l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg * 359 topo_info->cores_per_die * 360 topo_info->threads_per_core), 361 nodes); 362 *eax |= (l3_cores - 1) << 14; 363 } else { 364 *eax |= ((topo_info->threads_per_core - 1) << 14); 365 } 366 367 assert(cache->line_size > 0); 368 assert(cache->partitions > 0); 369 assert(cache->associativity > 0); 370 /* We don't implement fully-associative caches */ 371 assert(cache->associativity < cache->sets); 372 *ebx = (cache->line_size - 1) | 373 ((cache->partitions - 1) << 12) | 374 ((cache->associativity - 1) << 22); 375 376 assert(cache->sets > 0); 377 *ecx = cache->sets - 1; 378 379 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 380 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 381 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 382 } 383 384 /* Encode cache info for CPUID[8000001E] */ 385 static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu, 386 uint32_t *eax, uint32_t *ebx, 387 uint32_t *ecx, uint32_t *edx) 388 { 389 X86CPUTopoIDs topo_ids = {0}; 390 unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1); 391 int shift; 392 393 x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids); 394 395 *eax = cpu->apic_id; 396 /* 397 * CPUID_Fn8000001E_EBX 398 * 31:16 Reserved 399 * 15:8 Threads per core (The number of threads per core is 400 * Threads per core + 1) 401 * 7:0 Core id (see bit decoding below) 402 * SMT: 403 * 4:3 node id 404 * 2 Core complex id 405 * 1:0 Core id 406 * Non SMT: 407 * 5:4 node id 408 * 3 Core complex id 409 * 1:0 Core id 410 */ 411 *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) | 412 (topo_ids.core_id); 413 /* 414 * CPUID_Fn8000001E_ECX 415 * 31:11 Reserved 416 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) 417 * 7:0 Node id (see bit decoding below) 418 * 2 Socket id 419 * 1:0 Node id 420 */ 421 if (nodes <= 4) { 422 *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id; 423 } else { 424 /* 425 * Node id fix up. Actual hardware supports up to 4 nodes. But with 426 * more than 32 cores, we may end up with more than 4 nodes. 427 * Node id is a combination of socket id and node id. 
Only requirement 428 * here is that this number should be unique accross the system. 429 * Shift the socket id to accommodate more nodes. We dont expect both 430 * socket id and node id to be big number at the same time. This is not 431 * an ideal config but we need to to support it. Max nodes we can have 432 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need 433 * 5 bits for nodes. Find the left most set bit to represent the total 434 * number of nodes. find_last_bit returns last set bit(0 based). Left 435 * shift(+1) the socket id to represent all the nodes. 436 */ 437 nodes -= 1; 438 shift = find_last_bit(&nodes, 8); 439 *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) | 440 topo_ids.node_id; 441 } 442 *edx = 0; 443 } 444 445 /* 446 * Definitions of the hardcoded cache entries we expose: 447 * These are legacy cache values. If there is a need to change any 448 * of these values please use builtin_x86_defs 449 */ 450 451 /* L1 data cache: */ 452 static CPUCacheInfo legacy_l1d_cache = { 453 .type = DATA_CACHE, 454 .level = 1, 455 .size = 32 * KiB, 456 .self_init = 1, 457 .line_size = 64, 458 .associativity = 8, 459 .sets = 64, 460 .partitions = 1, 461 .no_invd_sharing = true, 462 }; 463 464 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 465 static CPUCacheInfo legacy_l1d_cache_amd = { 466 .type = DATA_CACHE, 467 .level = 1, 468 .size = 64 * KiB, 469 .self_init = 1, 470 .line_size = 64, 471 .associativity = 2, 472 .sets = 512, 473 .partitions = 1, 474 .lines_per_tag = 1, 475 .no_invd_sharing = true, 476 }; 477 478 /* L1 instruction cache: */ 479 static CPUCacheInfo legacy_l1i_cache = { 480 .type = INSTRUCTION_CACHE, 481 .level = 1, 482 .size = 32 * KiB, 483 .self_init = 1, 484 .line_size = 64, 485 .associativity = 8, 486 .sets = 64, 487 .partitions = 1, 488 .no_invd_sharing = true, 489 }; 490 491 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 492 static CPUCacheInfo legacy_l1i_cache_amd = { 493 .type = INSTRUCTION_CACHE, 494 .level = 1, 495 .size = 64 * KiB, 496 .self_init = 1, 497 .line_size = 64, 498 .associativity = 2, 499 .sets = 512, 500 .partitions = 1, 501 .lines_per_tag = 1, 502 .no_invd_sharing = true, 503 }; 504 505 /* Level 2 unified cache: */ 506 static CPUCacheInfo legacy_l2_cache = { 507 .type = UNIFIED_CACHE, 508 .level = 2, 509 .size = 4 * MiB, 510 .self_init = 1, 511 .line_size = 64, 512 .associativity = 16, 513 .sets = 4096, 514 .partitions = 1, 515 .no_invd_sharing = true, 516 }; 517 518 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 519 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 520 .type = UNIFIED_CACHE, 521 .level = 2, 522 .size = 2 * MiB, 523 .line_size = 64, 524 .associativity = 8, 525 }; 526 527 528 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 529 static CPUCacheInfo legacy_l2_cache_amd = { 530 .type = UNIFIED_CACHE, 531 .level = 2, 532 .size = 512 * KiB, 533 .line_size = 64, 534 .lines_per_tag = 1, 535 .associativity = 16, 536 .sets = 512, 537 .partitions = 1, 538 }; 539 540 /* Level 3 unified cache: */ 541 static CPUCacheInfo legacy_l3_cache = { 542 .type = UNIFIED_CACHE, 543 .level = 3, 544 .size = 16 * MiB, 545 .line_size = 64, 546 .associativity = 16, 547 .sets = 16384, 548 .partitions = 1, 549 .lines_per_tag = 1, 550 .self_init = true, 551 .inclusive = true, 552 .complex_indexing = true, 553 }; 554 555 /* TLB definitions: */ 556 557 #define L1_DTLB_2M_ASSOC 1 558 #define L1_DTLB_2M_ENTRIES 255 559 #define L1_DTLB_4K_ASSOC 1 560 #define 
L1_DTLB_4K_ENTRIES 255 561 562 #define L1_ITLB_2M_ASSOC 1 563 #define L1_ITLB_2M_ENTRIES 255 564 #define L1_ITLB_4K_ASSOC 1 565 #define L1_ITLB_4K_ENTRIES 255 566 567 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 568 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 569 #define L2_DTLB_4K_ASSOC 4 570 #define L2_DTLB_4K_ENTRIES 512 571 572 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 573 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 574 #define L2_ITLB_4K_ASSOC 4 575 #define L2_ITLB_4K_ENTRIES 512 576 577 /* CPUID Leaf 0x14 constants: */ 578 #define INTEL_PT_MAX_SUBLEAF 0x1 579 /* 580 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 581 * MSR can be accessed; 582 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 583 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 584 * of Intel PT MSRs across warm reset; 585 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 586 */ 587 #define INTEL_PT_MINIMAL_EBX 0xf 588 /* 589 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 590 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 591 * accessed; 592 * bit[01]: ToPA tables can hold any number of output entries, up to the 593 * maximum allowed by the MaskOrTableOffset field of 594 * IA32_RTIT_OUTPUT_MASK_PTRS; 595 * bit[02]: Support Single-Range Output scheme; 596 */ 597 #define INTEL_PT_MINIMAL_ECX 0x7 598 /* generated packets which contain IP payloads have LIP values */ 599 #define INTEL_PT_IP_LIP (1 << 31) 600 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 601 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 602 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 603 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 604 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 605 606 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 607 uint32_t vendor2, uint32_t vendor3) 608 { 609 int i; 610 for (i = 0; i < 4; i++) { 611 dst[i] = vendor1 >> (8 * i); 612 dst[i + 4] = vendor2 >> (8 * i); 613 dst[i + 8] = vendor3 >> (8 * i); 614 } 615 dst[CPUID_VENDOR_SZ] = '\0'; 616 } 617 618 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 619 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 620 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 621 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 622 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 623 CPUID_PSE36 | CPUID_FXSR) 624 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 625 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 626 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 627 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 628 CPUID_PAE | CPUID_SEP | CPUID_APIC) 629 630 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 631 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 632 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 633 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 634 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 635 /* partly implemented: 636 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ 637 /* missing: 638 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 639 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 640 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 641 CPUID_EXT_SSE41 | 
CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 642 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 643 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 644 CPUID_EXT_RDRAND) 645 /* missing: 646 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 647 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 648 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 649 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 650 CPUID_EXT_F16C */ 651 652 #ifdef TARGET_X86_64 653 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 654 #else 655 #define TCG_EXT2_X86_64_FEATURES 0 656 #endif 657 658 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 659 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 660 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 661 TCG_EXT2_X86_64_FEATURES) 662 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 663 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 664 #define TCG_EXT4_FEATURES 0 665 #define TCG_SVM_FEATURES CPUID_SVM_NPT 666 #define TCG_KVM_FEATURES 0 667 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 668 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 669 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 670 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 671 CPUID_7_0_EBX_ERMS) 672 /* missing: 673 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 674 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 675 CPUID_7_0_EBX_RDSEED */ 676 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 677 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 678 CPUID_7_0_ECX_LA57) 679 #define TCG_7_0_EDX_FEATURES 0 680 #define TCG_7_1_EAX_FEATURES 0 681 #define TCG_APM_FEATURES 0 682 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 683 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 684 /* missing: 685 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 686 687 typedef enum FeatureWordType { 688 CPUID_FEATURE_WORD, 689 MSR_FEATURE_WORD, 690 } FeatureWordType; 691 692 typedef struct FeatureWordInfo { 693 FeatureWordType type; 694 /* feature flags names are taken from "Intel Processor Identification and 695 * the CPUID Instruction" and AMD's "CPUID Specification". 696 * In cases of disagreement between feature naming conventions, 697 * aliases may be added. 
698 */ 699 const char *feat_names[64]; 700 union { 701 /* If type==CPUID_FEATURE_WORD */ 702 struct { 703 uint32_t eax; /* Input EAX for CPUID */ 704 bool needs_ecx; /* CPUID instruction uses ECX as input */ 705 uint32_t ecx; /* Input ECX value for CPUID */ 706 int reg; /* output register (R_* constant) */ 707 } cpuid; 708 /* If type==MSR_FEATURE_WORD */ 709 struct { 710 uint32_t index; 711 } msr; 712 }; 713 uint64_t tcg_features; /* Feature flags supported by TCG */ 714 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */ 715 uint64_t migratable_flags; /* Feature flags known to be migratable */ 716 /* Features that shouldn't be auto-enabled by "-cpu host" */ 717 uint64_t no_autoenable_flags; 718 } FeatureWordInfo; 719 720 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 721 [FEAT_1_EDX] = { 722 .type = CPUID_FEATURE_WORD, 723 .feat_names = { 724 "fpu", "vme", "de", "pse", 725 "tsc", "msr", "pae", "mce", 726 "cx8", "apic", NULL, "sep", 727 "mtrr", "pge", "mca", "cmov", 728 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 729 NULL, "ds" /* Intel dts */, "acpi", "mmx", 730 "fxsr", "sse", "sse2", "ss", 731 "ht" /* Intel htt */, "tm", "ia64", "pbe", 732 }, 733 .cpuid = {.eax = 1, .reg = R_EDX, }, 734 .tcg_features = TCG_FEATURES, 735 }, 736 [FEAT_1_ECX] = { 737 .type = CPUID_FEATURE_WORD, 738 .feat_names = { 739 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 740 "ds-cpl", "vmx", "smx", "est", 741 "tm2", "ssse3", "cid", NULL, 742 "fma", "cx16", "xtpr", "pdcm", 743 NULL, "pcid", "dca", "sse4.1", 744 "sse4.2", "x2apic", "movbe", "popcnt", 745 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 746 "avx", "f16c", "rdrand", "hypervisor", 747 }, 748 .cpuid = { .eax = 1, .reg = R_ECX, }, 749 .tcg_features = TCG_EXT_FEATURES, 750 }, 751 /* Feature names that are already defined on feature_name[] but 752 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 753 * names on feat_names below. They are copied automatically 754 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 755 */ 756 [FEAT_8000_0001_EDX] = { 757 .type = CPUID_FEATURE_WORD, 758 .feat_names = { 759 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 760 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 761 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 762 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 763 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 764 "nx", NULL, "mmxext", NULL /* mmx */, 765 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 766 NULL, "lm", "3dnowext", "3dnow", 767 }, 768 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 769 .tcg_features = TCG_EXT2_FEATURES, 770 }, 771 [FEAT_8000_0001_ECX] = { 772 .type = CPUID_FEATURE_WORD, 773 .feat_names = { 774 "lahf-lm", "cmp-legacy", "svm", "extapic", 775 "cr8legacy", "abm", "sse4a", "misalignsse", 776 "3dnowprefetch", "osvw", "ibs", "xop", 777 "skinit", "wdt", NULL, "lwp", 778 "fma4", "tce", NULL, "nodeid-msr", 779 NULL, "tbm", "topoext", "perfctr-core", 780 "perfctr-nb", NULL, NULL, NULL, 781 NULL, NULL, NULL, NULL, 782 }, 783 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 784 .tcg_features = TCG_EXT3_FEATURES, 785 /* 786 * TOPOEXT is always allowed but can't be enabled blindly by 787 * "-cpu host", as it requires consistent cache topology info 788 * to be provided so it doesn't confuse guests. 
789 */ 790 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 791 }, 792 [FEAT_C000_0001_EDX] = { 793 .type = CPUID_FEATURE_WORD, 794 .feat_names = { 795 NULL, NULL, "xstore", "xstore-en", 796 NULL, NULL, "xcrypt", "xcrypt-en", 797 "ace2", "ace2-en", "phe", "phe-en", 798 "pmm", "pmm-en", NULL, NULL, 799 NULL, NULL, NULL, NULL, 800 NULL, NULL, NULL, NULL, 801 NULL, NULL, NULL, NULL, 802 NULL, NULL, NULL, NULL, 803 }, 804 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 805 .tcg_features = TCG_EXT4_FEATURES, 806 }, 807 [FEAT_KVM] = { 808 .type = CPUID_FEATURE_WORD, 809 .feat_names = { 810 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 811 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 812 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 813 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL, 814 NULL, NULL, NULL, NULL, 815 NULL, NULL, NULL, NULL, 816 "kvmclock-stable-bit", NULL, NULL, NULL, 817 NULL, NULL, NULL, NULL, 818 }, 819 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 820 .tcg_features = TCG_KVM_FEATURES, 821 }, 822 [FEAT_KVM_HINTS] = { 823 .type = CPUID_FEATURE_WORD, 824 .feat_names = { 825 "kvm-hint-dedicated", NULL, NULL, NULL, 826 NULL, NULL, NULL, NULL, 827 NULL, NULL, NULL, NULL, 828 NULL, NULL, NULL, NULL, 829 NULL, NULL, NULL, NULL, 830 NULL, NULL, NULL, NULL, 831 NULL, NULL, NULL, NULL, 832 NULL, NULL, NULL, NULL, 833 }, 834 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 835 .tcg_features = TCG_KVM_FEATURES, 836 /* 837 * KVM hints aren't auto-enabled by -cpu host, they need to be 838 * explicitly enabled in the command-line. 839 */ 840 .no_autoenable_flags = ~0U, 841 }, 842 /* 843 * .feat_names are commented out for Hyper-V enlightenments because we 844 * don't want to have two different ways for enabling them on QEMU command 845 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require 846 * enabling several feature bits simultaneously, exposing these bits 847 * individually may just confuse guests. 
848 */ 849 [FEAT_HYPERV_EAX] = { 850 .type = CPUID_FEATURE_WORD, 851 .feat_names = { 852 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 853 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 854 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 855 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 856 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 857 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 858 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 859 NULL, NULL, 860 NULL, NULL, NULL, NULL, 861 NULL, NULL, NULL, NULL, 862 NULL, NULL, NULL, NULL, 863 NULL, NULL, NULL, NULL, 864 }, 865 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 866 }, 867 [FEAT_HYPERV_EBX] = { 868 .type = CPUID_FEATURE_WORD, 869 .feat_names = { 870 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 871 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 872 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 873 NULL /* hv_create_port */, NULL /* hv_connect_port */, 874 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 875 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 876 NULL, NULL, 877 NULL, NULL, NULL, NULL, 878 NULL, NULL, NULL, NULL, 879 NULL, NULL, NULL, NULL, 880 NULL, NULL, NULL, NULL, 881 }, 882 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 883 }, 884 [FEAT_HYPERV_EDX] = { 885 .type = CPUID_FEATURE_WORD, 886 .feat_names = { 887 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 888 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 889 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 890 NULL, NULL, 891 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 892 NULL, NULL, NULL, NULL, 893 NULL, NULL, NULL, NULL, 894 NULL, NULL, NULL, NULL, 895 NULL, NULL, NULL, NULL, 896 NULL, NULL, NULL, NULL, 897 }, 898 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 899 }, 900 [FEAT_HV_RECOMM_EAX] = { 901 .type = CPUID_FEATURE_WORD, 902 .feat_names = { 903 NULL /* hv_recommend_pv_as_switch */, 904 NULL /* hv_recommend_pv_tlbflush_local */, 905 NULL /* hv_recommend_pv_tlbflush_remote */, 906 NULL /* hv_recommend_msr_apic_access */, 907 NULL /* hv_recommend_msr_reset */, 908 NULL /* hv_recommend_relaxed_timing */, 909 NULL /* hv_recommend_dma_remapping */, 910 NULL /* hv_recommend_int_remapping */, 911 NULL /* hv_recommend_x2apic_msrs */, 912 NULL /* hv_recommend_autoeoi_deprecation */, 913 NULL /* hv_recommend_pv_ipi */, 914 NULL /* hv_recommend_ex_hypercalls */, 915 NULL /* hv_hypervisor_is_nested */, 916 NULL /* hv_recommend_int_mbec */, 917 NULL /* hv_recommend_evmcs */, 918 NULL, 919 NULL, NULL, NULL, NULL, 920 NULL, NULL, NULL, NULL, 921 NULL, NULL, NULL, NULL, 922 NULL, NULL, NULL, NULL, 923 }, 924 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 925 }, 926 [FEAT_HV_NESTED_EAX] = { 927 .type = CPUID_FEATURE_WORD, 928 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 929 }, 930 [FEAT_SVM] = { 931 .type = CPUID_FEATURE_WORD, 932 .feat_names = { 933 "npt", "lbrv", "svm-lock", "nrip-save", 934 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 935 NULL, NULL, "pause-filter", NULL, 936 "pfthreshold", NULL, NULL, NULL, 937 NULL, NULL, NULL, NULL, 938 NULL, NULL, NULL, NULL, 939 NULL, NULL, NULL, NULL, 940 NULL, NULL, NULL, NULL, 941 }, 942 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 943 .tcg_features = TCG_SVM_FEATURES, 944 }, 945 [FEAT_7_0_EBX] = { 946 .type = CPUID_FEATURE_WORD, 947 
.feat_names = { 948 "fsgsbase", "tsc-adjust", NULL, "bmi1", 949 "hle", "avx2", NULL, "smep", 950 "bmi2", "erms", "invpcid", "rtm", 951 NULL, NULL, "mpx", NULL, 952 "avx512f", "avx512dq", "rdseed", "adx", 953 "smap", "avx512ifma", "pcommit", "clflushopt", 954 "clwb", "intel-pt", "avx512pf", "avx512er", 955 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 956 }, 957 .cpuid = { 958 .eax = 7, 959 .needs_ecx = true, .ecx = 0, 960 .reg = R_EBX, 961 }, 962 .tcg_features = TCG_7_0_EBX_FEATURES, 963 }, 964 [FEAT_7_0_ECX] = { 965 .type = CPUID_FEATURE_WORD, 966 .feat_names = { 967 NULL, "avx512vbmi", "umip", "pku", 968 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL, 969 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 970 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 971 "la57", NULL, NULL, NULL, 972 NULL, NULL, "rdpid", NULL, 973 NULL, "cldemote", NULL, "movdiri", 974 "movdir64b", NULL, NULL, NULL, 975 }, 976 .cpuid = { 977 .eax = 7, 978 .needs_ecx = true, .ecx = 0, 979 .reg = R_ECX, 980 }, 981 .tcg_features = TCG_7_0_ECX_FEATURES, 982 }, 983 [FEAT_7_0_EDX] = { 984 .type = CPUID_FEATURE_WORD, 985 .feat_names = { 986 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 987 NULL, NULL, NULL, NULL, 988 NULL, NULL, "md-clear", NULL, 989 NULL, NULL, NULL, NULL, 990 NULL, NULL, NULL /* pconfig */, NULL, 991 NULL, NULL, NULL, NULL, 992 NULL, NULL, "spec-ctrl", "stibp", 993 NULL, "arch-capabilities", "core-capability", "ssbd", 994 }, 995 .cpuid = { 996 .eax = 7, 997 .needs_ecx = true, .ecx = 0, 998 .reg = R_EDX, 999 }, 1000 .tcg_features = TCG_7_0_EDX_FEATURES, 1001 }, 1002 [FEAT_7_1_EAX] = { 1003 .type = CPUID_FEATURE_WORD, 1004 .feat_names = { 1005 NULL, NULL, NULL, NULL, 1006 NULL, "avx512-bf16", NULL, NULL, 1007 NULL, NULL, NULL, NULL, 1008 NULL, NULL, NULL, NULL, 1009 NULL, NULL, NULL, NULL, 1010 NULL, NULL, NULL, NULL, 1011 NULL, NULL, NULL, NULL, 1012 NULL, NULL, NULL, NULL, 1013 }, 1014 .cpuid = { 1015 .eax = 7, 1016 .needs_ecx = true, .ecx = 1, 1017 .reg = R_EAX, 1018 }, 1019 .tcg_features = TCG_7_1_EAX_FEATURES, 1020 }, 1021 [FEAT_8000_0007_EDX] = { 1022 .type = CPUID_FEATURE_WORD, 1023 .feat_names = { 1024 NULL, NULL, NULL, NULL, 1025 NULL, NULL, NULL, NULL, 1026 "invtsc", NULL, NULL, NULL, 1027 NULL, NULL, NULL, NULL, 1028 NULL, NULL, NULL, NULL, 1029 NULL, NULL, NULL, NULL, 1030 NULL, NULL, NULL, NULL, 1031 NULL, NULL, NULL, NULL, 1032 }, 1033 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1034 .tcg_features = TCG_APM_FEATURES, 1035 .unmigratable_flags = CPUID_APM_INVTSC, 1036 }, 1037 [FEAT_8000_0008_EBX] = { 1038 .type = CPUID_FEATURE_WORD, 1039 .feat_names = { 1040 "clzero", NULL, "xsaveerptr", NULL, 1041 NULL, NULL, NULL, NULL, 1042 NULL, "wbnoinvd", NULL, NULL, 1043 "ibpb", NULL, NULL, "amd-stibp", 1044 NULL, NULL, NULL, NULL, 1045 NULL, NULL, NULL, NULL, 1046 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1047 NULL, NULL, NULL, NULL, 1048 }, 1049 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1050 .tcg_features = 0, 1051 .unmigratable_flags = 0, 1052 }, 1053 [FEAT_XSAVE] = { 1054 .type = CPUID_FEATURE_WORD, 1055 .feat_names = { 1056 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1057 NULL, NULL, NULL, NULL, 1058 NULL, NULL, NULL, NULL, 1059 NULL, NULL, NULL, NULL, 1060 NULL, NULL, NULL, NULL, 1061 NULL, NULL, NULL, NULL, 1062 NULL, NULL, NULL, NULL, 1063 NULL, NULL, NULL, NULL, 1064 }, 1065 .cpuid = { 1066 .eax = 0xd, 1067 .needs_ecx = true, .ecx = 1, 1068 .reg = R_EAX, 1069 }, 1070 .tcg_features = TCG_XSAVE_FEATURES, 1071 }, 1072 [FEAT_6_EAX] = { 1073 .type = CPUID_FEATURE_WORD, 1074 .feat_names = { 1075 
NULL, NULL, "arat", NULL, 1076 NULL, NULL, NULL, NULL, 1077 NULL, NULL, NULL, NULL, 1078 NULL, NULL, NULL, NULL, 1079 NULL, NULL, NULL, NULL, 1080 NULL, NULL, NULL, NULL, 1081 NULL, NULL, NULL, NULL, 1082 NULL, NULL, NULL, NULL, 1083 }, 1084 .cpuid = { .eax = 6, .reg = R_EAX, }, 1085 .tcg_features = TCG_6_EAX_FEATURES, 1086 }, 1087 [FEAT_XSAVE_COMP_LO] = { 1088 .type = CPUID_FEATURE_WORD, 1089 .cpuid = { 1090 .eax = 0xD, 1091 .needs_ecx = true, .ecx = 0, 1092 .reg = R_EAX, 1093 }, 1094 .tcg_features = ~0U, 1095 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1096 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1097 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1098 XSTATE_PKRU_MASK, 1099 }, 1100 [FEAT_XSAVE_COMP_HI] = { 1101 .type = CPUID_FEATURE_WORD, 1102 .cpuid = { 1103 .eax = 0xD, 1104 .needs_ecx = true, .ecx = 0, 1105 .reg = R_EDX, 1106 }, 1107 .tcg_features = ~0U, 1108 }, 1109 /*Below are MSR exposed features*/ 1110 [FEAT_ARCH_CAPABILITIES] = { 1111 .type = MSR_FEATURE_WORD, 1112 .feat_names = { 1113 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1114 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", 1115 "taa-no", NULL, NULL, NULL, 1116 NULL, NULL, NULL, NULL, 1117 NULL, NULL, NULL, NULL, 1118 NULL, NULL, NULL, NULL, 1119 NULL, NULL, NULL, NULL, 1120 NULL, NULL, NULL, NULL, 1121 }, 1122 .msr = { 1123 .index = MSR_IA32_ARCH_CAPABILITIES, 1124 }, 1125 }, 1126 [FEAT_CORE_CAPABILITY] = { 1127 .type = MSR_FEATURE_WORD, 1128 .feat_names = { 1129 NULL, NULL, NULL, NULL, 1130 NULL, "split-lock-detect", NULL, NULL, 1131 NULL, NULL, NULL, NULL, 1132 NULL, NULL, NULL, NULL, 1133 NULL, NULL, NULL, NULL, 1134 NULL, NULL, NULL, NULL, 1135 NULL, NULL, NULL, NULL, 1136 NULL, NULL, NULL, NULL, 1137 }, 1138 .msr = { 1139 .index = MSR_IA32_CORE_CAPABILITY, 1140 }, 1141 }, 1142 1143 [FEAT_VMX_PROCBASED_CTLS] = { 1144 .type = MSR_FEATURE_WORD, 1145 .feat_names = { 1146 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset", 1147 NULL, NULL, NULL, "vmx-hlt-exit", 1148 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit", 1149 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit", 1150 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit", 1151 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit", 1152 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf", 1153 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls", 1154 }, 1155 .msr = { 1156 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1157 } 1158 }, 1159 1160 [FEAT_VMX_SECONDARY_CTLS] = { 1161 .type = MSR_FEATURE_WORD, 1162 .feat_names = { 1163 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit", 1164 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest", 1165 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit", 1166 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit", 1167 "vmx-rdseed-exit", "vmx-pml", NULL, NULL, 1168 "vmx-xsaves", NULL, NULL, NULL, 1169 NULL, NULL, NULL, NULL, 1170 NULL, NULL, NULL, NULL, 1171 }, 1172 .msr = { 1173 .index = MSR_IA32_VMX_PROCBASED_CTLS2, 1174 } 1175 }, 1176 1177 [FEAT_VMX_PINBASED_CTLS] = { 1178 .type = MSR_FEATURE_WORD, 1179 .feat_names = { 1180 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit", 1181 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr", 1182 NULL, NULL, NULL, NULL, 1183 NULL, NULL, NULL, NULL, 1184 NULL, NULL, NULL, NULL, 1185 NULL, NULL, NULL, NULL, 1186 NULL, NULL, NULL, NULL, 1187 NULL, NULL, NULL, NULL, 1188 }, 1189 .msr = { 1190 .index = 
MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1191 } 1192 }, 1193 1194 [FEAT_VMX_EXIT_CTLS] = { 1195 .type = MSR_FEATURE_WORD, 1196 /* 1197 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from 1198 * the LM CPUID bit. 1199 */ 1200 .feat_names = { 1201 NULL, NULL, "vmx-exit-nosave-debugctl", NULL, 1202 NULL, NULL, NULL, NULL, 1203 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL, 1204 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr", 1205 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat", 1206 "vmx-exit-save-efer", "vmx-exit-load-efer", 1207 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", 1208 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, 1209 NULL, NULL, NULL, NULL, 1210 }, 1211 .msr = { 1212 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, 1213 } 1214 }, 1215 1216 [FEAT_VMX_ENTRY_CTLS] = { 1217 .type = MSR_FEATURE_WORD, 1218 .feat_names = { 1219 NULL, NULL, "vmx-entry-noload-debugctl", NULL, 1220 NULL, NULL, NULL, NULL, 1221 NULL, "vmx-entry-ia32e-mode", NULL, NULL, 1222 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", 1223 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, 1224 NULL, NULL, NULL, NULL, 1225 NULL, NULL, NULL, NULL, 1226 NULL, NULL, NULL, NULL, 1227 }, 1228 .msr = { 1229 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1230 } 1231 }, 1232 1233 [FEAT_VMX_MISC] = { 1234 .type = MSR_FEATURE_WORD, 1235 .feat_names = { 1236 NULL, NULL, NULL, NULL, 1237 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown", 1238 "vmx-activity-wait-sipi", NULL, NULL, NULL, 1239 NULL, NULL, NULL, NULL, 1240 NULL, NULL, NULL, NULL, 1241 NULL, NULL, NULL, NULL, 1242 NULL, NULL, NULL, NULL, 1243 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL, 1244 }, 1245 .msr = { 1246 .index = MSR_IA32_VMX_MISC, 1247 } 1248 }, 1249 1250 [FEAT_VMX_EPT_VPID_CAPS] = { 1251 .type = MSR_FEATURE_WORD, 1252 .feat_names = { 1253 "vmx-ept-execonly", NULL, NULL, NULL, 1254 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5", 1255 NULL, NULL, NULL, NULL, 1256 NULL, NULL, NULL, NULL, 1257 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL, 1258 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL, 1259 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL, 1260 NULL, NULL, NULL, NULL, 1261 "vmx-invvpid", NULL, NULL, NULL, 1262 NULL, NULL, NULL, NULL, 1263 "vmx-invvpid-single-addr", "vmx-invept-single-context", 1264 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals", 1265 NULL, NULL, NULL, NULL, 1266 NULL, NULL, NULL, NULL, 1267 NULL, NULL, NULL, NULL, 1268 NULL, NULL, NULL, NULL, 1269 NULL, NULL, NULL, NULL, 1270 }, 1271 .msr = { 1272 .index = MSR_IA32_VMX_EPT_VPID_CAP, 1273 } 1274 }, 1275 1276 [FEAT_VMX_BASIC] = { 1277 .type = MSR_FEATURE_WORD, 1278 .feat_names = { 1279 [54] = "vmx-ins-outs", 1280 [55] = "vmx-true-ctls", 1281 }, 1282 .msr = { 1283 .index = MSR_IA32_VMX_BASIC, 1284 }, 1285 /* Just to be safe - we don't support setting the MSEG version field. 
*/ 1286 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR, 1287 }, 1288 1289 [FEAT_VMX_VMFUNC] = { 1290 .type = MSR_FEATURE_WORD, 1291 .feat_names = { 1292 [0] = "vmx-eptp-switching", 1293 }, 1294 .msr = { 1295 .index = MSR_IA32_VMX_VMFUNC, 1296 } 1297 }, 1298 1299 }; 1300 1301 typedef struct FeatureMask { 1302 FeatureWord index; 1303 uint64_t mask; 1304 } FeatureMask; 1305 1306 typedef struct FeatureDep { 1307 FeatureMask from, to; 1308 } FeatureDep; 1309 1310 static FeatureDep feature_dependencies[] = { 1311 { 1312 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES }, 1313 .to = { FEAT_ARCH_CAPABILITIES, ~0ull }, 1314 }, 1315 { 1316 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY }, 1317 .to = { FEAT_CORE_CAPABILITY, ~0ull }, 1318 }, 1319 { 1320 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1321 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull }, 1322 }, 1323 { 1324 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1325 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull }, 1326 }, 1327 { 1328 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1329 .to = { FEAT_VMX_EXIT_CTLS, ~0ull }, 1330 }, 1331 { 1332 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1333 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull }, 1334 }, 1335 { 1336 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1337 .to = { FEAT_VMX_MISC, ~0ull }, 1338 }, 1339 { 1340 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1341 .to = { FEAT_VMX_BASIC, ~0ull }, 1342 }, 1343 { 1344 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM }, 1345 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE }, 1346 }, 1347 { 1348 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS }, 1349 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull }, 1350 }, 1351 { 1352 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES }, 1353 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES }, 1354 }, 1355 { 1356 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND }, 1357 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING }, 1358 }, 1359 { 1360 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID }, 1361 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID }, 1362 }, 1363 { 1364 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED }, 1365 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING }, 1366 }, 1367 { 1368 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP }, 1369 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP }, 1370 }, 1371 { 1372 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1373 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull }, 1374 }, 1375 { 1376 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1377 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST }, 1378 }, 1379 { 1380 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID }, 1381 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 }, 1382 }, 1383 { 1384 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC }, 1385 .to = { FEAT_VMX_VMFUNC, ~0ull }, 1386 }, 1387 }; 1388 1389 typedef struct X86RegisterInfo32 { 1390 /* Name of register */ 1391 const char *name; 1392 /* QAPI enum value register */ 1393 X86CPURegister32 qapi_enum; 1394 } X86RegisterInfo32; 1395 1396 #define REGISTER(reg) \ 1397 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } 1398 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { 1399 REGISTER(EAX), 1400 REGISTER(ECX), 1401 REGISTER(EDX), 1402 REGISTER(EBX), 1403 REGISTER(ESP), 1404 REGISTER(EBP), 1405 REGISTER(ESI), 1406 REGISTER(EDI), 1407 }; 1408 #undef REGISTER 1409 1410 typedef struct ExtSaveArea { 1411 uint32_t feature, bits; 
1412 uint32_t offset, size; 1413 } ExtSaveArea; 1414 1415 static const ExtSaveArea x86_ext_save_areas[] = { 1416 [XSTATE_FP_BIT] = { 1417 /* x87 FP state component is always enabled if XSAVE is supported */ 1418 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1419 /* x87 state is in the legacy region of the XSAVE area */ 1420 .offset = 0, 1421 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1422 }, 1423 [XSTATE_SSE_BIT] = { 1424 /* SSE state component is always enabled if XSAVE is supported */ 1425 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1426 /* SSE state is in the legacy region of the XSAVE area */ 1427 .offset = 0, 1428 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1429 }, 1430 [XSTATE_YMM_BIT] = 1431 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, 1432 .offset = offsetof(X86XSaveArea, avx_state), 1433 .size = sizeof(XSaveAVX) }, 1434 [XSTATE_BNDREGS_BIT] = 1435 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1436 .offset = offsetof(X86XSaveArea, bndreg_state), 1437 .size = sizeof(XSaveBNDREG) }, 1438 [XSTATE_BNDCSR_BIT] = 1439 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1440 .offset = offsetof(X86XSaveArea, bndcsr_state), 1441 .size = sizeof(XSaveBNDCSR) }, 1442 [XSTATE_OPMASK_BIT] = 1443 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1444 .offset = offsetof(X86XSaveArea, opmask_state), 1445 .size = sizeof(XSaveOpmask) }, 1446 [XSTATE_ZMM_Hi256_BIT] = 1447 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1448 .offset = offsetof(X86XSaveArea, zmm_hi256_state), 1449 .size = sizeof(XSaveZMM_Hi256) }, 1450 [XSTATE_Hi16_ZMM_BIT] = 1451 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1452 .offset = offsetof(X86XSaveArea, hi16_zmm_state), 1453 .size = sizeof(XSaveHi16_ZMM) }, 1454 [XSTATE_PKRU_BIT] = 1455 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, 1456 .offset = offsetof(X86XSaveArea, pkru_state), 1457 .size = sizeof(XSavePKRU) }, 1458 }; 1459 1460 static uint32_t xsave_area_size(uint64_t mask) 1461 { 1462 int i; 1463 uint64_t ret = 0; 1464 1465 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 1466 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 1467 if ((mask >> i) & 1) { 1468 ret = MAX(ret, esa->offset + esa->size); 1469 } 1470 } 1471 return ret; 1472 } 1473 1474 static inline bool accel_uses_host_cpuid(void) 1475 { 1476 return kvm_enabled() || hvf_enabled(); 1477 } 1478 1479 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) 1480 { 1481 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | 1482 cpu->env.features[FEAT_XSAVE_COMP_LO]; 1483 } 1484 1485 const char *get_register_name_32(unsigned int reg) 1486 { 1487 if (reg >= CPU_NB_REGS32) { 1488 return NULL; 1489 } 1490 return x86_reg_info_32[reg].name; 1491 } 1492 1493 /* 1494 * Returns the set of feature flags that are supported and migratable by 1495 * QEMU, for a given FeatureWord. 
1496 */ 1497 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) 1498 { 1499 FeatureWordInfo *wi = &feature_word_info[w]; 1500 uint64_t r = 0; 1501 int i; 1502 1503 for (i = 0; i < 64; i++) { 1504 uint64_t f = 1ULL << i; 1505 1506 /* If the feature name is known, it is implicitly considered migratable, 1507 * unless it is explicitly set in unmigratable_flags */ 1508 if ((wi->migratable_flags & f) || 1509 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 1510 r |= f; 1511 } 1512 } 1513 return r; 1514 } 1515 1516 void host_cpuid(uint32_t function, uint32_t count, 1517 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 1518 { 1519 uint32_t vec[4]; 1520 1521 #ifdef __x86_64__ 1522 asm volatile("cpuid" 1523 : "=a"(vec[0]), "=b"(vec[1]), 1524 "=c"(vec[2]), "=d"(vec[3]) 1525 : "0"(function), "c"(count) : "cc"); 1526 #elif defined(__i386__) 1527 asm volatile("pusha \n\t" 1528 "cpuid \n\t" 1529 "mov %%eax, 0(%2) \n\t" 1530 "mov %%ebx, 4(%2) \n\t" 1531 "mov %%ecx, 8(%2) \n\t" 1532 "mov %%edx, 12(%2) \n\t" 1533 "popa" 1534 : : "a"(function), "c"(count), "S"(vec) 1535 : "memory", "cc"); 1536 #else 1537 abort(); 1538 #endif 1539 1540 if (eax) 1541 *eax = vec[0]; 1542 if (ebx) 1543 *ebx = vec[1]; 1544 if (ecx) 1545 *ecx = vec[2]; 1546 if (edx) 1547 *edx = vec[3]; 1548 } 1549 1550 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) 1551 { 1552 uint32_t eax, ebx, ecx, edx; 1553 1554 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 1555 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); 1556 1557 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 1558 if (family) { 1559 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 1560 } 1561 if (model) { 1562 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 1563 } 1564 if (stepping) { 1565 *stepping = eax & 0x0F; 1566 } 1567 } 1568 1569 /* CPU class name definitions: */ 1570 1571 /* Return type name for a given CPU model name 1572 * Caller is responsible for freeing the returned string. 1573 */ 1574 static char *x86_cpu_type_name(const char *model_name) 1575 { 1576 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); 1577 } 1578 1579 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model) 1580 { 1581 g_autofree char *typename = x86_cpu_type_name(cpu_model); 1582 return object_class_by_name(typename); 1583 } 1584 1585 static char *x86_cpu_class_get_model_name(X86CPUClass *cc) 1586 { 1587 const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); 1588 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); 1589 return g_strndup(class_name, 1590 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); 1591 } 1592 1593 typedef struct PropValue { 1594 const char *prop, *value; 1595 } PropValue; 1596 1597 typedef struct X86CPUVersionDefinition { 1598 X86CPUVersion version; 1599 const char *alias; 1600 const char *note; 1601 PropValue *props; 1602 } X86CPUVersionDefinition; 1603 1604 /* Base definition for a CPU model */ 1605 typedef struct X86CPUDefinition { 1606 const char *name; 1607 uint32_t level; 1608 uint32_t xlevel; 1609 /* vendor is zero-terminated, 12 character ASCII string */ 1610 char vendor[CPUID_VENDOR_SZ + 1]; 1611 int family; 1612 int model; 1613 int stepping; 1614 FeatureWordArray features; 1615 const char *model_id; 1616 CPUCaches *cache_info; 1617 1618 /* Use AMD EPYC encoding for apic id */ 1619 bool use_epyc_apic_id_encoding; 1620 1621 /* 1622 * Definitions for alternative versions of CPU model. 1623 * List is terminated by item with version == 0. 
1624 * If NULL, version 1 will be registered automatically. 1625 */ 1626 const X86CPUVersionDefinition *versions; 1627 } X86CPUDefinition; 1628 1629 /* Reference to a specific CPU model version */ 1630 struct X86CPUModel { 1631 /* Base CPU definition */ 1632 X86CPUDefinition *cpudef; 1633 /* CPU model version */ 1634 X86CPUVersion version; 1635 const char *note; 1636 /* 1637 * If true, this is an alias CPU model. 1638 * This matters only for "-cpu help" and query-cpu-definitions 1639 */ 1640 bool is_alias; 1641 }; 1642 1643 /* Get full model name for CPU version */ 1644 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef, 1645 X86CPUVersion version) 1646 { 1647 assert(version > 0); 1648 return g_strdup_printf("%s-v%d", cpudef->name, (int)version); 1649 } 1650 1651 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def) 1652 { 1653 /* When X86CPUDefinition::versions is NULL, we register only v1 */ 1654 static const X86CPUVersionDefinition default_version_list[] = { 1655 { 1 }, 1656 { /* end of list */ } 1657 }; 1658 1659 return def->versions ?: default_version_list; 1660 } 1661 1662 bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type) 1663 { 1664 X86CPUClass *xcc = X86_CPU_CLASS(object_class_by_name(cpu_type)); 1665 1666 assert(xcc); 1667 if (xcc->model && xcc->model->cpudef) { 1668 return xcc->model->cpudef->use_epyc_apic_id_encoding; 1669 } else { 1670 return false; 1671 } 1672 } 1673 1674 static CPUCaches epyc_cache_info = { 1675 .l1d_cache = &(CPUCacheInfo) { 1676 .type = DATA_CACHE, 1677 .level = 1, 1678 .size = 32 * KiB, 1679 .line_size = 64, 1680 .associativity = 8, 1681 .partitions = 1, 1682 .sets = 64, 1683 .lines_per_tag = 1, 1684 .self_init = 1, 1685 .no_invd_sharing = true, 1686 }, 1687 .l1i_cache = &(CPUCacheInfo) { 1688 .type = INSTRUCTION_CACHE, 1689 .level = 1, 1690 .size = 64 * KiB, 1691 .line_size = 64, 1692 .associativity = 4, 1693 .partitions = 1, 1694 .sets = 256, 1695 .lines_per_tag = 1, 1696 .self_init = 1, 1697 .no_invd_sharing = true, 1698 }, 1699 .l2_cache = &(CPUCacheInfo) { 1700 .type = UNIFIED_CACHE, 1701 .level = 2, 1702 .size = 512 * KiB, 1703 .line_size = 64, 1704 .associativity = 8, 1705 .partitions = 1, 1706 .sets = 1024, 1707 .lines_per_tag = 1, 1708 }, 1709 .l3_cache = &(CPUCacheInfo) { 1710 .type = UNIFIED_CACHE, 1711 .level = 3, 1712 .size = 8 * MiB, 1713 .line_size = 64, 1714 .associativity = 16, 1715 .partitions = 1, 1716 .sets = 8192, 1717 .lines_per_tag = 1, 1718 .self_init = true, 1719 .inclusive = true, 1720 .complex_indexing = true, 1721 }, 1722 }; 1723 1724 static CPUCaches epyc_rome_cache_info = { 1725 .l1d_cache = &(CPUCacheInfo) { 1726 .type = DATA_CACHE, 1727 .level = 1, 1728 .size = 32 * KiB, 1729 .line_size = 64, 1730 .associativity = 8, 1731 .partitions = 1, 1732 .sets = 64, 1733 .lines_per_tag = 1, 1734 .self_init = 1, 1735 .no_invd_sharing = true, 1736 }, 1737 .l1i_cache = &(CPUCacheInfo) { 1738 .type = INSTRUCTION_CACHE, 1739 .level = 1, 1740 .size = 32 * KiB, 1741 .line_size = 64, 1742 .associativity = 8, 1743 .partitions = 1, 1744 .sets = 64, 1745 .lines_per_tag = 1, 1746 .self_init = 1, 1747 .no_invd_sharing = true, 1748 }, 1749 .l2_cache = &(CPUCacheInfo) { 1750 .type = UNIFIED_CACHE, 1751 .level = 2, 1752 .size = 512 * KiB, 1753 .line_size = 64, 1754 .associativity = 8, 1755 .partitions = 1, 1756 .sets = 1024, 1757 .lines_per_tag = 1, 1758 }, 1759 .l3_cache = &(CPUCacheInfo) { 1760 .type = UNIFIED_CACHE, 1761 .level = 3, 1762 .size = 16 * MiB, 1763 .line_size = 64, 1764 
.associativity = 16, 1765 .partitions = 1, 1766 .sets = 16384, 1767 .lines_per_tag = 1, 1768 .self_init = true, 1769 .inclusive = true, 1770 .complex_indexing = true, 1771 }, 1772 }; 1773 1774 /* The following VMX features are not supported by KVM and are left out in the 1775 * CPU definitions: 1776 * 1777 * Dual-monitor support (all processors) 1778 * Entry to SMM 1779 * Deactivate dual-monitor treatment 1780 * Number of CR3-target values 1781 * Shutdown activity state 1782 * Wait-for-SIPI activity state 1783 * PAUSE-loop exiting (Westmere and newer) 1784 * EPT-violation #VE (Broadwell and newer) 1785 * Inject event with insn length=0 (Skylake and newer) 1786 * Conceal non-root operation from PT 1787 * Conceal VM exits from PT 1788 * Conceal VM entries from PT 1789 * Enable ENCLS exiting 1790 * Mode-based execute control (XS/XU) 1791 * TSC scaling (Skylake Server and newer) 1792 * GPA translation for PT (IceLake and newer) 1793 * User wait and pause 1794 * ENCLV exiting 1795 * Load IA32_RTIT_CTL 1796 * Clear IA32_RTIT_CTL 1797 * Advanced VM-exit information for EPT violations 1798 * Sub-page write permissions 1799 * PT in VMX operation 1800 */ 1801 1802 static X86CPUDefinition builtin_x86_defs[] = { 1803 { 1804 .name = "qemu64", 1805 .level = 0xd, 1806 .vendor = CPUID_VENDOR_AMD, 1807 .family = 6, 1808 .model = 6, 1809 .stepping = 3, 1810 .features[FEAT_1_EDX] = 1811 PPRO_FEATURES | 1812 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1813 CPUID_PSE36, 1814 .features[FEAT_1_ECX] = 1815 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1816 .features[FEAT_8000_0001_EDX] = 1817 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1818 .features[FEAT_8000_0001_ECX] = 1819 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1820 .xlevel = 0x8000000A, 1821 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1822 }, 1823 { 1824 .name = "phenom", 1825 .level = 5, 1826 .vendor = CPUID_VENDOR_AMD, 1827 .family = 16, 1828 .model = 2, 1829 .stepping = 3, 1830 /* Missing: CPUID_HT */ 1831 .features[FEAT_1_EDX] = 1832 PPRO_FEATURES | 1833 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1834 CPUID_PSE36 | CPUID_VME, 1835 .features[FEAT_1_ECX] = 1836 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1837 CPUID_EXT_POPCNT, 1838 .features[FEAT_8000_0001_EDX] = 1839 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1840 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1841 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1842 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1843 CPUID_EXT3_CR8LEG, 1844 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1845 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1846 .features[FEAT_8000_0001_ECX] = 1847 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1848 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1849 /* Missing: CPUID_SVM_LBRV */ 1850 .features[FEAT_SVM] = 1851 CPUID_SVM_NPT, 1852 .xlevel = 0x8000001A, 1853 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1854 }, 1855 { 1856 .name = "core2duo", 1857 .level = 10, 1858 .vendor = CPUID_VENDOR_INTEL, 1859 .family = 6, 1860 .model = 15, 1861 .stepping = 11, 1862 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1863 .features[FEAT_1_EDX] = 1864 PPRO_FEATURES | 1865 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1866 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1867 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1868 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1869 .features[FEAT_1_ECX] = 1870 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1871 CPUID_EXT_CX16, 1872 .features[FEAT_8000_0001_EDX] = 1873 
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1874 .features[FEAT_8000_0001_ECX] = 1875 CPUID_EXT3_LAHF_LM, 1876 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1877 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1878 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1879 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1880 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1881 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1882 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1883 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1884 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1885 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1886 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1887 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1888 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1889 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1890 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1891 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1892 .features[FEAT_VMX_SECONDARY_CTLS] = 1893 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1894 .xlevel = 0x80000008, 1895 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1896 }, 1897 { 1898 .name = "kvm64", 1899 .level = 0xd, 1900 .vendor = CPUID_VENDOR_INTEL, 1901 .family = 15, 1902 .model = 6, 1903 .stepping = 1, 1904 /* Missing: CPUID_HT */ 1905 .features[FEAT_1_EDX] = 1906 PPRO_FEATURES | CPUID_VME | 1907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1908 CPUID_PSE36, 1909 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1910 .features[FEAT_1_ECX] = 1911 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1912 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1913 .features[FEAT_8000_0001_EDX] = 1914 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1915 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1916 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1917 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1918 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1919 .features[FEAT_8000_0001_ECX] = 1920 0, 1921 /* VMX features from Cedar Mill/Prescott */ 1922 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1923 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1924 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1925 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1926 VMX_PIN_BASED_NMI_EXITING, 1927 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1928 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1929 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1930 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1931 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1932 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1933 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1934 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1935 .xlevel = 0x80000008, 1936 .model_id = "Common KVM processor" 1937 }, 1938 { 1939 .name = "qemu32", 1940 .level = 4, 1941 .vendor = CPUID_VENDOR_INTEL, 1942 .family = 6, 1943 .model = 6, 1944 .stepping = 3, 1945 .features[FEAT_1_EDX] = 1946 PPRO_FEATURES, 1947 .features[FEAT_1_ECX] = 1948 CPUID_EXT_SSE3, 1949 .xlevel = 0x80000004, 1950 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1951 }, 1952 { 1953 .name = "kvm32", 1954 .level = 5, 
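        /*
         * Note: .level and .xlevel bound the largest basic (CPUID[0].EAX) and
         * extended (CPUID[0x80000000].EAX) leaves that a model advertises.
         */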
1955 .vendor = CPUID_VENDOR_INTEL, 1956 .family = 15, 1957 .model = 6, 1958 .stepping = 1, 1959 .features[FEAT_1_EDX] = 1960 PPRO_FEATURES | CPUID_VME | 1961 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1962 .features[FEAT_1_ECX] = 1963 CPUID_EXT_SSE3, 1964 .features[FEAT_8000_0001_ECX] = 1965 0, 1966 /* VMX features from Yonah */ 1967 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1968 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1969 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1970 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1971 VMX_PIN_BASED_NMI_EXITING, 1972 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1973 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1974 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1975 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1976 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 1977 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 1978 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 1979 .xlevel = 0x80000008, 1980 .model_id = "Common 32-bit KVM processor" 1981 }, 1982 { 1983 .name = "coreduo", 1984 .level = 10, 1985 .vendor = CPUID_VENDOR_INTEL, 1986 .family = 6, 1987 .model = 14, 1988 .stepping = 8, 1989 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1990 .features[FEAT_1_EDX] = 1991 PPRO_FEATURES | CPUID_VME | 1992 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 1993 CPUID_SS, 1994 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 1995 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1996 .features[FEAT_1_ECX] = 1997 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 1998 .features[FEAT_8000_0001_EDX] = 1999 CPUID_EXT2_NX, 2000 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2001 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2002 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2003 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2004 VMX_PIN_BASED_NMI_EXITING, 2005 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2006 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2007 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2008 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2009 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2010 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2011 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2012 .xlevel = 0x80000008, 2013 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2014 }, 2015 { 2016 .name = "486", 2017 .level = 1, 2018 .vendor = CPUID_VENDOR_INTEL, 2019 .family = 4, 2020 .model = 8, 2021 .stepping = 0, 2022 .features[FEAT_1_EDX] = 2023 I486_FEATURES, 2024 .xlevel = 0, 2025 .model_id = "", 2026 }, 2027 { 2028 .name = "pentium", 2029 .level = 1, 2030 .vendor = CPUID_VENDOR_INTEL, 2031 .family = 5, 2032 .model = 4, 2033 .stepping = 3, 2034 .features[FEAT_1_EDX] = 2035 PENTIUM_FEATURES, 2036 .xlevel = 0, 2037 .model_id = "", 2038 }, 2039 { 2040 .name = "pentium2", 2041 .level = 2, 2042 .vendor = CPUID_VENDOR_INTEL, 2043 .family = 6, 2044 .model = 5, 2045 .stepping = 2, 2046 .features[FEAT_1_EDX] = 2047 PENTIUM2_FEATURES, 2048 .xlevel = 0, 2049 .model_id = "", 2050 }, 2051 { 2052 .name = "pentium3", 2053 .level = 3, 2054 .vendor = CPUID_VENDOR_INTEL, 2055 .family = 6, 2056 .model = 7, 2057 .stepping = 3, 2058 .features[FEAT_1_EDX] = 2059 PENTIUM3_FEATURES, 2060 .xlevel = 0, 2061 
.model_id = "", 2062 }, 2063 { 2064 .name = "athlon", 2065 .level = 2, 2066 .vendor = CPUID_VENDOR_AMD, 2067 .family = 6, 2068 .model = 2, 2069 .stepping = 3, 2070 .features[FEAT_1_EDX] = 2071 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2072 CPUID_MCA, 2073 .features[FEAT_8000_0001_EDX] = 2074 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2075 .xlevel = 0x80000008, 2076 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2077 }, 2078 { 2079 .name = "n270", 2080 .level = 10, 2081 .vendor = CPUID_VENDOR_INTEL, 2082 .family = 6, 2083 .model = 28, 2084 .stepping = 2, 2085 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2086 .features[FEAT_1_EDX] = 2087 PPRO_FEATURES | 2088 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2089 CPUID_ACPI | CPUID_SS, 2090 /* Some CPUs got no CPUID_SEP */ 2091 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2092 * CPUID_EXT_XTPR */ 2093 .features[FEAT_1_ECX] = 2094 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2095 CPUID_EXT_MOVBE, 2096 .features[FEAT_8000_0001_EDX] = 2097 CPUID_EXT2_NX, 2098 .features[FEAT_8000_0001_ECX] = 2099 CPUID_EXT3_LAHF_LM, 2100 .xlevel = 0x80000008, 2101 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2102 }, 2103 { 2104 .name = "Conroe", 2105 .level = 10, 2106 .vendor = CPUID_VENDOR_INTEL, 2107 .family = 6, 2108 .model = 15, 2109 .stepping = 3, 2110 .features[FEAT_1_EDX] = 2111 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2112 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2113 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2114 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2115 CPUID_DE | CPUID_FP87, 2116 .features[FEAT_1_ECX] = 2117 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2118 .features[FEAT_8000_0001_EDX] = 2119 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2120 .features[FEAT_8000_0001_ECX] = 2121 CPUID_EXT3_LAHF_LM, 2122 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2123 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2124 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2125 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2126 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2127 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2128 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2129 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2130 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2131 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2132 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2133 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2134 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2135 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2136 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2137 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2138 .features[FEAT_VMX_SECONDARY_CTLS] = 2139 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2140 .xlevel = 0x80000008, 2141 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2142 }, 2143 { 2144 .name = "Penryn", 2145 .level = 10, 2146 .vendor = CPUID_VENDOR_INTEL, 2147 .family = 6, 2148 .model = 23, 2149 .stepping = 3, 2150 .features[FEAT_1_EDX] = 2151 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2152 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2153 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 
2154 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2155 CPUID_DE | CPUID_FP87, 2156 .features[FEAT_1_ECX] = 2157 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2158 CPUID_EXT_SSE3, 2159 .features[FEAT_8000_0001_EDX] = 2160 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2161 .features[FEAT_8000_0001_ECX] = 2162 CPUID_EXT3_LAHF_LM, 2163 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2164 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2165 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2166 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2167 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2168 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2169 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2170 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2171 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2172 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2173 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2174 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2175 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2176 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2177 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2178 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2179 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2180 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2181 .features[FEAT_VMX_SECONDARY_CTLS] = 2182 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2183 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2184 .xlevel = 0x80000008, 2185 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2186 }, 2187 { 2188 .name = "Nehalem", 2189 .level = 11, 2190 .vendor = CPUID_VENDOR_INTEL, 2191 .family = 6, 2192 .model = 26, 2193 .stepping = 3, 2194 .features[FEAT_1_EDX] = 2195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2199 CPUID_DE | CPUID_FP87, 2200 .features[FEAT_1_ECX] = 2201 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2202 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2203 .features[FEAT_8000_0001_EDX] = 2204 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2205 .features[FEAT_8000_0001_ECX] = 2206 CPUID_EXT3_LAHF_LM, 2207 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2208 MSR_VMX_BASIC_TRUE_CTLS, 2209 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2210 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2211 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2212 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2213 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2214 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2215 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2216 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2217 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2218 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2219 .features[FEAT_VMX_EXIT_CTLS] = 2220 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2221 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2222 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2223 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2224 
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2225 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2226 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2227 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2228 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2229 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2230 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2231 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2232 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2233 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2234 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2235 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2236 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2237 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2238 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2239 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2240 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2241 .features[FEAT_VMX_SECONDARY_CTLS] = 2242 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2243 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2244 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2245 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2246 VMX_SECONDARY_EXEC_ENABLE_VPID, 2247 .xlevel = 0x80000008, 2248 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2249 .versions = (X86CPUVersionDefinition[]) { 2250 { .version = 1 }, 2251 { 2252 .version = 2, 2253 .alias = "Nehalem-IBRS", 2254 .props = (PropValue[]) { 2255 { "spec-ctrl", "on" }, 2256 { "model-id", 2257 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2258 { /* end of list */ } 2259 } 2260 }, 2261 { /* end of list */ } 2262 } 2263 }, 2264 { 2265 .name = "Westmere", 2266 .level = 11, 2267 .vendor = CPUID_VENDOR_INTEL, 2268 .family = 6, 2269 .model = 44, 2270 .stepping = 1, 2271 .features[FEAT_1_EDX] = 2272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2276 CPUID_DE | CPUID_FP87, 2277 .features[FEAT_1_ECX] = 2278 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2279 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2280 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2281 .features[FEAT_8000_0001_EDX] = 2282 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2283 .features[FEAT_8000_0001_ECX] = 2284 CPUID_EXT3_LAHF_LM, 2285 .features[FEAT_6_EAX] = 2286 CPUID_6_EAX_ARAT, 2287 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2288 MSR_VMX_BASIC_TRUE_CTLS, 2289 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2290 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2291 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2292 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2293 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2294 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2295 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2296 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2297 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2298 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2299 .features[FEAT_VMX_EXIT_CTLS] = 2300 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2301 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 
| 2302 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2303 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2304 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2305 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2306 MSR_VMX_MISC_STORE_LMA, 2307 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2308 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2309 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2310 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2311 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2312 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2313 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2314 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2315 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2316 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2317 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2318 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2319 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2320 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2321 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2322 .features[FEAT_VMX_SECONDARY_CTLS] = 2323 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2324 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2325 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2326 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2327 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2328 .xlevel = 0x80000008, 2329 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2330 .versions = (X86CPUVersionDefinition[]) { 2331 { .version = 1 }, 2332 { 2333 .version = 2, 2334 .alias = "Westmere-IBRS", 2335 .props = (PropValue[]) { 2336 { "spec-ctrl", "on" }, 2337 { "model-id", 2338 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2339 { /* end of list */ } 2340 } 2341 }, 2342 { /* end of list */ } 2343 } 2344 }, 2345 { 2346 .name = "SandyBridge", 2347 .level = 0xd, 2348 .vendor = CPUID_VENDOR_INTEL, 2349 .family = 6, 2350 .model = 42, 2351 .stepping = 1, 2352 .features[FEAT_1_EDX] = 2353 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2354 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2355 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2356 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2357 CPUID_DE | CPUID_FP87, 2358 .features[FEAT_1_ECX] = 2359 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2360 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2361 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2362 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2363 CPUID_EXT_SSE3, 2364 .features[FEAT_8000_0001_EDX] = 2365 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2366 CPUID_EXT2_SYSCALL, 2367 .features[FEAT_8000_0001_ECX] = 2368 CPUID_EXT3_LAHF_LM, 2369 .features[FEAT_XSAVE] = 2370 CPUID_XSAVE_XSAVEOPT, 2371 .features[FEAT_6_EAX] = 2372 CPUID_6_EAX_ARAT, 2373 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2374 MSR_VMX_BASIC_TRUE_CTLS, 2375 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2376 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2377 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2378 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2379 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2380 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2381 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | 
MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2382 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2383 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2384 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2385 .features[FEAT_VMX_EXIT_CTLS] = 2386 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2387 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2388 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2389 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2390 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2391 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2392 MSR_VMX_MISC_STORE_LMA, 2393 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2394 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2395 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2396 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2397 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2398 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2399 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2400 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2401 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2402 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2403 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2404 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2405 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2406 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2407 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2408 .features[FEAT_VMX_SECONDARY_CTLS] = 2409 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2410 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2411 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2412 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2413 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2414 .xlevel = 0x80000008, 2415 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2416 .versions = (X86CPUVersionDefinition[]) { 2417 { .version = 1 }, 2418 { 2419 .version = 2, 2420 .alias = "SandyBridge-IBRS", 2421 .props = (PropValue[]) { 2422 { "spec-ctrl", "on" }, 2423 { "model-id", 2424 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2425 { /* end of list */ } 2426 } 2427 }, 2428 { /* end of list */ } 2429 } 2430 }, 2431 { 2432 .name = "IvyBridge", 2433 .level = 0xd, 2434 .vendor = CPUID_VENDOR_INTEL, 2435 .family = 6, 2436 .model = 58, 2437 .stepping = 9, 2438 .features[FEAT_1_EDX] = 2439 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2440 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2441 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2442 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2443 CPUID_DE | CPUID_FP87, 2444 .features[FEAT_1_ECX] = 2445 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2446 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2447 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2448 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2449 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2450 .features[FEAT_7_0_EBX] = 2451 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2452 CPUID_7_0_EBX_ERMS, 2453 .features[FEAT_8000_0001_EDX] = 2454 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2455 CPUID_EXT2_SYSCALL, 2456 .features[FEAT_8000_0001_ECX] = 2457 CPUID_EXT3_LAHF_LM, 2458 .features[FEAT_XSAVE] = 2459 CPUID_XSAVE_XSAVEOPT, 2460 .features[FEAT_6_EAX] = 2461 CPUID_6_EAX_ARAT, 2462 
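        /*
         * The FEAT_VMX_* words below mirror the IA32_VMX_* capability MSRs
         * (see feature_word_info) and only take effect when VMX/nested
         * virtualization is enabled for the guest.
         */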
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2463 MSR_VMX_BASIC_TRUE_CTLS, 2464 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2465 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2466 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2467 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2468 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2469 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2470 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2471 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2472 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2473 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2474 .features[FEAT_VMX_EXIT_CTLS] = 2475 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2476 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2477 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2478 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2479 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2480 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2481 MSR_VMX_MISC_STORE_LMA, 2482 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2483 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2484 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2485 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2486 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2487 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2488 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2489 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2490 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2491 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2492 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2493 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2494 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2495 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2496 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2497 .features[FEAT_VMX_SECONDARY_CTLS] = 2498 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2499 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2500 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2501 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2502 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2503 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2504 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2505 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2506 .xlevel = 0x80000008, 2507 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2508 .versions = (X86CPUVersionDefinition[]) { 2509 { .version = 1 }, 2510 { 2511 .version = 2, 2512 .alias = "IvyBridge-IBRS", 2513 .props = (PropValue[]) { 2514 { "spec-ctrl", "on" }, 2515 { "model-id", 2516 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2517 { /* end of list */ } 2518 } 2519 }, 2520 { /* end of list */ } 2521 } 2522 }, 2523 { 2524 .name = "Haswell", 2525 .level = 0xd, 2526 .vendor = CPUID_VENDOR_INTEL, 2527 .family = 6, 2528 .model = 60, 2529 .stepping = 4, 2530 .features[FEAT_1_EDX] = 2531 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2532 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2533 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2534 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2535 CPUID_DE | CPUID_FP87, 2536 .features[FEAT_1_ECX] = 2537 CPUID_EXT_AVX | 
CPUID_EXT_XSAVE | CPUID_EXT_AES | 2538 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2539 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2540 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2541 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2542 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2543 .features[FEAT_8000_0001_EDX] = 2544 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2545 CPUID_EXT2_SYSCALL, 2546 .features[FEAT_8000_0001_ECX] = 2547 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2548 .features[FEAT_7_0_EBX] = 2549 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2550 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2551 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2552 CPUID_7_0_EBX_RTM, 2553 .features[FEAT_XSAVE] = 2554 CPUID_XSAVE_XSAVEOPT, 2555 .features[FEAT_6_EAX] = 2556 CPUID_6_EAX_ARAT, 2557 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2558 MSR_VMX_BASIC_TRUE_CTLS, 2559 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2560 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2561 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2562 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2563 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2564 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2565 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2566 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2567 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2568 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2569 .features[FEAT_VMX_EXIT_CTLS] = 2570 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2571 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2572 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2573 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2574 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2575 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2576 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2577 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2578 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2579 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2580 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2581 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2582 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2583 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2584 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2585 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2586 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2587 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2588 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2589 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2590 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2591 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2592 .features[FEAT_VMX_SECONDARY_CTLS] = 2593 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2594 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2595 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2596 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2597 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2598 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2599 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2600 VMX_SECONDARY_EXEC_RDRAND_EXITING | 
VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2601 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2602 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2603 .xlevel = 0x80000008, 2604 .model_id = "Intel Core Processor (Haswell)", 2605 .versions = (X86CPUVersionDefinition[]) { 2606 { .version = 1 }, 2607 { 2608 .version = 2, 2609 .alias = "Haswell-noTSX", 2610 .props = (PropValue[]) { 2611 { "hle", "off" }, 2612 { "rtm", "off" }, 2613 { "stepping", "1" }, 2614 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2615 { /* end of list */ } 2616 }, 2617 }, 2618 { 2619 .version = 3, 2620 .alias = "Haswell-IBRS", 2621 .props = (PropValue[]) { 2622 /* Restore TSX features removed by -v2 above */ 2623 { "hle", "on" }, 2624 { "rtm", "on" }, 2625 /* 2626 * Haswell and Haswell-IBRS had stepping=4 in 2627 * QEMU 4.0 and older 2628 */ 2629 { "stepping", "4" }, 2630 { "spec-ctrl", "on" }, 2631 { "model-id", 2632 "Intel Core Processor (Haswell, IBRS)" }, 2633 { /* end of list */ } 2634 } 2635 }, 2636 { 2637 .version = 4, 2638 .alias = "Haswell-noTSX-IBRS", 2639 .props = (PropValue[]) { 2640 { "hle", "off" }, 2641 { "rtm", "off" }, 2642 /* spec-ctrl was already enabled by -v3 above */ 2643 { "stepping", "1" }, 2644 { "model-id", 2645 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2646 { /* end of list */ } 2647 } 2648 }, 2649 { /* end of list */ } 2650 } 2651 }, 2652 { 2653 .name = "Broadwell", 2654 .level = 0xd, 2655 .vendor = CPUID_VENDOR_INTEL, 2656 .family = 6, 2657 .model = 61, 2658 .stepping = 2, 2659 .features[FEAT_1_EDX] = 2660 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2661 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2662 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2663 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2664 CPUID_DE | CPUID_FP87, 2665 .features[FEAT_1_ECX] = 2666 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2667 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2668 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2669 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2670 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2671 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2672 .features[FEAT_8000_0001_EDX] = 2673 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2674 CPUID_EXT2_SYSCALL, 2675 .features[FEAT_8000_0001_ECX] = 2676 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2677 .features[FEAT_7_0_EBX] = 2678 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2679 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2680 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2681 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2682 CPUID_7_0_EBX_SMAP, 2683 .features[FEAT_XSAVE] = 2684 CPUID_XSAVE_XSAVEOPT, 2685 .features[FEAT_6_EAX] = 2686 CPUID_6_EAX_ARAT, 2687 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2688 MSR_VMX_BASIC_TRUE_CTLS, 2689 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2690 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2691 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2692 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2693 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2694 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2695 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2696 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2697 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | 
MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2698 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2699 .features[FEAT_VMX_EXIT_CTLS] = 2700 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2701 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2702 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2703 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2704 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2705 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2706 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2707 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2708 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2709 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2710 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2711 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2712 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2713 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2714 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2715 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2716 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2717 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2718 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2719 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2720 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2721 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2722 .features[FEAT_VMX_SECONDARY_CTLS] = 2723 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2724 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2725 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2726 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2727 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2728 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2729 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2730 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2731 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2732 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2733 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2734 .xlevel = 0x80000008, 2735 .model_id = "Intel Core Processor (Broadwell)", 2736 .versions = (X86CPUVersionDefinition[]) { 2737 { .version = 1 }, 2738 { 2739 .version = 2, 2740 .alias = "Broadwell-noTSX", 2741 .props = (PropValue[]) { 2742 { "hle", "off" }, 2743 { "rtm", "off" }, 2744 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2745 { /* end of list */ } 2746 }, 2747 }, 2748 { 2749 .version = 3, 2750 .alias = "Broadwell-IBRS", 2751 .props = (PropValue[]) { 2752 /* Restore TSX features removed by -v2 above */ 2753 { "hle", "on" }, 2754 { "rtm", "on" }, 2755 { "spec-ctrl", "on" }, 2756 { "model-id", 2757 "Intel Core Processor (Broadwell, IBRS)" }, 2758 { /* end of list */ } 2759 } 2760 }, 2761 { 2762 .version = 4, 2763 .alias = "Broadwell-noTSX-IBRS", 2764 .props = (PropValue[]) { 2765 { "hle", "off" }, 2766 { "rtm", "off" }, 2767 /* spec-ctrl was already enabled by -v3 above */ 2768 { "model-id", 2769 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2770 { /* end of list */ } 2771 } 2772 }, 2773 { /* end of list */ } 2774 } 2775 }, 2776 { 2777 .name = "Skylake-Client", 2778 .level = 0xd, 2779 .vendor = CPUID_VENDOR_INTEL, 2780 .family = 6, 2781 .model = 94, 2782 .stepping = 3, 2783 .features[FEAT_1_EDX] = 2784 CPUID_VME | CPUID_SSE2 | CPUID_SSE | 
CPUID_FXSR | CPUID_MMX | 2785 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2786 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2787 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2788 CPUID_DE | CPUID_FP87, 2789 .features[FEAT_1_ECX] = 2790 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2791 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2792 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2793 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2794 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2795 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2796 .features[FEAT_8000_0001_EDX] = 2797 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2798 CPUID_EXT2_SYSCALL, 2799 .features[FEAT_8000_0001_ECX] = 2800 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2801 .features[FEAT_7_0_EBX] = 2802 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2803 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2804 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2805 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2806 CPUID_7_0_EBX_SMAP, 2807 /* Missing: XSAVES (not supported by some Linux versions, 2808 * including v4.1 to v4.12). 2809 * KVM doesn't yet expose any XSAVES state save component, 2810 * and the only one defined in Skylake (processor tracing) 2811 * probably will block migration anyway. 2812 */ 2813 .features[FEAT_XSAVE] = 2814 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2815 CPUID_XSAVE_XGETBV1, 2816 .features[FEAT_6_EAX] = 2817 CPUID_6_EAX_ARAT, 2818 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2819 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2820 MSR_VMX_BASIC_TRUE_CTLS, 2821 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2822 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2823 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2824 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2825 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2826 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2827 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2828 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2829 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2830 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2831 .features[FEAT_VMX_EXIT_CTLS] = 2832 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2833 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2834 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2835 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2836 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2837 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2838 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2839 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2840 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2841 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2842 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2843 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2844 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2845 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2846 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2847 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2848 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2849 
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2850 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2851 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2852 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2853 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2854 .features[FEAT_VMX_SECONDARY_CTLS] = 2855 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2856 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2857 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2858 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2859 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2860 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2861 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2862 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2863 .xlevel = 0x80000008, 2864 .model_id = "Intel Core Processor (Skylake)", 2865 .versions = (X86CPUVersionDefinition[]) { 2866 { .version = 1 }, 2867 { 2868 .version = 2, 2869 .alias = "Skylake-Client-IBRS", 2870 .props = (PropValue[]) { 2871 { "spec-ctrl", "on" }, 2872 { "model-id", 2873 "Intel Core Processor (Skylake, IBRS)" }, 2874 { /* end of list */ } 2875 } 2876 }, 2877 { 2878 .version = 3, 2879 .alias = "Skylake-Client-noTSX-IBRS", 2880 .props = (PropValue[]) { 2881 { "hle", "off" }, 2882 { "rtm", "off" }, 2883 { "model-id", 2884 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2885 { /* end of list */ } 2886 } 2887 }, 2888 { /* end of list */ } 2889 } 2890 }, 2891 { 2892 .name = "Skylake-Server", 2893 .level = 0xd, 2894 .vendor = CPUID_VENDOR_INTEL, 2895 .family = 6, 2896 .model = 85, 2897 .stepping = 4, 2898 .features[FEAT_1_EDX] = 2899 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2903 CPUID_DE | CPUID_FP87, 2904 .features[FEAT_1_ECX] = 2905 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2906 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2907 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2908 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2909 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2910 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2911 .features[FEAT_8000_0001_EDX] = 2912 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2913 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2914 .features[FEAT_8000_0001_ECX] = 2915 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2916 .features[FEAT_7_0_EBX] = 2917 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2918 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2919 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2920 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2921 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2922 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2923 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2924 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2925 .features[FEAT_7_0_ECX] = 2926 CPUID_7_0_ECX_PKU, 2927 /* Missing: XSAVES (not supported by some Linux versions, 2928 * including v4.1 to v4.12). 2929 * KVM doesn't yet expose any XSAVES state save component, 2930 * and the only one defined in Skylake (processor tracing) 2931 * probably will block migration anyway. 
2932 */ 2933 .features[FEAT_XSAVE] = 2934 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2935 CPUID_XSAVE_XGETBV1, 2936 .features[FEAT_6_EAX] = 2937 CPUID_6_EAX_ARAT, 2938 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2939 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2940 MSR_VMX_BASIC_TRUE_CTLS, 2941 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2942 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2943 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2944 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2945 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2946 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2947 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2948 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2949 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2950 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2951 .features[FEAT_VMX_EXIT_CTLS] = 2952 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2953 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2954 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2955 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2956 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2957 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2958 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2959 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2960 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2961 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2962 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2963 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2964 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2965 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2966 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2967 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2968 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2969 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2970 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2971 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2972 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2973 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2974 .features[FEAT_VMX_SECONDARY_CTLS] = 2975 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2976 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2977 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2978 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2979 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2980 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2981 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2982 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2983 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2984 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2985 .xlevel = 0x80000008, 2986 .model_id = "Intel Xeon Processor (Skylake)", 2987 .versions = (X86CPUVersionDefinition[]) { 2988 { .version = 1 }, 2989 { 2990 .version = 2, 2991 .alias = "Skylake-Server-IBRS", 2992 .props = (PropValue[]) { 2993 /* clflushopt was not added to Skylake-Server-IBRS */ 2994 /* TODO: add -v3 including clflushopt */ 2995 { "clflushopt", "off" }, 2996 { "spec-ctrl", "on" }, 2997 { "model-id", 2998 "Intel Xeon Processor 
(Skylake, IBRS)" }, 2999 { /* end of list */ } 3000 } 3001 }, 3002 { 3003 .version = 3, 3004 .alias = "Skylake-Server-noTSX-IBRS", 3005 .props = (PropValue[]) { 3006 { "hle", "off" }, 3007 { "rtm", "off" }, 3008 { "model-id", 3009 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3010 { /* end of list */ } 3011 } 3012 }, 3013 { /* end of list */ } 3014 } 3015 }, 3016 { 3017 .name = "Cascadelake-Server", 3018 .level = 0xd, 3019 .vendor = CPUID_VENDOR_INTEL, 3020 .family = 6, 3021 .model = 85, 3022 .stepping = 6, 3023 .features[FEAT_1_EDX] = 3024 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3025 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3026 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3027 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3028 CPUID_DE | CPUID_FP87, 3029 .features[FEAT_1_ECX] = 3030 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3031 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3032 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3033 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3034 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3035 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3036 .features[FEAT_8000_0001_EDX] = 3037 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3038 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3039 .features[FEAT_8000_0001_ECX] = 3040 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3041 .features[FEAT_7_0_EBX] = 3042 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3043 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3044 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3045 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3046 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3047 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3048 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3049 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3050 .features[FEAT_7_0_ECX] = 3051 CPUID_7_0_ECX_PKU | 3052 CPUID_7_0_ECX_AVX512VNNI, 3053 .features[FEAT_7_0_EDX] = 3054 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3055 /* Missing: XSAVES (not supported by some Linux versions, 3056 * including v4.1 to v4.12). 3057 * KVM doesn't yet expose any XSAVES state save component, 3058 * and the only one defined in Skylake (processor tracing) 3059 * probably will block migration anyway. 
3060 */ 3061 .features[FEAT_XSAVE] = 3062 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3063 CPUID_XSAVE_XGETBV1, 3064 .features[FEAT_6_EAX] = 3065 CPUID_6_EAX_ARAT, 3066 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3067 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3068 MSR_VMX_BASIC_TRUE_CTLS, 3069 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3070 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3071 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3072 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3073 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3074 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3075 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3076 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3077 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3078 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3079 .features[FEAT_VMX_EXIT_CTLS] = 3080 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3081 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3082 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3083 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3084 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3085 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3086 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3087 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3088 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3089 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3090 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3091 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3092 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3093 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3094 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3095 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3096 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3097 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3098 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3099 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3100 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3101 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3102 .features[FEAT_VMX_SECONDARY_CTLS] = 3103 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3104 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3105 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3106 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3107 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3108 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3109 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3110 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3111 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3112 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3113 .xlevel = 0x80000008, 3114 .model_id = "Intel Xeon Processor (Cascadelake)", 3115 .versions = (X86CPUVersionDefinition[]) { 3116 { .version = 1 }, 3117 { .version = 2, 3118 .props = (PropValue[]) { 3119 { "arch-capabilities", "on" }, 3120 { "rdctl-no", "on" }, 3121 { "ibrs-all", "on" }, 3122 { "skip-l1dfl-vmentry", "on" }, 3123 { "mds-no", "on" }, 3124 { /* end of list */ } 3125 }, 3126 }, 3127 { .version = 3, 3128 .alias = 
"Cascadelake-Server-noTSX", 3129 .props = (PropValue[]) { 3130 { "hle", "off" }, 3131 { "rtm", "off" }, 3132 { /* end of list */ } 3133 }, 3134 }, 3135 { /* end of list */ } 3136 } 3137 }, 3138 { 3139 .name = "Cooperlake", 3140 .level = 0xd, 3141 .vendor = CPUID_VENDOR_INTEL, 3142 .family = 6, 3143 .model = 85, 3144 .stepping = 10, 3145 .features[FEAT_1_EDX] = 3146 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3147 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3148 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3149 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3150 CPUID_DE | CPUID_FP87, 3151 .features[FEAT_1_ECX] = 3152 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3153 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3154 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3155 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3156 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3157 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3158 .features[FEAT_8000_0001_EDX] = 3159 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3160 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3161 .features[FEAT_8000_0001_ECX] = 3162 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3163 .features[FEAT_7_0_EBX] = 3164 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3165 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3166 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3167 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3168 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3169 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3170 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3171 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3172 .features[FEAT_7_0_ECX] = 3173 CPUID_7_0_ECX_PKU | 3174 CPUID_7_0_ECX_AVX512VNNI, 3175 .features[FEAT_7_0_EDX] = 3176 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3177 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3178 .features[FEAT_ARCH_CAPABILITIES] = 3179 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3180 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3181 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3182 .features[FEAT_7_1_EAX] = 3183 CPUID_7_1_EAX_AVX512_BF16, 3184 /* 3185 * Missing: XSAVES (not supported by some Linux versions, 3186 * including v4.1 to v4.12). 3187 * KVM doesn't yet expose any XSAVES state save component, 3188 * and the only one defined in Skylake (processor tracing) 3189 * probably will block migration anyway. 
3190 */ 3191 .features[FEAT_XSAVE] = 3192 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3193 CPUID_XSAVE_XGETBV1, 3194 .features[FEAT_6_EAX] = 3195 CPUID_6_EAX_ARAT, 3196 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3197 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3198 MSR_VMX_BASIC_TRUE_CTLS, 3199 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3200 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3201 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3202 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3203 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3204 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3205 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3206 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3207 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3208 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3209 .features[FEAT_VMX_EXIT_CTLS] = 3210 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3211 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3212 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3213 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3214 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3215 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3216 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3217 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3218 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3219 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3220 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3221 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3222 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3223 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3224 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3225 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3226 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3227 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3228 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3229 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3230 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3231 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3232 .features[FEAT_VMX_SECONDARY_CTLS] = 3233 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3234 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3235 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3236 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3237 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3238 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3239 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3240 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3241 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3242 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3243 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3244 .xlevel = 0x80000008, 3245 .model_id = "Intel Xeon Processor (Cooperlake)", 3246 }, 3247 { 3248 .name = "Icelake-Client", 3249 .level = 0xd, 3250 .vendor = CPUID_VENDOR_INTEL, 3251 .family = 6, 3252 .model = 126, 3253 .stepping = 0, 3254 .features[FEAT_1_EDX] = 3255 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3256 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | 
CPUID_CMOV | CPUID_MCA | 3257 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3258 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3259 CPUID_DE | CPUID_FP87, 3260 .features[FEAT_1_ECX] = 3261 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3262 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3263 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3264 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3265 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3266 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3267 .features[FEAT_8000_0001_EDX] = 3268 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3269 CPUID_EXT2_SYSCALL, 3270 .features[FEAT_8000_0001_ECX] = 3271 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3272 .features[FEAT_8000_0008_EBX] = 3273 CPUID_8000_0008_EBX_WBNOINVD, 3274 .features[FEAT_7_0_EBX] = 3275 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3276 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3277 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3278 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3279 CPUID_7_0_EBX_SMAP, 3280 .features[FEAT_7_0_ECX] = 3281 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3282 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3283 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3284 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3285 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3286 .features[FEAT_7_0_EDX] = 3287 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3288 /* Missing: XSAVES (not supported by some Linux versions, 3289 * including v4.1 to v4.12). 3290 * KVM doesn't yet expose any XSAVES state save component, 3291 * and the only one defined in Skylake (processor tracing) 3292 * probably will block migration anyway. 
3293 */ 3294 .features[FEAT_XSAVE] = 3295 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3296 CPUID_XSAVE_XGETBV1, 3297 .features[FEAT_6_EAX] = 3298 CPUID_6_EAX_ARAT, 3299 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3300 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3301 MSR_VMX_BASIC_TRUE_CTLS, 3302 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3303 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3304 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3305 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3306 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3307 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3308 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3309 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3310 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3311 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3312 .features[FEAT_VMX_EXIT_CTLS] = 3313 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3314 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3315 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3316 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3317 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3318 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3319 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3320 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3321 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3322 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3323 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3324 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3325 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3326 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3327 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3328 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3329 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3330 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3331 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3332 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3333 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3334 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3335 .features[FEAT_VMX_SECONDARY_CTLS] = 3336 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3337 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3338 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3339 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3340 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3341 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3342 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3343 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3344 .xlevel = 0x80000008, 3345 .model_id = "Intel Core Processor (Icelake)", 3346 .versions = (X86CPUVersionDefinition[]) { 3347 { .version = 1 }, 3348 { 3349 .version = 2, 3350 .alias = "Icelake-Client-noTSX", 3351 .props = (PropValue[]) { 3352 { "hle", "off" }, 3353 { "rtm", "off" }, 3354 { /* end of list */ } 3355 }, 3356 }, 3357 { /* end of list */ } 3358 } 3359 }, 3360 { 3361 .name = "Icelake-Server", 3362 .level = 0xd, 3363 .vendor = CPUID_VENDOR_INTEL, 3364 .family = 6, 3365 .model = 134, 3366 .stepping = 0, 3367 .features[FEAT_1_EDX] = 3368 
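/* Each CPUID_* constant in this initializer is one bit of CPUID.01H:EDX as
 * seen by the guest (e.g. CPUID_FP87 is bit 0 and CPUID_SSE2 is bit 26), so
 * the value is simply the OR of the bits this model advertises. */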
CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3369 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3370 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3371 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3372 CPUID_DE | CPUID_FP87, 3373 .features[FEAT_1_ECX] = 3374 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3375 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3376 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3377 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3378 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3379 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3380 .features[FEAT_8000_0001_EDX] = 3381 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3382 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3383 .features[FEAT_8000_0001_ECX] = 3384 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3385 .features[FEAT_8000_0008_EBX] = 3386 CPUID_8000_0008_EBX_WBNOINVD, 3387 .features[FEAT_7_0_EBX] = 3388 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3389 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3390 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3391 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3392 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3393 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3394 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3395 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3396 .features[FEAT_7_0_ECX] = 3397 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3398 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3399 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3400 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3401 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3402 .features[FEAT_7_0_EDX] = 3403 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3404 /* Missing: XSAVES (not supported by some Linux versions, 3405 * including v4.1 to v4.12). 3406 * KVM doesn't yet expose any XSAVES state save component, 3407 * and the only one defined in Skylake (processor tracing) 3408 * probably will block migration anyway. 
3409 */ 3410 .features[FEAT_XSAVE] = 3411 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3412 CPUID_XSAVE_XGETBV1, 3413 .features[FEAT_6_EAX] = 3414 CPUID_6_EAX_ARAT, 3415 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3416 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3417 MSR_VMX_BASIC_TRUE_CTLS, 3418 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3419 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3420 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3421 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3422 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3423 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3424 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3425 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3426 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3427 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3428 .features[FEAT_VMX_EXIT_CTLS] = 3429 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3430 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3431 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3432 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3433 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3434 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3435 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3436 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3437 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3438 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3439 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3440 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3441 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3442 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3443 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3444 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3445 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3446 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3447 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3448 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3449 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3450 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3451 .features[FEAT_VMX_SECONDARY_CTLS] = 3452 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3453 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3454 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3455 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3456 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3457 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3458 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3459 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3460 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3461 .xlevel = 0x80000008, 3462 .model_id = "Intel Xeon Processor (Icelake)", 3463 .versions = (X86CPUVersionDefinition[]) { 3464 { .version = 1 }, 3465 { 3466 .version = 2, 3467 .alias = "Icelake-Server-noTSX", 3468 .props = (PropValue[]) { 3469 { "hle", "off" }, 3470 { "rtm", "off" }, 3471 { /* end of list */ } 3472 }, 3473 }, 3474 { 3475 .version = 3, 3476 .props = (PropValue[]) { 3477 { "arch-capabilities", "on" }, 3478 { "rdctl-no", "on" }, 3479 { "ibrs-all", "on" }, 3480 { "skip-l1dfl-vmentry", "on" }, 3481 { 
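/* The "*-no" properties in this list publish IA32_ARCH_CAPABILITIES bits
 * (MSR_ARCH_CAP_RDCL_NO and friends) telling the guest it is not affected by
 * the corresponding speculation issues; "arch-capabilities" is switched on
 * first so the guest can actually read that MSR. */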
"mds-no", "on" }, 3482 { "pschange-mc-no", "on" }, 3483 { "taa-no", "on" }, 3484 { /* end of list */ } 3485 }, 3486 }, 3487 { /* end of list */ } 3488 } 3489 }, 3490 { 3491 .name = "Denverton", 3492 .level = 21, 3493 .vendor = CPUID_VENDOR_INTEL, 3494 .family = 6, 3495 .model = 95, 3496 .stepping = 1, 3497 .features[FEAT_1_EDX] = 3498 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3499 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3500 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3501 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3502 CPUID_SSE | CPUID_SSE2, 3503 .features[FEAT_1_ECX] = 3504 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3505 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3506 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3507 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3508 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3509 .features[FEAT_8000_0001_EDX] = 3510 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3511 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3512 .features[FEAT_8000_0001_ECX] = 3513 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3514 .features[FEAT_7_0_EBX] = 3515 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3516 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3517 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3518 .features[FEAT_7_0_EDX] = 3519 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3520 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3521 /* 3522 * Missing: XSAVES (not supported by some Linux versions, 3523 * including v4.1 to v4.12). 3524 * KVM doesn't yet expose any XSAVES state save component, 3525 * and the only one defined in Skylake (processor tracing) 3526 * probably will block migration anyway. 
3527 */ 3528 .features[FEAT_XSAVE] = 3529 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3530 .features[FEAT_6_EAX] = 3531 CPUID_6_EAX_ARAT, 3532 .features[FEAT_ARCH_CAPABILITIES] = 3533 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3534 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3535 MSR_VMX_BASIC_TRUE_CTLS, 3536 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3537 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3538 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3539 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3540 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3541 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3542 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3543 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3544 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3545 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3546 .features[FEAT_VMX_EXIT_CTLS] = 3547 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3548 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3549 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3550 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3551 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3552 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3553 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3554 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3555 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3556 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3557 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3558 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3559 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3560 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3561 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3562 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3563 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3564 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3565 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3566 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3567 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3568 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3569 .features[FEAT_VMX_SECONDARY_CTLS] = 3570 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3571 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3572 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3573 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3574 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3575 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3576 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3577 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3578 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3579 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3580 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3581 .xlevel = 0x80000008, 3582 .model_id = "Intel Atom Processor (Denverton)", 3583 .versions = (X86CPUVersionDefinition[]) { 3584 { .version = 1 }, 3585 { 3586 .version = 2, 3587 .props = (PropValue[]) { 3588 { "monitor", "off" }, 3589 { "mpx", "off" }, 3590 { /* end of list */ }, 3591 }, 3592 }, 3593 { /* end of list */ }, 3594 }, 3595 }, 3596 { 3597 .name = "Snowridge", 
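/* .level is the highest basic CPUID leaf advertised to the guest (reported in
 * CPUID.00H:EAX), while .xlevel further down caps the 0x8000xxxx extended
 * range in the same way. */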
3598 .level = 27, 3599 .vendor = CPUID_VENDOR_INTEL, 3600 .family = 6, 3601 .model = 134, 3602 .stepping = 1, 3603 .features[FEAT_1_EDX] = 3604 /* missing: CPUID_PN CPUID_IA64 */ 3605 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3606 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3607 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3608 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3609 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3610 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3611 CPUID_MMX | 3612 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3613 .features[FEAT_1_ECX] = 3614 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3615 CPUID_EXT_SSSE3 | 3616 CPUID_EXT_CX16 | 3617 CPUID_EXT_SSE41 | 3618 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3619 CPUID_EXT_POPCNT | 3620 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3621 CPUID_EXT_RDRAND, 3622 .features[FEAT_8000_0001_EDX] = 3623 CPUID_EXT2_SYSCALL | 3624 CPUID_EXT2_NX | 3625 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3626 CPUID_EXT2_LM, 3627 .features[FEAT_8000_0001_ECX] = 3628 CPUID_EXT3_LAHF_LM | 3629 CPUID_EXT3_3DNOWPREFETCH, 3630 .features[FEAT_7_0_EBX] = 3631 CPUID_7_0_EBX_FSGSBASE | 3632 CPUID_7_0_EBX_SMEP | 3633 CPUID_7_0_EBX_ERMS | 3634 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3635 CPUID_7_0_EBX_RDSEED | 3636 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3637 CPUID_7_0_EBX_CLWB | 3638 CPUID_7_0_EBX_SHA_NI, 3639 .features[FEAT_7_0_ECX] = 3640 CPUID_7_0_ECX_UMIP | 3641 /* missing bit 5 */ 3642 CPUID_7_0_ECX_GFNI | 3643 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3644 CPUID_7_0_ECX_MOVDIR64B, 3645 .features[FEAT_7_0_EDX] = 3646 CPUID_7_0_EDX_SPEC_CTRL | 3647 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3648 CPUID_7_0_EDX_CORE_CAPABILITY, 3649 .features[FEAT_CORE_CAPABILITY] = 3650 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3651 /* 3652 * Missing: XSAVES (not supported by some Linux versions, 3653 * including v4.1 to v4.12). 3654 * KVM doesn't yet expose any XSAVES state save component, 3655 * and the only one defined in Skylake (processor tracing) 3656 * probably will block migration anyway. 
3657 */ 3658 .features[FEAT_XSAVE] = 3659 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3660 CPUID_XSAVE_XGETBV1, 3661 .features[FEAT_6_EAX] = 3662 CPUID_6_EAX_ARAT, 3663 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3664 MSR_VMX_BASIC_TRUE_CTLS, 3665 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3666 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3667 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3668 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3669 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3670 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3671 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3672 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3673 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3674 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3675 .features[FEAT_VMX_EXIT_CTLS] = 3676 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3677 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3678 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3679 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3680 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3681 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3682 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3683 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3684 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3685 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3686 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3687 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3688 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3689 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3690 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3691 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3692 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3693 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3694 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3695 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3696 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3697 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3698 .features[FEAT_VMX_SECONDARY_CTLS] = 3699 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3700 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3701 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3702 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3703 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3704 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3705 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3706 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3707 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3708 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3709 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3710 .xlevel = 0x80000008, 3711 .model_id = "Intel Atom Processor (SnowRidge)", 3712 .versions = (X86CPUVersionDefinition[]) { 3713 { .version = 1 }, 3714 { 3715 .version = 2, 3716 .props = (PropValue[]) { 3717 { "mpx", "off" }, 3718 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3719 { /* end of list */ }, 3720 }, 3721 }, 3722 { /* end of list */ }, 3723 }, 3724 }, 3725 { 3726 .name = "KnightsMill", 3727 .level = 0xd, 3728 .vendor = CPUID_VENDOR_INTEL, 
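/* family/model/stepping below are folded into the CPUID.01H:EAX version word;
 * values that do not fit the 4-bit base fields spill into the extended
 * family/model fields, mirroring what the x86_cpuid_version_set_family() and
 * x86_cpuid_version_set_model() property setters later in this file do. */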
3729 .family = 6, 3730 .model = 133, 3731 .stepping = 0, 3732 .features[FEAT_1_EDX] = 3733 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3734 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3735 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3736 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3737 CPUID_PSE | CPUID_DE | CPUID_FP87, 3738 .features[FEAT_1_ECX] = 3739 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3740 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3741 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3742 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3743 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3744 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3745 .features[FEAT_8000_0001_EDX] = 3746 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3747 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3748 .features[FEAT_8000_0001_ECX] = 3749 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3750 .features[FEAT_7_0_EBX] = 3751 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3752 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3753 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3754 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3755 CPUID_7_0_EBX_AVX512ER, 3756 .features[FEAT_7_0_ECX] = 3757 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3758 .features[FEAT_7_0_EDX] = 3759 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3760 .features[FEAT_XSAVE] = 3761 CPUID_XSAVE_XSAVEOPT, 3762 .features[FEAT_6_EAX] = 3763 CPUID_6_EAX_ARAT, 3764 .xlevel = 0x80000008, 3765 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3766 }, 3767 { 3768 .name = "Opteron_G1", 3769 .level = 5, 3770 .vendor = CPUID_VENDOR_AMD, 3771 .family = 15, 3772 .model = 6, 3773 .stepping = 1, 3774 .features[FEAT_1_EDX] = 3775 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3776 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3777 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3778 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3779 CPUID_DE | CPUID_FP87, 3780 .features[FEAT_1_ECX] = 3781 CPUID_EXT_SSE3, 3782 .features[FEAT_8000_0001_EDX] = 3783 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3784 .xlevel = 0x80000008, 3785 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3786 }, 3787 { 3788 .name = "Opteron_G2", 3789 .level = 5, 3790 .vendor = CPUID_VENDOR_AMD, 3791 .family = 15, 3792 .model = 6, 3793 .stepping = 1, 3794 .features[FEAT_1_EDX] = 3795 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3796 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3797 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3798 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3799 CPUID_DE | CPUID_FP87, 3800 .features[FEAT_1_ECX] = 3801 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3802 .features[FEAT_8000_0001_EDX] = 3803 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3804 .features[FEAT_8000_0001_ECX] = 3805 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3806 .xlevel = 0x80000008, 3807 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3808 }, 3809 { 3810 .name = "Opteron_G3", 3811 .level = 5, 3812 .vendor = CPUID_VENDOR_AMD, 3813 .family = 16, 3814 .model = 2, 3815 .stepping = 3, 3816 .features[FEAT_1_EDX] = 3817 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3818 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3819 CPUID_PGE | CPUID_MTRR | CPUID_SEP | 
CPUID_APIC | CPUID_CX8 | 3820 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3821 CPUID_DE | CPUID_FP87, 3822 .features[FEAT_1_ECX] = 3823 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3824 CPUID_EXT_SSE3, 3825 .features[FEAT_8000_0001_EDX] = 3826 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3827 CPUID_EXT2_RDTSCP, 3828 .features[FEAT_8000_0001_ECX] = 3829 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3830 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3831 .xlevel = 0x80000008, 3832 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3833 }, 3834 { 3835 .name = "Opteron_G4", 3836 .level = 0xd, 3837 .vendor = CPUID_VENDOR_AMD, 3838 .family = 21, 3839 .model = 1, 3840 .stepping = 2, 3841 .features[FEAT_1_EDX] = 3842 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3843 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3844 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3845 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3846 CPUID_DE | CPUID_FP87, 3847 .features[FEAT_1_ECX] = 3848 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3849 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3850 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3851 CPUID_EXT_SSE3, 3852 .features[FEAT_8000_0001_EDX] = 3853 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3854 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3855 .features[FEAT_8000_0001_ECX] = 3856 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3857 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3858 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3859 CPUID_EXT3_LAHF_LM, 3860 .features[FEAT_SVM] = 3861 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3862 /* no xsaveopt! */ 3863 .xlevel = 0x8000001A, 3864 .model_id = "AMD Opteron 62xx class CPU", 3865 }, 3866 { 3867 .name = "Opteron_G5", 3868 .level = 0xd, 3869 .vendor = CPUID_VENDOR_AMD, 3870 .family = 21, 3871 .model = 2, 3872 .stepping = 0, 3873 .features[FEAT_1_EDX] = 3874 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3875 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3876 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3877 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3878 CPUID_DE | CPUID_FP87, 3879 .features[FEAT_1_ECX] = 3880 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3881 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3882 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3883 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3884 .features[FEAT_8000_0001_EDX] = 3885 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3886 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3887 .features[FEAT_8000_0001_ECX] = 3888 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3889 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3890 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3891 CPUID_EXT3_LAHF_LM, 3892 .features[FEAT_SVM] = 3893 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3894 /* no xsaveopt! 
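   (Opteron_G4 and Opteron_G5 advertise XSAVE itself through CPUID_EXT_XSAVE,
   but the XSAVEOPT instruction is absent on those parts, so FEAT_XSAVE is
   deliberately left empty for both models.)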
*/ 3895 .xlevel = 0x8000001A, 3896 .model_id = "AMD Opteron 63xx class CPU", 3897 }, 3898 { 3899 .name = "EPYC", 3900 .level = 0xd, 3901 .vendor = CPUID_VENDOR_AMD, 3902 .family = 23, 3903 .model = 1, 3904 .stepping = 2, 3905 .features[FEAT_1_EDX] = 3906 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3907 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3908 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3909 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3910 CPUID_VME | CPUID_FP87, 3911 .features[FEAT_1_ECX] = 3912 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3913 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3914 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3915 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3916 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3917 .features[FEAT_8000_0001_EDX] = 3918 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3919 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3920 CPUID_EXT2_SYSCALL, 3921 .features[FEAT_8000_0001_ECX] = 3922 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3923 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3924 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3925 CPUID_EXT3_TOPOEXT, 3926 .features[FEAT_7_0_EBX] = 3927 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3928 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3929 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3930 CPUID_7_0_EBX_SHA_NI, 3931 .features[FEAT_XSAVE] = 3932 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3933 CPUID_XSAVE_XGETBV1, 3934 .features[FEAT_6_EAX] = 3935 CPUID_6_EAX_ARAT, 3936 .features[FEAT_SVM] = 3937 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3938 .xlevel = 0x8000001E, 3939 .model_id = "AMD EPYC Processor", 3940 .cache_info = &epyc_cache_info, 3941 .use_epyc_apic_id_encoding = 1, 3942 .versions = (X86CPUVersionDefinition[]) { 3943 { .version = 1 }, 3944 { 3945 .version = 2, 3946 .alias = "EPYC-IBPB", 3947 .props = (PropValue[]) { 3948 { "ibpb", "on" }, 3949 { "model-id", 3950 "AMD EPYC Processor (with IBPB)" }, 3951 { /* end of list */ } 3952 } 3953 }, 3954 { 3955 .version = 3, 3956 .props = (PropValue[]) { 3957 { "ibpb", "on" }, 3958 { "perfctr-core", "on" }, 3959 { "clzero", "on" }, 3960 { "xsaveerptr", "on" }, 3961 { "xsaves", "on" }, 3962 { "model-id", 3963 "AMD EPYC Processor" }, 3964 { /* end of list */ } 3965 } 3966 }, 3967 { /* end of list */ } 3968 } 3969 }, 3970 { 3971 .name = "Dhyana", 3972 .level = 0xd, 3973 .vendor = CPUID_VENDOR_HYGON, 3974 .family = 24, 3975 .model = 0, 3976 .stepping = 1, 3977 .features[FEAT_1_EDX] = 3978 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3979 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3980 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3981 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3982 CPUID_VME | CPUID_FP87, 3983 .features[FEAT_1_ECX] = 3984 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3985 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 3986 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3987 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3988 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 3989 .features[FEAT_8000_0001_EDX] = 3990 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3991 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3992 CPUID_EXT2_SYSCALL, 3993 .features[FEAT_8000_0001_ECX] = 3994 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 
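/* CPUID.80000001H:ECX bits; CPUID_EXT3_TOPOEXT further down this list is what
 * exposes the topology leaves 0x8000001D/0x8000001E, which is why the
 * EPYC-derived models raise xlevel to 0x8000001E. */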
3995 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3996 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3997 CPUID_EXT3_TOPOEXT, 3998 .features[FEAT_8000_0008_EBX] = 3999 CPUID_8000_0008_EBX_IBPB, 4000 .features[FEAT_7_0_EBX] = 4001 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4002 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4003 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4004 /* 4005 * Missing: XSAVES (not supported by some Linux versions, 4006 * including v4.1 to v4.12). 4007 * KVM doesn't yet expose any XSAVES state save component. 4008 */ 4009 .features[FEAT_XSAVE] = 4010 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4011 CPUID_XSAVE_XGETBV1, 4012 .features[FEAT_6_EAX] = 4013 CPUID_6_EAX_ARAT, 4014 .features[FEAT_SVM] = 4015 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4016 .xlevel = 0x8000001E, 4017 .model_id = "Hygon Dhyana Processor", 4018 .cache_info = &epyc_cache_info, 4019 }, 4020 { 4021 .name = "EPYC-Rome", 4022 .level = 0xd, 4023 .vendor = CPUID_VENDOR_AMD, 4024 .family = 23, 4025 .model = 49, 4026 .stepping = 0, 4027 .features[FEAT_1_EDX] = 4028 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4029 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4030 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4031 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4032 CPUID_VME | CPUID_FP87, 4033 .features[FEAT_1_ECX] = 4034 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4035 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4036 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4037 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4038 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4039 .features[FEAT_8000_0001_EDX] = 4040 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4041 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4042 CPUID_EXT2_SYSCALL, 4043 .features[FEAT_8000_0001_ECX] = 4044 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4045 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4046 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4047 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4048 .features[FEAT_8000_0008_EBX] = 4049 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4050 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4051 CPUID_8000_0008_EBX_STIBP, 4052 .features[FEAT_7_0_EBX] = 4053 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4054 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4055 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4056 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4057 .features[FEAT_7_0_ECX] = 4058 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4059 .features[FEAT_XSAVE] = 4060 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4061 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4062 .features[FEAT_6_EAX] = 4063 CPUID_6_EAX_ARAT, 4064 .features[FEAT_SVM] = 4065 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4066 .xlevel = 0x8000001E, 4067 .model_id = "AMD EPYC-Rome Processor", 4068 .cache_info = &epyc_rome_cache_info, 4069 .use_epyc_apic_id_encoding = 1, 4070 }, 4071 }; 4072 4073 /* KVM-specific features that are automatically added/removed 4074 * from all CPU models when KVM is enabled. 
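 * For instance, "kvmclock" and "x2apic" below are forced on while "acpi",
 * "monitor" and "svm" are forced off; an explicit property on the command
 * line (say "-cpu EPYC,svm=on", given here purely as an example) can still
 * override the default, and boards may adjust individual entries via
 * x86_cpu_change_kvm_default() further down.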
4075 */ 4076 static PropValue kvm_default_props[] = { 4077 { "kvmclock", "on" }, 4078 { "kvm-nopiodelay", "on" }, 4079 { "kvm-asyncpf", "on" }, 4080 { "kvm-steal-time", "on" }, 4081 { "kvm-pv-eoi", "on" }, 4082 { "kvmclock-stable-bit", "on" }, 4083 { "x2apic", "on" }, 4084 { "acpi", "off" }, 4085 { "monitor", "off" }, 4086 { "svm", "off" }, 4087 { NULL, NULL }, 4088 }; 4089 4090 /* TCG-specific defaults that override all CPU models when using TCG 4091 */ 4092 static PropValue tcg_default_props[] = { 4093 { "vme", "off" }, 4094 { NULL, NULL }, 4095 }; 4096 4097 4098 /* 4099 * We resolve CPU model aliases using -v1 when using "-machine 4100 * none", but this is just for compatibility while libvirt isn't 4101 * adapted to resolve CPU model versions before creating VMs. 4102 * See "Runnability guarantee of CPU models" at * qemu-deprecated.texi. 4103 */ 4104 X86CPUVersion default_cpu_version = 1; 4105 4106 void x86_cpu_set_default_version(X86CPUVersion version) 4107 { 4108 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4109 assert(version != CPU_VERSION_AUTO); 4110 default_cpu_version = version; 4111 } 4112 4113 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4114 { 4115 int v = 0; 4116 const X86CPUVersionDefinition *vdef = 4117 x86_cpu_def_get_versions(model->cpudef); 4118 while (vdef->version) { 4119 v = vdef->version; 4120 vdef++; 4121 } 4122 return v; 4123 } 4124 4125 /* Return the actual version being used for a specific CPU model */ 4126 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4127 { 4128 X86CPUVersion v = model->version; 4129 if (v == CPU_VERSION_AUTO) { 4130 v = default_cpu_version; 4131 } 4132 if (v == CPU_VERSION_LATEST) { 4133 return x86_cpu_model_last_version(model); 4134 } 4135 return v; 4136 } 4137 4138 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4139 { 4140 PropValue *pv; 4141 for (pv = kvm_default_props; pv->prop; pv++) { 4142 if (!strcmp(pv->prop, prop)) { 4143 pv->value = value; 4144 break; 4145 } 4146 } 4147 4148 /* It is valid to call this function only for properties that 4149 * are already present in the kvm_default_props table. 4150 */ 4151 assert(pv->prop); 4152 } 4153 4154 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 4155 bool migratable_only); 4156 4157 static bool lmce_supported(void) 4158 { 4159 uint64_t mce_cap = 0; 4160 4161 #ifdef CONFIG_KVM 4162 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4163 return false; 4164 } 4165 #endif 4166 4167 return !!(mce_cap & MCG_LMCE_P); 4168 } 4169 4170 #define CPUID_MODEL_ID_SZ 48 4171 4172 /** 4173 * cpu_x86_fill_model_id: 4174 * Get CPUID model ID string from host CPU. 4175 * 4176 * @str should have at least CPUID_MODEL_ID_SZ bytes 4177 * 4178 * The function does NOT add a null terminator to the string 4179 * automatically. 
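 * Callers are expected to pass a zero-initialized buffer of at least
 * CPUID_MODEL_ID_SZ + 1 bytes (as max_x86_cpu_initfn() below does) if they
 * want the result to stay NUL-terminated.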
4180 */ 4181 static int cpu_x86_fill_model_id(char *str) 4182 { 4183 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4184 int i; 4185 4186 for (i = 0; i < 3; i++) { 4187 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4188 memcpy(str + i * 16 + 0, &eax, 4); 4189 memcpy(str + i * 16 + 4, &ebx, 4); 4190 memcpy(str + i * 16 + 8, &ecx, 4); 4191 memcpy(str + i * 16 + 12, &edx, 4); 4192 } 4193 return 0; 4194 } 4195 4196 static Property max_x86_cpu_properties[] = { 4197 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4198 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4199 DEFINE_PROP_END_OF_LIST() 4200 }; 4201 4202 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4203 { 4204 DeviceClass *dc = DEVICE_CLASS(oc); 4205 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4206 4207 xcc->ordering = 9; 4208 4209 xcc->model_description = 4210 "Enables all features supported by the accelerator in the current host"; 4211 4212 device_class_set_props(dc, max_x86_cpu_properties); 4213 } 4214 4215 static void max_x86_cpu_initfn(Object *obj) 4216 { 4217 X86CPU *cpu = X86_CPU(obj); 4218 CPUX86State *env = &cpu->env; 4219 KVMState *s = kvm_state; 4220 4221 /* We can't fill the features array here because we don't know yet if 4222 * "migratable" is true or false. 4223 */ 4224 cpu->max_features = true; 4225 4226 if (accel_uses_host_cpuid()) { 4227 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4228 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4229 int family, model, stepping; 4230 4231 host_vendor_fms(vendor, &family, &model, &stepping); 4232 cpu_x86_fill_model_id(model_id); 4233 4234 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort); 4235 object_property_set_int(OBJECT(cpu), family, "family", &error_abort); 4236 object_property_set_int(OBJECT(cpu), model, "model", &error_abort); 4237 object_property_set_int(OBJECT(cpu), stepping, "stepping", 4238 &error_abort); 4239 object_property_set_str(OBJECT(cpu), model_id, "model-id", 4240 &error_abort); 4241 4242 if (kvm_enabled()) { 4243 env->cpuid_min_level = 4244 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4245 env->cpuid_min_xlevel = 4246 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4247 env->cpuid_min_xlevel2 = 4248 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4249 } else { 4250 env->cpuid_min_level = 4251 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4252 env->cpuid_min_xlevel = 4253 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4254 env->cpuid_min_xlevel2 = 4255 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4256 } 4257 4258 if (lmce_supported()) { 4259 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort); 4260 } 4261 } else { 4262 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD, 4263 "vendor", &error_abort); 4264 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort); 4265 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort); 4266 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort); 4267 object_property_set_str(OBJECT(cpu), 4268 "QEMU TCG CPU version " QEMU_HW_VERSION, 4269 "model-id", &error_abort); 4270 } 4271 4272 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort); 4273 } 4274 4275 static const TypeInfo max_x86_cpu_type_info = { 4276 .name = X86_CPU_TYPE_NAME("max"), 4277 .parent = TYPE_X86_CPU, 4278 .instance_init = max_x86_cpu_initfn, 4279 .class_init = max_x86_cpu_class_init, 4280 }; 4281 4282 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4283 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4284 { 
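/* "host" differs from "max" above mainly in that it requires hardware
 * acceleration (host_cpuid_required is set just below), whereas "-cpu max"
 * also works under TCG; the ordering values (8 here versus 9 for "max")
 * essentially just affect where the models appear in CPU listings. */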
4285 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4286 4287 xcc->host_cpuid_required = true; 4288 xcc->ordering = 8; 4289 4290 #if defined(CONFIG_KVM) 4291 xcc->model_description = 4292 "KVM processor with all supported host features "; 4293 #elif defined(CONFIG_HVF) 4294 xcc->model_description = 4295 "HVF processor with all supported host features "; 4296 #endif 4297 } 4298 4299 static const TypeInfo host_x86_cpu_type_info = { 4300 .name = X86_CPU_TYPE_NAME("host"), 4301 .parent = X86_CPU_TYPE_NAME("max"), 4302 .class_init = host_x86_cpu_class_init, 4303 }; 4304 4305 #endif 4306 4307 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4308 { 4309 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4310 4311 switch (f->type) { 4312 case CPUID_FEATURE_WORD: 4313 { 4314 const char *reg = get_register_name_32(f->cpuid.reg); 4315 assert(reg); 4316 return g_strdup_printf("CPUID.%02XH:%s", 4317 f->cpuid.eax, reg); 4318 } 4319 case MSR_FEATURE_WORD: 4320 return g_strdup_printf("MSR(%02XH)", 4321 f->msr.index); 4322 } 4323 4324 return NULL; 4325 } 4326 4327 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4328 { 4329 FeatureWord w; 4330 4331 for (w = 0; w < FEATURE_WORDS; w++) { 4332 if (cpu->filtered_features[w]) { 4333 return true; 4334 } 4335 } 4336 4337 return false; 4338 } 4339 4340 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4341 const char *verbose_prefix) 4342 { 4343 CPUX86State *env = &cpu->env; 4344 FeatureWordInfo *f = &feature_word_info[w]; 4345 int i; 4346 4347 if (!cpu->force_features) { 4348 env->features[w] &= ~mask; 4349 } 4350 cpu->filtered_features[w] |= mask; 4351 4352 if (!verbose_prefix) { 4353 return; 4354 } 4355 4356 for (i = 0; i < 64; ++i) { 4357 if ((1ULL << i) & mask) { 4358 g_autofree char *feat_word_str = feature_word_description(f, i); 4359 warn_report("%s: %s%s%s [bit %d]", 4360 verbose_prefix, 4361 feat_word_str, 4362 f->feat_names[i] ? "." : "", 4363 f->feat_names[i] ? f->feat_names[i] : "", i); 4364 } 4365 } 4366 } 4367 4368 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4369 const char *name, void *opaque, 4370 Error **errp) 4371 { 4372 X86CPU *cpu = X86_CPU(obj); 4373 CPUX86State *env = &cpu->env; 4374 int64_t value; 4375 4376 value = (env->cpuid_version >> 8) & 0xf; 4377 if (value == 0xf) { 4378 value += (env->cpuid_version >> 20) & 0xff; 4379 } 4380 visit_type_int(v, name, &value, errp); 4381 } 4382 4383 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4384 const char *name, void *opaque, 4385 Error **errp) 4386 { 4387 X86CPU *cpu = X86_CPU(obj); 4388 CPUX86State *env = &cpu->env; 4389 const int64_t min = 0; 4390 const int64_t max = 0xff + 0xf; 4391 Error *local_err = NULL; 4392 int64_t value; 4393 4394 visit_type_int(v, name, &value, &local_err); 4395 if (local_err) { 4396 error_propagate(errp, local_err); 4397 return; 4398 } 4399 if (value < min || value > max) { 4400 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4401 name ? 
name : "null", value, min, max); 4402 return; 4403 } 4404 4405 env->cpuid_version &= ~0xff00f00; 4406 if (value > 0x0f) { 4407 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4408 } else { 4409 env->cpuid_version |= value << 8; 4410 } 4411 } 4412 4413 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4414 const char *name, void *opaque, 4415 Error **errp) 4416 { 4417 X86CPU *cpu = X86_CPU(obj); 4418 CPUX86State *env = &cpu->env; 4419 int64_t value; 4420 4421 value = (env->cpuid_version >> 4) & 0xf; 4422 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4423 visit_type_int(v, name, &value, errp); 4424 } 4425 4426 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4427 const char *name, void *opaque, 4428 Error **errp) 4429 { 4430 X86CPU *cpu = X86_CPU(obj); 4431 CPUX86State *env = &cpu->env; 4432 const int64_t min = 0; 4433 const int64_t max = 0xff; 4434 Error *local_err = NULL; 4435 int64_t value; 4436 4437 visit_type_int(v, name, &value, &local_err); 4438 if (local_err) { 4439 error_propagate(errp, local_err); 4440 return; 4441 } 4442 if (value < min || value > max) { 4443 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4444 name ? name : "null", value, min, max); 4445 return; 4446 } 4447 4448 env->cpuid_version &= ~0xf00f0; 4449 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4450 } 4451 4452 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4453 const char *name, void *opaque, 4454 Error **errp) 4455 { 4456 X86CPU *cpu = X86_CPU(obj); 4457 CPUX86State *env = &cpu->env; 4458 int64_t value; 4459 4460 value = env->cpuid_version & 0xf; 4461 visit_type_int(v, name, &value, errp); 4462 } 4463 4464 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4465 const char *name, void *opaque, 4466 Error **errp) 4467 { 4468 X86CPU *cpu = X86_CPU(obj); 4469 CPUX86State *env = &cpu->env; 4470 const int64_t min = 0; 4471 const int64_t max = 0xf; 4472 Error *local_err = NULL; 4473 int64_t value; 4474 4475 visit_type_int(v, name, &value, &local_err); 4476 if (local_err) { 4477 error_propagate(errp, local_err); 4478 return; 4479 } 4480 if (value < min || value > max) { 4481 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4482 name ? 
name : "null", value, min, max); 4483 return; 4484 } 4485 4486 env->cpuid_version &= ~0xf; 4487 env->cpuid_version |= value & 0xf; 4488 } 4489 4490 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4491 { 4492 X86CPU *cpu = X86_CPU(obj); 4493 CPUX86State *env = &cpu->env; 4494 char *value; 4495 4496 value = g_malloc(CPUID_VENDOR_SZ + 1); 4497 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4498 env->cpuid_vendor3); 4499 return value; 4500 } 4501 4502 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4503 Error **errp) 4504 { 4505 X86CPU *cpu = X86_CPU(obj); 4506 CPUX86State *env = &cpu->env; 4507 int i; 4508 4509 if (strlen(value) != CPUID_VENDOR_SZ) { 4510 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4511 return; 4512 } 4513 4514 env->cpuid_vendor1 = 0; 4515 env->cpuid_vendor2 = 0; 4516 env->cpuid_vendor3 = 0; 4517 for (i = 0; i < 4; i++) { 4518 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4519 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4520 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4521 } 4522 } 4523 4524 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4525 { 4526 X86CPU *cpu = X86_CPU(obj); 4527 CPUX86State *env = &cpu->env; 4528 char *value; 4529 int i; 4530 4531 value = g_malloc(48 + 1); 4532 for (i = 0; i < 48; i++) { 4533 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4534 } 4535 value[48] = '\0'; 4536 return value; 4537 } 4538 4539 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4540 Error **errp) 4541 { 4542 X86CPU *cpu = X86_CPU(obj); 4543 CPUX86State *env = &cpu->env; 4544 int c, len, i; 4545 4546 if (model_id == NULL) { 4547 model_id = ""; 4548 } 4549 len = strlen(model_id); 4550 memset(env->cpuid_model, 0, 48); 4551 for (i = 0; i < 48; i++) { 4552 if (i >= len) { 4553 c = '\0'; 4554 } else { 4555 c = (uint8_t)model_id[i]; 4556 } 4557 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4558 } 4559 } 4560 4561 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4562 void *opaque, Error **errp) 4563 { 4564 X86CPU *cpu = X86_CPU(obj); 4565 int64_t value; 4566 4567 value = cpu->env.tsc_khz * 1000; 4568 visit_type_int(v, name, &value, errp); 4569 } 4570 4571 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4572 void *opaque, Error **errp) 4573 { 4574 X86CPU *cpu = X86_CPU(obj); 4575 const int64_t min = 0; 4576 const int64_t max = INT64_MAX; 4577 Error *local_err = NULL; 4578 int64_t value; 4579 4580 visit_type_int(v, name, &value, &local_err); 4581 if (local_err) { 4582 error_propagate(errp, local_err); 4583 return; 4584 } 4585 if (value < min || value > max) { 4586 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4587 name ? name : "null", value, min, max); 4588 return; 4589 } 4590 4591 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4592 } 4593 4594 /* Generic getter for "feature-words" and "filtered-features" properties */ 4595 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4596 const char *name, void *opaque, 4597 Error **errp) 4598 { 4599 uint64_t *array = (uint64_t *)opaque; 4600 FeatureWord w; 4601 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4602 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4603 X86CPUFeatureWordInfoList *list = NULL; 4604 4605 for (w = 0; w < FEATURE_WORDS; w++) { 4606 FeatureWordInfo *wi = &feature_word_info[w]; 4607 /* 4608 * We didn't have MSR features when "feature-words" was 4609 * introduced. 
Therefore skipped other type entries. 4610 */ 4611 if (wi->type != CPUID_FEATURE_WORD) { 4612 continue; 4613 } 4614 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4615 qwi->cpuid_input_eax = wi->cpuid.eax; 4616 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4617 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4618 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4619 qwi->features = array[w]; 4620 4621 /* List will be in reverse order, but order shouldn't matter */ 4622 list_entries[w].next = list; 4623 list_entries[w].value = &word_infos[w]; 4624 list = &list_entries[w]; 4625 } 4626 4627 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4628 } 4629 4630 /* Convert all '_' in a feature string option name to '-', to make feature 4631 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4632 */ 4633 static inline void feat2prop(char *s) 4634 { 4635 while ((s = strchr(s, '_'))) { 4636 *s = '-'; 4637 } 4638 } 4639 4640 /* Return the feature property name for a feature flag bit */ 4641 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4642 { 4643 const char *name; 4644 /* XSAVE components are automatically enabled by other features, 4645 * so return the original feature name instead 4646 */ 4647 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4648 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4649 4650 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4651 x86_ext_save_areas[comp].bits) { 4652 w = x86_ext_save_areas[comp].feature; 4653 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4654 } 4655 } 4656 4657 assert(bitnr < 64); 4658 assert(w < FEATURE_WORDS); 4659 name = feature_word_info[w].feat_names[bitnr]; 4660 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4661 return name; 4662 } 4663 4664 /* Compatibily hack to maintain legacy +-feat semantic, 4665 * where +-feat overwrites any feature set by 4666 * feat=on|feat even if the later is parsed after +-feat 4667 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4668 */ 4669 static GList *plus_features, *minus_features; 4670 4671 static gint compare_string(gconstpointer a, gconstpointer b) 4672 { 4673 return g_strcmp0(a, b); 4674 } 4675 4676 /* Parse "+feature,-feature,feature=foo" CPU feature string 4677 */ 4678 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4679 Error **errp) 4680 { 4681 char *featurestr; /* Single 'key=value" string being parsed */ 4682 static bool cpu_globals_initialized; 4683 bool ambiguous = false; 4684 4685 if (cpu_globals_initialized) { 4686 return; 4687 } 4688 cpu_globals_initialized = true; 4689 4690 if (!features) { 4691 return; 4692 } 4693 4694 for (featurestr = strtok(features, ","); 4695 featurestr; 4696 featurestr = strtok(NULL, ",")) { 4697 const char *name; 4698 const char *val = NULL; 4699 char *eq = NULL; 4700 char num[32]; 4701 GlobalProperty *prop; 4702 4703 /* Compatibility syntax: */ 4704 if (featurestr[0] == '+') { 4705 plus_features = g_list_append(plus_features, 4706 g_strdup(featurestr + 1)); 4707 continue; 4708 } else if (featurestr[0] == '-') { 4709 minus_features = g_list_append(minus_features, 4710 g_strdup(featurestr + 1)); 4711 continue; 4712 } 4713 4714 eq = strchr(featurestr, '='); 4715 if (eq) { 4716 *eq++ = 0; 4717 val = eq; 4718 } else { 4719 val = "on"; 4720 } 4721 4722 feat2prop(featurestr); 4723 name = featurestr; 4724 4725 if (g_list_find_custom(plus_features, name, compare_string)) { 4726 warn_report("Ambiguous CPU model string. 
" 4727 "Don't mix both \"+%s\" and \"%s=%s\"", 4728 name, name, val); 4729 ambiguous = true; 4730 } 4731 if (g_list_find_custom(minus_features, name, compare_string)) { 4732 warn_report("Ambiguous CPU model string. " 4733 "Don't mix both \"-%s\" and \"%s=%s\"", 4734 name, name, val); 4735 ambiguous = true; 4736 } 4737 4738 /* Special case: */ 4739 if (!strcmp(name, "tsc-freq")) { 4740 int ret; 4741 uint64_t tsc_freq; 4742 4743 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4744 if (ret < 0 || tsc_freq > INT64_MAX) { 4745 error_setg(errp, "bad numerical value %s", val); 4746 return; 4747 } 4748 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4749 val = num; 4750 name = "tsc-frequency"; 4751 } 4752 4753 prop = g_new0(typeof(*prop), 1); 4754 prop->driver = typename; 4755 prop->property = g_strdup(name); 4756 prop->value = g_strdup(val); 4757 qdev_prop_register_global(prop); 4758 } 4759 4760 if (ambiguous) { 4761 warn_report("Compatibility of ambiguous CPU model " 4762 "strings won't be kept on future QEMU versions"); 4763 } 4764 } 4765 4766 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4767 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4768 4769 /* Build a list with the name of all features on a feature word array */ 4770 static void x86_cpu_list_feature_names(FeatureWordArray features, 4771 strList **feat_names) 4772 { 4773 FeatureWord w; 4774 strList **next = feat_names; 4775 4776 for (w = 0; w < FEATURE_WORDS; w++) { 4777 uint64_t filtered = features[w]; 4778 int i; 4779 for (i = 0; i < 64; i++) { 4780 if (filtered & (1ULL << i)) { 4781 strList *new = g_new0(strList, 1); 4782 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4783 *next = new; 4784 next = &new->next; 4785 } 4786 } 4787 } 4788 } 4789 4790 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4791 const char *name, void *opaque, 4792 Error **errp) 4793 { 4794 X86CPU *xc = X86_CPU(obj); 4795 strList *result = NULL; 4796 4797 x86_cpu_list_feature_names(xc->filtered_features, &result); 4798 visit_type_strList(v, "unavailable-features", &result, errp); 4799 } 4800 4801 /* Check for missing features that may prevent the CPU class from 4802 * running using the current machine and accelerator. 4803 */ 4804 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4805 strList **missing_feats) 4806 { 4807 X86CPU *xc; 4808 Error *err = NULL; 4809 strList **next = missing_feats; 4810 4811 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4812 strList *new = g_new0(strList, 1); 4813 new->value = g_strdup("kvm"); 4814 *missing_feats = new; 4815 return; 4816 } 4817 4818 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4819 4820 x86_cpu_expand_features(xc, &err); 4821 if (err) { 4822 /* Errors at x86_cpu_expand_features should never happen, 4823 * but in case it does, just report the model as not 4824 * runnable at all using the "type" property. 
4825 */ 4826 strList *new = g_new0(strList, 1); 4827 new->value = g_strdup("type"); 4828 *next = new; 4829 next = &new->next; 4830 } 4831 4832 x86_cpu_filter_features(xc, false); 4833 4834 x86_cpu_list_feature_names(xc->filtered_features, next); 4835 4836 object_unref(OBJECT(xc)); 4837 } 4838 4839 /* Print all cpuid feature names in featureset 4840 */ 4841 static void listflags(GList *features) 4842 { 4843 size_t len = 0; 4844 GList *tmp; 4845 4846 for (tmp = features; tmp; tmp = tmp->next) { 4847 const char *name = tmp->data; 4848 if ((len + strlen(name) + 1) >= 75) { 4849 qemu_printf("\n"); 4850 len = 0; 4851 } 4852 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4853 len += strlen(name) + 1; 4854 } 4855 qemu_printf("\n"); 4856 } 4857 4858 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4859 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4860 { 4861 ObjectClass *class_a = (ObjectClass *)a; 4862 ObjectClass *class_b = (ObjectClass *)b; 4863 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4864 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4865 int ret; 4866 4867 if (cc_a->ordering != cc_b->ordering) { 4868 ret = cc_a->ordering - cc_b->ordering; 4869 } else { 4870 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4871 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4872 ret = strcmp(name_a, name_b); 4873 } 4874 return ret; 4875 } 4876 4877 static GSList *get_sorted_cpu_model_list(void) 4878 { 4879 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4880 list = g_slist_sort(list, x86_cpu_list_compare); 4881 return list; 4882 } 4883 4884 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4885 { 4886 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4887 char *r = object_property_get_str(obj, "model-id", &error_abort); 4888 object_unref(obj); 4889 return r; 4890 } 4891 4892 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4893 { 4894 X86CPUVersion version; 4895 4896 if (!cc->model || !cc->model->is_alias) { 4897 return NULL; 4898 } 4899 version = x86_cpu_model_resolve_version(cc->model); 4900 if (version <= 0) { 4901 return NULL; 4902 } 4903 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4904 } 4905 4906 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4907 { 4908 ObjectClass *oc = data; 4909 X86CPUClass *cc = X86_CPU_CLASS(oc); 4910 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4911 g_autofree char *desc = g_strdup(cc->model_description); 4912 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4913 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4914 4915 if (!desc && alias_of) { 4916 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4917 desc = g_strdup("(alias configured by machine type)"); 4918 } else { 4919 desc = g_strdup_printf("(alias of %s)", alias_of); 4920 } 4921 } 4922 if (!desc && cc->model && cc->model->note) { 4923 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4924 } 4925 if (!desc) { 4926 desc = g_strdup_printf("%s", model_id); 4927 } 4928 4929 qemu_printf("x86 %-20s %-58s\n", name, desc); 4930 } 4931 4932 /* list available CPU models and flags */ 4933 void x86_cpu_list(void) 4934 { 4935 int i, j; 4936 GSList *list; 4937 GList *names = NULL; 4938 4939 qemu_printf("Available CPUs:\n"); 4940 list = get_sorted_cpu_model_list(); 4941 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4942 g_slist_free(list); 4943 4944 names = NULL; 4945 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 4946 
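/* Collect every named CPUID flag from every feature word for the listing below. */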
FeatureWordInfo *fw = &feature_word_info[i]; 4947 for (j = 0; j < 64; j++) { 4948 if (fw->feat_names[j]) { 4949 names = g_list_append(names, (gpointer)fw->feat_names[j]); 4950 } 4951 } 4952 } 4953 4954 names = g_list_sort(names, (GCompareFunc)strcmp); 4955 4956 qemu_printf("\nRecognized CPUID flags:\n"); 4957 listflags(names); 4958 qemu_printf("\n"); 4959 g_list_free(names); 4960 } 4961 4962 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 4963 { 4964 ObjectClass *oc = data; 4965 X86CPUClass *cc = X86_CPU_CLASS(oc); 4966 CpuDefinitionInfoList **cpu_list = user_data; 4967 CpuDefinitionInfoList *entry; 4968 CpuDefinitionInfo *info; 4969 4970 info = g_malloc0(sizeof(*info)); 4971 info->name = x86_cpu_class_get_model_name(cc); 4972 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 4973 info->has_unavailable_features = true; 4974 info->q_typename = g_strdup(object_class_get_name(oc)); 4975 info->migration_safe = cc->migration_safe; 4976 info->has_migration_safe = true; 4977 info->q_static = cc->static_model; 4978 /* 4979 * Old machine types won't report aliases, so that alias translation 4980 * doesn't break compatibility with previous QEMU versions. 4981 */ 4982 if (default_cpu_version != CPU_VERSION_LEGACY) { 4983 info->alias_of = x86_cpu_class_get_alias_of(cc); 4984 info->has_alias_of = !!info->alias_of; 4985 } 4986 4987 entry = g_malloc0(sizeof(*entry)); 4988 entry->value = info; 4989 entry->next = *cpu_list; 4990 *cpu_list = entry; 4991 } 4992 4993 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 4994 { 4995 CpuDefinitionInfoList *cpu_list = NULL; 4996 GSList *list = get_sorted_cpu_model_list(); 4997 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 4998 g_slist_free(list); 4999 return cpu_list; 5000 } 5001 5002 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5003 bool migratable_only) 5004 { 5005 FeatureWordInfo *wi = &feature_word_info[w]; 5006 uint64_t r = 0; 5007 5008 if (kvm_enabled()) { 5009 switch (wi->type) { 5010 case CPUID_FEATURE_WORD: 5011 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5012 wi->cpuid.ecx, 5013 wi->cpuid.reg); 5014 break; 5015 case MSR_FEATURE_WORD: 5016 r = kvm_arch_get_supported_msr_feature(kvm_state, 5017 wi->msr.index); 5018 break; 5019 } 5020 } else if (hvf_enabled()) { 5021 if (wi->type != CPUID_FEATURE_WORD) { 5022 return 0; 5023 } 5024 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5025 wi->cpuid.ecx, 5026 wi->cpuid.reg); 5027 } else if (tcg_enabled()) { 5028 r = wi->tcg_features; 5029 } else { 5030 return ~0; 5031 } 5032 if (migratable_only) { 5033 r &= x86_cpu_get_migratable_flags(w); 5034 } 5035 return r; 5036 } 5037 5038 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5039 { 5040 PropValue *pv; 5041 for (pv = props; pv->prop; pv++) { 5042 if (!pv->value) { 5043 continue; 5044 } 5045 object_property_parse(OBJECT(cpu), pv->value, pv->prop, 5046 &error_abort); 5047 } 5048 } 5049 5050 /* Apply properties for the CPU model version specified in model */ 5051 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5052 { 5053 const X86CPUVersionDefinition *vdef; 5054 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5055 5056 if (version == CPU_VERSION_LEGACY) { 5057 return; 5058 } 5059 5060 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5061 PropValue *p; 5062 5063 for (p = vdef->props; p && p->prop; p++) { 5064 object_property_parse(OBJECT(cpu), p->value, p->prop, 5065 &error_abort); 5066 
} 5067 5068 if (vdef->version == version) { 5069 break; 5070 } 5071 } 5072 5073 /* 5074 * If we reached the end of the list, version number was invalid 5075 */ 5076 assert(vdef->version == version); 5077 } 5078 5079 /* Load data from X86CPUDefinition into a X86CPU object 5080 */ 5081 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) 5082 { 5083 X86CPUDefinition *def = model->cpudef; 5084 CPUX86State *env = &cpu->env; 5085 const char *vendor; 5086 char host_vendor[CPUID_VENDOR_SZ + 1]; 5087 FeatureWord w; 5088 5089 /*NOTE: any property set by this function should be returned by 5090 * x86_cpu_static_props(), so static expansion of 5091 * query-cpu-model-expansion is always complete. 5092 */ 5093 5094 /* CPU models only set _minimum_ values for level/xlevel: */ 5095 object_property_set_uint(OBJECT(cpu), def->level, "min-level", 5096 &error_abort); 5097 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", 5098 &error_abort); 5099 5100 object_property_set_int(OBJECT(cpu), def->family, "family", 5101 &error_abort); 5102 object_property_set_int(OBJECT(cpu), def->model, "model", 5103 &error_abort); 5104 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", 5105 &error_abort); 5106 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", 5107 &error_abort); 5108 for (w = 0; w < FEATURE_WORDS; w++) { 5109 env->features[w] = def->features[w]; 5110 } 5111 5112 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5113 cpu->legacy_cache = !def->cache_info; 5114 5115 /* Special cases not set in the X86CPUDefinition structs: */ 5116 /* TODO: in-kernel irqchip for hvf */ 5117 if (kvm_enabled()) { 5118 if (!kvm_irqchip_in_kernel()) { 5119 x86_cpu_change_kvm_default("x2apic", "off"); 5120 } 5121 5122 x86_cpu_apply_props(cpu, kvm_default_props); 5123 } else if (tcg_enabled()) { 5124 x86_cpu_apply_props(cpu, tcg_default_props); 5125 } 5126 5127 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5128 5129 /* sysenter isn't supported in compatibility mode on AMD, 5130 * syscall isn't supported in compatibility mode on Intel. 5131 * Normally we advertise the actual CPU vendor, but you can 5132 * override this using the 'vendor' property if you want to use 5133 * KVM's sysenter/syscall emulation in compatibility mode and 5134 * when doing cross vendor migration 5135 */ 5136 vendor = def->vendor; 5137 if (accel_uses_host_cpuid()) { 5138 uint32_t ebx = 0, ecx = 0, edx = 0; 5139 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5140 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5141 vendor = host_vendor; 5142 } 5143 5144 object_property_set_str(OBJECT(cpu), vendor, "vendor", 5145 &error_abort); 5146 5147 x86_cpu_apply_version_props(cpu, model); 5148 } 5149 5150 #ifndef CONFIG_USER_ONLY 5151 /* Return a QDict containing keys for all properties that can be included 5152 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 5153 * must be included in the dictionary. 
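 * The dict below is seeded with the fixed property names and then extended
 * with every named feature flag from feature_word_info, so feature
 * properties are part of the static expansion as well.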
5154 */ 5155 static QDict *x86_cpu_static_props(void) 5156 { 5157 FeatureWord w; 5158 int i; 5159 static const char *props[] = { 5160 "min-level", 5161 "min-xlevel", 5162 "family", 5163 "model", 5164 "stepping", 5165 "model-id", 5166 "vendor", 5167 "lmce", 5168 NULL, 5169 }; 5170 static QDict *d; 5171 5172 if (d) { 5173 return d; 5174 } 5175 5176 d = qdict_new(); 5177 for (i = 0; props[i]; i++) { 5178 qdict_put_null(d, props[i]); 5179 } 5180 5181 for (w = 0; w < FEATURE_WORDS; w++) { 5182 FeatureWordInfo *fi = &feature_word_info[w]; 5183 int bit; 5184 for (bit = 0; bit < 64; bit++) { 5185 if (!fi->feat_names[bit]) { 5186 continue; 5187 } 5188 qdict_put_null(d, fi->feat_names[bit]); 5189 } 5190 } 5191 5192 return d; 5193 } 5194 5195 /* Add an entry to @props dict, with the value for property. */ 5196 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5197 { 5198 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5199 &error_abort); 5200 5201 qdict_put_obj(props, prop, value); 5202 } 5203 5204 /* Convert CPU model data from X86CPU object to a property dictionary 5205 * that can recreate exactly the same CPU model. 5206 */ 5207 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5208 { 5209 QDict *sprops = x86_cpu_static_props(); 5210 const QDictEntry *e; 5211 5212 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5213 const char *prop = qdict_entry_key(e); 5214 x86_cpu_expand_prop(cpu, props, prop); 5215 } 5216 } 5217 5218 /* Convert CPU model data from X86CPU object to a property dictionary 5219 * that can recreate exactly the same CPU model, including every 5220 * writeable QOM property. 5221 */ 5222 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5223 { 5224 ObjectPropertyIterator iter; 5225 ObjectProperty *prop; 5226 5227 object_property_iter_init(&iter, OBJECT(cpu)); 5228 while ((prop = object_property_iter_next(&iter))) { 5229 /* skip read-only or write-only properties */ 5230 if (!prop->get || !prop->set) { 5231 continue; 5232 } 5233 5234 /* "hotplugged" is the only property that is configurable 5235 * on the command-line but will be set differently on CPUs 5236 * created using "-cpu ... -smp ..." and by CPUs created 5237 * on the fly by x86_cpu_from_model() for querying. Skip it. 
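 * (Leaving it out keeps the expansion output identical regardless of how
 * the CPU object being queried was created.)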
5238 */ 5239 if (!strcmp(prop->name, "hotplugged")) { 5240 continue; 5241 } 5242 x86_cpu_expand_prop(cpu, props, prop->name); 5243 } 5244 } 5245 5246 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5247 { 5248 const QDictEntry *prop; 5249 Error *err = NULL; 5250 5251 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5252 object_property_set_qobject(obj, qdict_entry_value(prop), 5253 qdict_entry_key(prop), &err); 5254 if (err) { 5255 break; 5256 } 5257 } 5258 5259 error_propagate(errp, err); 5260 } 5261 5262 /* Create X86CPU object according to model+props specification */ 5263 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5264 { 5265 X86CPU *xc = NULL; 5266 X86CPUClass *xcc; 5267 Error *err = NULL; 5268 5269 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5270 if (xcc == NULL) { 5271 error_setg(&err, "CPU model '%s' not found", model); 5272 goto out; 5273 } 5274 5275 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5276 if (props) { 5277 object_apply_props(OBJECT(xc), props, &err); 5278 if (err) { 5279 goto out; 5280 } 5281 } 5282 5283 x86_cpu_expand_features(xc, &err); 5284 if (err) { 5285 goto out; 5286 } 5287 5288 out: 5289 if (err) { 5290 error_propagate(errp, err); 5291 object_unref(OBJECT(xc)); 5292 xc = NULL; 5293 } 5294 return xc; 5295 } 5296 5297 CpuModelExpansionInfo * 5298 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5299 CpuModelInfo *model, 5300 Error **errp) 5301 { 5302 X86CPU *xc = NULL; 5303 Error *err = NULL; 5304 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5305 QDict *props = NULL; 5306 const char *base_name; 5307 5308 xc = x86_cpu_from_model(model->name, 5309 model->has_props ? 5310 qobject_to(QDict, model->props) : 5311 NULL, &err); 5312 if (err) { 5313 goto out; 5314 } 5315 5316 props = qdict_new(); 5317 ret->model = g_new0(CpuModelInfo, 1); 5318 ret->model->props = QOBJECT(props); 5319 ret->model->has_props = true; 5320 5321 switch (type) { 5322 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5323 /* Static expansion will be based on "base" only */ 5324 base_name = "base"; 5325 x86_cpu_to_dict(xc, props); 5326 break; 5327 case CPU_MODEL_EXPANSION_TYPE_FULL: 5328 /* As we don't return every single property, full expansion needs 5329 * to keep the original model name+props, and add extra 5330 * properties on top of that. 
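 * A static expansion is therefore reported as the "base" model plus
 * explicit properties, while a full expansion keeps the requested model
 * name and layers every writable property on top of it.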
5331 */ 5332 base_name = model->name; 5333 x86_cpu_to_dict_full(xc, props); 5334 break; 5335 default: 5336 error_setg(&err, "Unsupported expansion type"); 5337 goto out; 5338 } 5339 5340 x86_cpu_to_dict(xc, props); 5341 5342 ret->model->name = g_strdup(base_name); 5343 5344 out: 5345 object_unref(OBJECT(xc)); 5346 if (err) { 5347 error_propagate(errp, err); 5348 qapi_free_CpuModelExpansionInfo(ret); 5349 ret = NULL; 5350 } 5351 return ret; 5352 } 5353 #endif /* !CONFIG_USER_ONLY */ 5354 5355 static gchar *x86_gdb_arch_name(CPUState *cs) 5356 { 5357 #ifdef TARGET_X86_64 5358 return g_strdup("i386:x86-64"); 5359 #else 5360 return g_strdup("i386"); 5361 #endif 5362 } 5363 5364 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5365 { 5366 X86CPUModel *model = data; 5367 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5368 5369 xcc->model = model; 5370 xcc->migration_safe = true; 5371 } 5372 5373 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5374 { 5375 g_autofree char *typename = x86_cpu_type_name(name); 5376 TypeInfo ti = { 5377 .name = typename, 5378 .parent = TYPE_X86_CPU, 5379 .class_init = x86_cpu_cpudef_class_init, 5380 .class_data = model, 5381 }; 5382 5383 type_register(&ti); 5384 } 5385 5386 static void x86_register_cpudef_types(X86CPUDefinition *def) 5387 { 5388 X86CPUModel *m; 5389 const X86CPUVersionDefinition *vdef; 5390 5391 /* AMD aliases are handled at runtime based on CPUID vendor, so 5392 * they shouldn't be set on the CPU model table. 5393 */ 5394 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5395 /* catch mistakes instead of silently truncating model_id when too long */ 5396 assert(def->model_id && strlen(def->model_id) <= 48); 5397 5398 /* Unversioned model: */ 5399 m = g_new0(X86CPUModel, 1); 5400 m->cpudef = def; 5401 m->version = CPU_VERSION_AUTO; 5402 m->is_alias = true; 5403 x86_register_cpu_model_type(def->name, m); 5404 5405 /* Versioned models: */ 5406 5407 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5408 X86CPUModel *m = g_new0(X86CPUModel, 1); 5409 g_autofree char *name = 5410 x86_cpu_versioned_model_name(def, vdef->version); 5411 m->cpudef = def; 5412 m->version = vdef->version; 5413 m->note = vdef->note; 5414 x86_register_cpu_model_type(name, m); 5415 5416 if (vdef->alias) { 5417 X86CPUModel *am = g_new0(X86CPUModel, 1); 5418 am->cpudef = def; 5419 am->version = vdef->version; 5420 am->is_alias = true; 5421 x86_register_cpu_model_type(vdef->alias, am); 5422 } 5423 } 5424 5425 } 5426 5427 #if !defined(CONFIG_USER_ONLY) 5428 5429 void cpu_clear_apic_feature(CPUX86State *env) 5430 { 5431 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5432 } 5433 5434 #endif /* !CONFIG_USER_ONLY */ 5435 5436 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5437 uint32_t *eax, uint32_t *ebx, 5438 uint32_t *ecx, uint32_t *edx) 5439 { 5440 X86CPU *cpu = env_archcpu(env); 5441 CPUState *cs = env_cpu(env); 5442 uint32_t die_offset; 5443 uint32_t limit; 5444 uint32_t signature[3]; 5445 X86CPUTopoInfo topo_info; 5446 5447 topo_info.nodes_per_pkg = env->nr_nodes; 5448 topo_info.dies_per_pkg = env->nr_dies; 5449 topo_info.cores_per_die = cs->nr_cores; 5450 topo_info.threads_per_core = cs->nr_threads; 5451 5452 /* Calculate & apply limits for different index ranges */ 5453 if (index >= 0xC0000000) { 5454 limit = env->cpuid_xlevel2; 5455 } else if (index >= 0x80000000) { 5456 limit = env->cpuid_xlevel; 5457 } else if (index >= 0x40000000) { 5458 limit = 0x40000001; 5459 } else { 5460 limit = 
env->cpuid_level; 5461 } 5462 5463 if (index > limit) { 5464 /* Intel documentation states that invalid EAX input will 5465 * return the same information as EAX=cpuid_level 5466 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 5467 */ 5468 index = env->cpuid_level; 5469 } 5470 5471 switch(index) { 5472 case 0: 5473 *eax = env->cpuid_level; 5474 *ebx = env->cpuid_vendor1; 5475 *edx = env->cpuid_vendor2; 5476 *ecx = env->cpuid_vendor3; 5477 break; 5478 case 1: 5479 *eax = env->cpuid_version; 5480 *ebx = (cpu->apic_id << 24) | 5481 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5482 *ecx = env->features[FEAT_1_ECX]; 5483 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5484 *ecx |= CPUID_EXT_OSXSAVE; 5485 } 5486 *edx = env->features[FEAT_1_EDX]; 5487 if (cs->nr_cores * cs->nr_threads > 1) { 5488 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5489 *edx |= CPUID_HT; 5490 } 5491 break; 5492 case 2: 5493 /* cache info: needed for Pentium Pro compatibility */ 5494 if (cpu->cache_info_passthrough) { 5495 host_cpuid(index, 0, eax, ebx, ecx, edx); 5496 break; 5497 } 5498 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5499 *ebx = 0; 5500 if (!cpu->enable_l3_cache) { 5501 *ecx = 0; 5502 } else { 5503 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5504 } 5505 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5506 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5507 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5508 break; 5509 case 4: 5510 /* cache info: needed for Core compatibility */ 5511 if (cpu->cache_info_passthrough) { 5512 host_cpuid(index, count, eax, ebx, ecx, edx); 5513 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
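 * EAX[31:26] of leaf 4 holds the maximum number of addressable core IDs
 * per package minus one, so it is cleared and recomputed from cs->nr_cores
 * below; e.g. nr_cores == 4 becomes 3 << 26.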
*/ 5514 *eax &= ~0xFC000000; 5515 if ((*eax & 31) && cs->nr_cores > 1) { 5516 *eax |= (cs->nr_cores - 1) << 26; 5517 } 5518 } else { 5519 *eax = 0; 5520 switch (count) { 5521 case 0: /* L1 dcache info */ 5522 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5523 1, cs->nr_cores, 5524 eax, ebx, ecx, edx); 5525 break; 5526 case 1: /* L1 icache info */ 5527 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5528 1, cs->nr_cores, 5529 eax, ebx, ecx, edx); 5530 break; 5531 case 2: /* L2 cache info */ 5532 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5533 cs->nr_threads, cs->nr_cores, 5534 eax, ebx, ecx, edx); 5535 break; 5536 case 3: /* L3 cache info */ 5537 die_offset = apicid_die_offset(&topo_info); 5538 if (cpu->enable_l3_cache) { 5539 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5540 (1 << die_offset), cs->nr_cores, 5541 eax, ebx, ecx, edx); 5542 break; 5543 } 5544 /* fall through */ 5545 default: /* end of info */ 5546 *eax = *ebx = *ecx = *edx = 0; 5547 break; 5548 } 5549 } 5550 break; 5551 case 5: 5552 /* MONITOR/MWAIT Leaf */ 5553 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5554 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5555 *ecx = cpu->mwait.ecx; /* flags */ 5556 *edx = cpu->mwait.edx; /* mwait substates */ 5557 break; 5558 case 6: 5559 /* Thermal and Power Leaf */ 5560 *eax = env->features[FEAT_6_EAX]; 5561 *ebx = 0; 5562 *ecx = 0; 5563 *edx = 0; 5564 break; 5565 case 7: 5566 /* Structured Extended Feature Flags Enumeration Leaf */ 5567 if (count == 0) { 5568 /* Maximum ECX value for sub-leaves */ 5569 *eax = env->cpuid_level_func7; 5570 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5571 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5572 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5573 *ecx |= CPUID_7_0_ECX_OSPKE; 5574 } 5575 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5576 } else if (count == 1) { 5577 *eax = env->features[FEAT_7_1_EAX]; 5578 *ebx = 0; 5579 *ecx = 0; 5580 *edx = 0; 5581 } else { 5582 *eax = 0; 5583 *ebx = 0; 5584 *ecx = 0; 5585 *edx = 0; 5586 } 5587 break; 5588 case 9: 5589 /* Direct Cache Access Information Leaf */ 5590 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5591 *ebx = 0; 5592 *ecx = 0; 5593 *edx = 0; 5594 break; 5595 case 0xA: 5596 /* Architectural Performance Monitoring Leaf */ 5597 if (kvm_enabled() && cpu->enable_pmu) { 5598 KVMState *s = cs->kvm_state; 5599 5600 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5601 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5602 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5603 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5604 } else if (hvf_enabled() && cpu->enable_pmu) { 5605 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5606 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5607 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5608 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5609 } else { 5610 *eax = 0; 5611 *ebx = 0; 5612 *ecx = 0; 5613 *edx = 0; 5614 } 5615 break; 5616 case 0xB: 5617 /* Extended Topology Enumeration Leaf */ 5618 if (!cpu->enable_cpuid_0xb) { 5619 *eax = *ebx = *ecx = *edx = 0; 5620 break; 5621 } 5622 5623 *ecx = count & 0xff; 5624 *edx = cpu->apic_id; 5625 5626 switch (count) { 5627 case 0: 5628 *eax = apicid_core_offset(&topo_info); 5629 *ebx = cs->nr_threads; 5630 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5631 break; 5632 case 1: 5633 *eax = env->pkg_offset; 5634 *ebx = cs->nr_cores * cs->nr_threads; 5635 *ecx |= 
CPUID_TOPOLOGY_LEVEL_CORE; 5636 break; 5637 default: 5638 *eax = 0; 5639 *ebx = 0; 5640 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5641 } 5642 5643 assert(!(*eax & ~0x1f)); 5644 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5645 break; 5646 case 0x1F: 5647 /* V2 Extended Topology Enumeration Leaf */ 5648 if (env->nr_dies < 2) { 5649 *eax = *ebx = *ecx = *edx = 0; 5650 break; 5651 } 5652 5653 *ecx = count & 0xff; 5654 *edx = cpu->apic_id; 5655 switch (count) { 5656 case 0: 5657 *eax = apicid_core_offset(&topo_info); 5658 *ebx = cs->nr_threads; 5659 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5660 break; 5661 case 1: 5662 *eax = apicid_die_offset(&topo_info); 5663 *ebx = cs->nr_cores * cs->nr_threads; 5664 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5665 break; 5666 case 2: 5667 *eax = env->pkg_offset; 5668 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5669 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5670 break; 5671 default: 5672 *eax = 0; 5673 *ebx = 0; 5674 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5675 } 5676 assert(!(*eax & ~0x1f)); 5677 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5678 break; 5679 case 0xD: { 5680 /* Processor Extended State */ 5681 *eax = 0; 5682 *ebx = 0; 5683 *ecx = 0; 5684 *edx = 0; 5685 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5686 break; 5687 } 5688 5689 if (count == 0) { 5690 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5691 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5692 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5693 /* 5694 * The initial value of xcr0 and ebx == 0, On host without kvm 5695 * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0 5696 * even through guest update xcr0, this will crash some legacy guest 5697 * (e.g., CentOS 6), So set ebx == ecx to workaroud it. 5698 */ 5699 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5700 } else if (count == 1) { 5701 *eax = env->features[FEAT_XSAVE]; 5702 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5703 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5704 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5705 *eax = esa->size; 5706 *ebx = esa->offset; 5707 } 5708 } 5709 break; 5710 } 5711 case 0x14: { 5712 /* Intel Processor Trace Enumeration */ 5713 *eax = 0; 5714 *ebx = 0; 5715 *ecx = 0; 5716 *edx = 0; 5717 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5718 !kvm_enabled()) { 5719 break; 5720 } 5721 5722 if (count == 0) { 5723 *eax = INTEL_PT_MAX_SUBLEAF; 5724 *ebx = INTEL_PT_MINIMAL_EBX; 5725 *ecx = INTEL_PT_MINIMAL_ECX; 5726 } else if (count == 1) { 5727 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5728 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5729 } 5730 break; 5731 } 5732 case 0x40000000: 5733 /* 5734 * CPUID code in kvm_arch_init_vcpu() ignores stuff 5735 * set here, but we restrict to TCG none the less. 
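 * When TCG is in use the leaf below advertises the 12-byte "TCGTCGTCGTCG"
 * signature split across EBX/ECX/EDX, the same way hypervisors expose
 * their identification in the 0x40000000 leaf range.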
5736 */ 5737 if (tcg_enabled() && cpu->expose_tcg) { 5738 memcpy(signature, "TCGTCGTCGTCG", 12); 5739 *eax = 0x40000001; 5740 *ebx = signature[0]; 5741 *ecx = signature[1]; 5742 *edx = signature[2]; 5743 } else { 5744 *eax = 0; 5745 *ebx = 0; 5746 *ecx = 0; 5747 *edx = 0; 5748 } 5749 break; 5750 case 0x40000001: 5751 *eax = 0; 5752 *ebx = 0; 5753 *ecx = 0; 5754 *edx = 0; 5755 break; 5756 case 0x80000000: 5757 *eax = env->cpuid_xlevel; 5758 *ebx = env->cpuid_vendor1; 5759 *edx = env->cpuid_vendor2; 5760 *ecx = env->cpuid_vendor3; 5761 break; 5762 case 0x80000001: 5763 *eax = env->cpuid_version; 5764 *ebx = 0; 5765 *ecx = env->features[FEAT_8000_0001_ECX]; 5766 *edx = env->features[FEAT_8000_0001_EDX]; 5767 5768 /* The Linux kernel checks for the CMPLegacy bit and 5769 * discards multiple thread information if it is set. 5770 * So don't set it here for Intel to make Linux guests happy. 5771 */ 5772 if (cs->nr_cores * cs->nr_threads > 1) { 5773 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5774 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5775 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5776 *ecx |= 1 << 1; /* CmpLegacy bit */ 5777 } 5778 } 5779 break; 5780 case 0x80000002: 5781 case 0x80000003: 5782 case 0x80000004: 5783 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5784 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5785 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5786 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5787 break; 5788 case 0x80000005: 5789 /* cache info (L1 cache) */ 5790 if (cpu->cache_info_passthrough) { 5791 host_cpuid(index, 0, eax, ebx, ecx, edx); 5792 break; 5793 } 5794 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | 5795 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5796 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | 5797 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5798 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5799 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5800 break; 5801 case 0x80000006: 5802 /* cache info (L2 cache) */ 5803 if (cpu->cache_info_passthrough) { 5804 host_cpuid(index, 0, eax, ebx, ecx, edx); 5805 break; 5806 } 5807 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | 5808 (L2_DTLB_2M_ENTRIES << 16) | 5809 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | 5810 (L2_ITLB_2M_ENTRIES); 5811 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | 5812 (L2_DTLB_4K_ENTRIES << 16) | 5813 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | 5814 (L2_ITLB_4K_ENTRIES); 5815 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5816 cpu->enable_l3_cache ? 5817 env->cache_info_amd.l3_cache : NULL, 5818 ecx, edx); 5819 break; 5820 case 0x80000007: 5821 *eax = 0; 5822 *ebx = 0; 5823 *ecx = 0; 5824 *edx = env->features[FEAT_8000_0007_EDX]; 5825 break; 5826 case 0x80000008: 5827 /* virtual & phys address size in low 2 bytes. */ 5828 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5829 /* 64 bit processor */ 5830 *eax = cpu->phys_bits; /* configurable physical bits */ 5831 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5832 *eax |= 0x00003900; /* 57 bits virtual */ 5833 } else { 5834 *eax |= 0x00003000; /* 48 bits virtual */ 5835 } 5836 } else { 5837 *eax = cpu->phys_bits; 5838 } 5839 *ebx = env->features[FEAT_8000_0008_EBX]; 5840 if (cs->nr_cores * cs->nr_threads > 1) { 5841 /* 5842 * Bits 15:12 is "The number of bits in the initial 5843 * Core::X86::Apic::ApicId[ApicId] value that indicate 5844 * thread ID within a package". 
This is already stored at 5845 * CPUX86State::pkg_offset. 5846 * Bits 7:0 is "The number of threads in the package is NC+1" 5847 */ 5848 *ecx = (env->pkg_offset << 12) | 5849 ((cs->nr_cores * cs->nr_threads) - 1); 5850 } else { 5851 *ecx = 0; 5852 } 5853 *edx = 0; 5854 break; 5855 case 0x8000000A: 5856 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5857 *eax = 0x00000001; /* SVM Revision */ 5858 *ebx = 0x00000010; /* nr of ASIDs */ 5859 *ecx = 0; 5860 *edx = env->features[FEAT_SVM]; /* optional features */ 5861 } else { 5862 *eax = 0; 5863 *ebx = 0; 5864 *ecx = 0; 5865 *edx = 0; 5866 } 5867 break; 5868 case 0x8000001D: 5869 *eax = 0; 5870 if (cpu->cache_info_passthrough) { 5871 host_cpuid(index, count, eax, ebx, ecx, edx); 5872 break; 5873 } 5874 switch (count) { 5875 case 0: /* L1 dcache info */ 5876 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, 5877 &topo_info, eax, ebx, ecx, edx); 5878 break; 5879 case 1: /* L1 icache info */ 5880 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, 5881 &topo_info, eax, ebx, ecx, edx); 5882 break; 5883 case 2: /* L2 cache info */ 5884 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, 5885 &topo_info, eax, ebx, ecx, edx); 5886 break; 5887 case 3: /* L3 cache info */ 5888 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, 5889 &topo_info, eax, ebx, ecx, edx); 5890 break; 5891 default: /* end of info */ 5892 *eax = *ebx = *ecx = *edx = 0; 5893 break; 5894 } 5895 break; 5896 case 0x8000001E: 5897 assert(cpu->core_id <= 255); 5898 encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx); 5899 break; 5900 case 0xC0000000: 5901 *eax = env->cpuid_xlevel2; 5902 *ebx = 0; 5903 *ecx = 0; 5904 *edx = 0; 5905 break; 5906 case 0xC0000001: 5907 /* Support for VIA CPU's CPUID instruction */ 5908 *eax = env->cpuid_version; 5909 *ebx = 0; 5910 *ecx = 0; 5911 *edx = env->features[FEAT_C000_0001_EDX]; 5912 break; 5913 case 0xC0000002: 5914 case 0xC0000003: 5915 case 0xC0000004: 5916 /* Reserved for the future, and now filled with zero */ 5917 *eax = 0; 5918 *ebx = 0; 5919 *ecx = 0; 5920 *edx = 0; 5921 break; 5922 case 0x8000001F: 5923 *eax = sev_enabled() ? 
0x2 : 0; 5924 *ebx = sev_get_cbit_position(); 5925 *ebx |= sev_get_reduced_phys_bits() << 6; 5926 *ecx = 0; 5927 *edx = 0; 5928 break; 5929 default: 5930 /* reserved values: zero */ 5931 *eax = 0; 5932 *ebx = 0; 5933 *ecx = 0; 5934 *edx = 0; 5935 break; 5936 } 5937 } 5938 5939 static void x86_cpu_reset(DeviceState *dev) 5940 { 5941 CPUState *s = CPU(dev); 5942 X86CPU *cpu = X86_CPU(s); 5943 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 5944 CPUX86State *env = &cpu->env; 5945 target_ulong cr4; 5946 uint64_t xcr0; 5947 int i; 5948 5949 xcc->parent_reset(dev); 5950 5951 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 5952 5953 env->old_exception = -1; 5954 5955 /* init to reset state */ 5956 5957 env->hflags2 |= HF2_GIF_MASK; 5958 5959 cpu_x86_update_cr0(env, 0x60000010); 5960 env->a20_mask = ~0x0; 5961 env->smbase = 0x30000; 5962 env->msr_smi_count = 0; 5963 5964 env->idt.limit = 0xffff; 5965 env->gdt.limit = 0xffff; 5966 env->ldt.limit = 0xffff; 5967 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 5968 env->tr.limit = 0xffff; 5969 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 5970 5971 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 5972 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 5973 DESC_R_MASK | DESC_A_MASK); 5974 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 5975 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5976 DESC_A_MASK); 5977 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 5978 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5979 DESC_A_MASK); 5980 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 5981 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5982 DESC_A_MASK); 5983 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 5984 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5985 DESC_A_MASK); 5986 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 5987 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 5988 DESC_A_MASK); 5989 5990 env->eip = 0xfff0; 5991 env->regs[R_EDX] = env->cpuid_version; 5992 5993 env->eflags = 0x2; 5994 5995 /* FPU init */ 5996 for (i = 0; i < 8; i++) { 5997 env->fptags[i] = 1; 5998 } 5999 cpu_set_fpuc(env, 0x37f); 6000 6001 env->mxcsr = 0x1f80; 6002 /* All units are in INIT state. */ 6003 env->xstate_bv = 0; 6004 6005 env->pat = 0x0007040600070406ULL; 6006 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6007 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6008 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6009 } 6010 6011 memset(env->dr, 0, sizeof(env->dr)); 6012 env->dr[6] = DR6_FIXED_1; 6013 env->dr[7] = DR7_FIXED_1; 6014 cpu_breakpoint_remove_all(s, BP_CPU); 6015 cpu_watchpoint_remove_all(s, BP_CPU); 6016 6017 cr4 = 0; 6018 xcr0 = XSTATE_FP_MASK; 6019 6020 #ifdef CONFIG_USER_ONLY 6021 /* Enable all the features for user-mode. */ 6022 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6023 xcr0 |= XSTATE_SSE_MASK; 6024 } 6025 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6026 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6027 if (env->features[esa->feature] & esa->bits) { 6028 xcr0 |= 1ull << i; 6029 } 6030 } 6031 6032 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6033 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6034 } 6035 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6036 cr4 |= CR4_FSGSBASE_MASK; 6037 } 6038 #endif 6039 6040 env->xcr0 = xcr0; 6041 cpu_x86_update_cr4(env, cr4); 6042 6043 /* 6044 * SDM 11.11.5 requires: 6045 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6046 * - IA32_MTRR_PHYSMASKn.V = 0 6047 * All other bits are undefined. For simplification, zero it all. 
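 * (With IA32_MTRR_DEF_TYPE.E clear the MTRRs are disabled after reset, so
 * zeroing the fixed and variable ranges below is safe until the guest
 * re-enables them.)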
6048 */ 6049 env->mtrr_deftype = 0; 6050 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6051 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6052 6053 env->interrupt_injected = -1; 6054 env->exception_nr = -1; 6055 env->exception_pending = 0; 6056 env->exception_injected = 0; 6057 env->exception_has_payload = false; 6058 env->exception_payload = 0; 6059 env->nmi_injected = false; 6060 #if !defined(CONFIG_USER_ONLY) 6061 /* We hard-wire the BSP to the first CPU. */ 6062 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6063 6064 s->halted = !cpu_is_bsp(cpu); 6065 6066 if (kvm_enabled()) { 6067 kvm_arch_reset_vcpu(cpu); 6068 } 6069 else if (hvf_enabled()) { 6070 hvf_reset_vcpu(s); 6071 } 6072 #endif 6073 } 6074 6075 #ifndef CONFIG_USER_ONLY 6076 bool cpu_is_bsp(X86CPU *cpu) 6077 { 6078 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6079 } 6080 6081 /* TODO: remove me, when reset over QOM tree is implemented */ 6082 static void x86_cpu_machine_reset_cb(void *opaque) 6083 { 6084 X86CPU *cpu = opaque; 6085 cpu_reset(CPU(cpu)); 6086 } 6087 #endif 6088 6089 static void mce_init(X86CPU *cpu) 6090 { 6091 CPUX86State *cenv = &cpu->env; 6092 unsigned int bank; 6093 6094 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6095 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6096 (CPUID_MCE | CPUID_MCA)) { 6097 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6098 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6099 cenv->mcg_ctl = ~(uint64_t)0; 6100 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6101 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6102 } 6103 } 6104 } 6105 6106 #ifndef CONFIG_USER_ONLY 6107 APICCommonClass *apic_get_class(void) 6108 { 6109 const char *apic_type = "apic"; 6110 6111 /* TODO: in-kernel irqchip for hvf */ 6112 if (kvm_apic_in_kernel()) { 6113 apic_type = "kvm-apic"; 6114 } else if (xen_enabled()) { 6115 apic_type = "xen-apic"; 6116 } 6117 6118 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6119 } 6120 6121 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6122 { 6123 APICCommonState *apic; 6124 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6125 6126 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6127 6128 object_property_add_child(OBJECT(cpu), "lapic", 6129 OBJECT(cpu->apic_state)); 6130 object_unref(OBJECT(cpu->apic_state)); 6131 6132 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6133 /* TODO: convert to link<> */ 6134 apic = APIC_COMMON(cpu->apic_state); 6135 apic->cpu = cpu; 6136 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6137 } 6138 6139 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6140 { 6141 APICCommonState *apic; 6142 static bool apic_mmio_map_once; 6143 6144 if (cpu->apic_state == NULL) { 6145 return; 6146 } 6147 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized", 6148 errp); 6149 6150 /* Map APIC MMIO area */ 6151 apic = APIC_COMMON(cpu->apic_state); 6152 if (!apic_mmio_map_once) { 6153 memory_region_add_subregion_overlap(get_system_memory(), 6154 apic->apicbase & 6155 MSR_IA32_APICBASE_BASE, 6156 &apic->io_memory, 6157 0x1000); 6158 apic_mmio_map_once = true; 6159 } 6160 } 6161 6162 static void x86_cpu_machine_done(Notifier *n, void *unused) 6163 { 6164 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6165 MemoryRegion *smram = 6166 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6167 6168 if (smram) { 6169 cpu->smram = g_new(MemoryRegion, 1); 6170 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 6171 
smram, 0, 1ull << 32); 6172 memory_region_set_enabled(cpu->smram, true); 6173 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6174 } 6175 } 6176 #else 6177 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6178 { 6179 } 6180 #endif 6181 6182 /* Note: Only safe for use on x86(-64) hosts */ 6183 static uint32_t x86_host_phys_bits(void) 6184 { 6185 uint32_t eax; 6186 uint32_t host_phys_bits; 6187 6188 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6189 if (eax >= 0x80000008) { 6190 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6191 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6192 * at 23:16 that can specify a maximum physical address bits for 6193 * the guest that can override this value; but I've not seen 6194 * anything with that set. 6195 */ 6196 host_phys_bits = eax & 0xff; 6197 } else { 6198 /* It's an odd 64 bit machine that doesn't have the leaf for 6199 * physical address bits; fall back to 36 that's most older 6200 * Intel. 6201 */ 6202 host_phys_bits = 36; 6203 } 6204 6205 return host_phys_bits; 6206 } 6207 6208 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6209 { 6210 if (*min < value) { 6211 *min = value; 6212 } 6213 } 6214 6215 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6216 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6217 { 6218 CPUX86State *env = &cpu->env; 6219 FeatureWordInfo *fi = &feature_word_info[w]; 6220 uint32_t eax = fi->cpuid.eax; 6221 uint32_t region = eax & 0xF0000000; 6222 6223 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6224 if (!env->features[w]) { 6225 return; 6226 } 6227 6228 switch (region) { 6229 case 0x00000000: 6230 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6231 break; 6232 case 0x80000000: 6233 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6234 break; 6235 case 0xC0000000: 6236 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6237 break; 6238 } 6239 6240 if (eax == 7) { 6241 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6242 fi->cpuid.ecx); 6243 } 6244 } 6245 6246 /* Calculate XSAVE components based on the configured CPU feature flags */ 6247 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6248 { 6249 CPUX86State *env = &cpu->env; 6250 int i; 6251 uint64_t mask; 6252 6253 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6254 return; 6255 } 6256 6257 mask = 0; 6258 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6259 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6260 if (env->features[esa->feature] & esa->bits) { 6261 mask |= (1ULL << i); 6262 } 6263 } 6264 6265 env->features[FEAT_XSAVE_COMP_LO] = mask; 6266 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6267 } 6268 6269 /***** Steps involved on loading and filtering CPUID data 6270 * 6271 * When initializing and realizing a CPU object, the steps 6272 * involved in setting up CPUID data are: 6273 * 6274 * 1) Loading CPU model definition (X86CPUDefinition). This is 6275 * implemented by x86_cpu_load_model() and should be completely 6276 * transparent, as it is done automatically by instance_init. 6277 * No code should need to look at X86CPUDefinition structs 6278 * outside instance_init. 6279 * 6280 * 2) CPU expansion. This is done by realize before CPUID 6281 * filtering, and will make sure host/accelerator data is 6282 * loaded for CPU models that depend on host capabilities 6283 * (e.g. "host"). Done by x86_cpu_expand_features(). 6284 * 6285 * 3) CPUID filtering. 
This initializes extra data related to 6286 * CPUID, and checks if the host supports all capabilities 6287 * required by the CPU. Runnability of a CPU model is 6288 * determined at this step. Done by x86_cpu_filter_features(). 6289 * 6290 * Some operations don't require all steps to be performed. 6291 * More precisely: 6292 * 6293 * - CPU instance creation (instance_init) will run only CPU 6294 * model loading. CPU expansion can't run at instance_init-time 6295 * because host/accelerator data may be not available yet. 6296 * - CPU realization will perform both CPU model expansion and CPUID 6297 * filtering, and return an error in case one of them fails. 6298 * - query-cpu-definitions needs to run all 3 steps. It needs 6299 * to run CPUID filtering, as the 'unavailable-features' 6300 * field is set based on the filtering results. 6301 * - The query-cpu-model-expansion QMP command only needs to run 6302 * CPU model loading and CPU expansion. It should not filter 6303 * any CPUID data based on host capabilities. 6304 */ 6305 6306 /* Expand CPU configuration data, based on configured features 6307 * and host/accelerator capabilities when appropriate. 6308 */ 6309 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6310 { 6311 CPUX86State *env = &cpu->env; 6312 FeatureWord w; 6313 int i; 6314 GList *l; 6315 Error *local_err = NULL; 6316 6317 for (l = plus_features; l; l = l->next) { 6318 const char *prop = l->data; 6319 object_property_set_bool(OBJECT(cpu), true, prop, &local_err); 6320 if (local_err) { 6321 goto out; 6322 } 6323 } 6324 6325 for (l = minus_features; l; l = l->next) { 6326 const char *prop = l->data; 6327 object_property_set_bool(OBJECT(cpu), false, prop, &local_err); 6328 if (local_err) { 6329 goto out; 6330 } 6331 } 6332 6333 /*TODO: Now cpu->max_features doesn't overwrite features 6334 * set using QOM properties, and we can convert 6335 * plus_features & minus_features to global properties 6336 * inside x86_cpu_parse_featurestr() too. 6337 */ 6338 if (cpu->max_features) { 6339 for (w = 0; w < FEATURE_WORDS; w++) { 6340 /* Override only features that weren't set explicitly 6341 * by the user. 6342 */ 6343 env->features[w] |= 6344 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6345 ~env->user_features[w] & 6346 ~feature_word_info[w].no_autoenable_flags; 6347 } 6348 } 6349 6350 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6351 FeatureDep *d = &feature_dependencies[i]; 6352 if (!(env->features[d->from.index] & d->from.mask)) { 6353 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6354 6355 /* Not an error unless the dependent feature was added explicitly. 
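 * That is, a dependent bit that was merely inherited from the CPU model is
 * dropped silently, while one the user requested explicitly with
 * "+feat"/"feat=on" is reported via mark_unavailable_features().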
*/ 6356 mark_unavailable_features(cpu, d->to.index, 6357 unavailable_features & env->user_features[d->to.index], 6358 "This feature depends on other features that were not requested"); 6359 6360 env->user_features[d->to.index] |= unavailable_features; 6361 env->features[d->to.index] &= ~unavailable_features; 6362 } 6363 } 6364 6365 if (!kvm_enabled() || !cpu->expose_kvm) { 6366 env->features[FEAT_KVM] = 0; 6367 } 6368 6369 x86_cpu_enable_xsave_components(cpu); 6370 6371 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 6372 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6373 if (cpu->full_cpuid_auto_level) { 6374 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6375 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6376 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6377 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6378 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6379 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6380 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6381 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6382 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6383 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6384 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6385 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6386 6387 /* Intel Processor Trace requires CPUID[0x14] */ 6388 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { 6389 if (cpu->intel_pt_auto_level) { 6390 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6391 } else if (cpu->env.cpuid_min_level < 0x14) { 6392 mark_unavailable_features(cpu, FEAT_7_0_EBX, 6393 CPUID_7_0_EBX_INTEL_PT, 6394 "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\""); 6395 } 6396 } 6397 6398 /* CPU topology with multi-dies support requires CPUID[0x1F] */ 6399 if (env->nr_dies > 1) { 6400 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6401 } 6402 6403 /* SVM requires CPUID[0x8000000A] */ 6404 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6405 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6406 } 6407 6408 /* SEV requires CPUID[0x8000001F] */ 6409 if (sev_enabled()) { 6410 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6411 } 6412 } 6413 6414 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6415 if (env->cpuid_level_func7 == UINT32_MAX) { 6416 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6417 } 6418 if (env->cpuid_level == UINT32_MAX) { 6419 env->cpuid_level = env->cpuid_min_level; 6420 } 6421 if (env->cpuid_xlevel == UINT32_MAX) { 6422 env->cpuid_xlevel = env->cpuid_min_xlevel; 6423 } 6424 if (env->cpuid_xlevel2 == UINT32_MAX) { 6425 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6426 } 6427 6428 out: 6429 if (local_err != NULL) { 6430 error_propagate(errp, local_err); 6431 } 6432 } 6433 6434 /* 6435 * Finishes initialization of CPUID data, filters CPU feature 6436 * words based on host availability of each feature. 6437 * 6438 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 6439 */ 6440 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6441 { 6442 CPUX86State *env = &cpu->env; 6443 FeatureWord w; 6444 const char *prefix = NULL; 6445 6446 if (verbose) { 6447 prefix = accel_uses_host_cpuid() 6448 ? 
"host doesn't support requested feature" 6449 : "TCG doesn't support requested feature"; 6450 } 6451 6452 for (w = 0; w < FEATURE_WORDS; w++) { 6453 uint64_t host_feat = 6454 x86_cpu_get_supported_feature_word(w, false); 6455 uint64_t requested_features = env->features[w]; 6456 uint64_t unavailable_features = requested_features & ~host_feat; 6457 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6458 } 6459 6460 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6461 kvm_enabled()) { 6462 KVMState *s = CPU(cpu)->kvm_state; 6463 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6464 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6465 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6466 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6467 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6468 6469 if (!eax_0 || 6470 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6471 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6472 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6473 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6474 INTEL_PT_ADDR_RANGES_NUM) || 6475 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6476 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6477 (ecx_0 & INTEL_PT_IP_LIP)) { 6478 /* 6479 * Processor Trace capabilities aren't configurable, so if the 6480 * host can't emulate the capabilities we report on 6481 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6482 */ 6483 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6484 } 6485 } 6486 } 6487 6488 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6489 { 6490 CPUState *cs = CPU(dev); 6491 X86CPU *cpu = X86_CPU(dev); 6492 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6493 CPUX86State *env = &cpu->env; 6494 Error *local_err = NULL; 6495 static bool ht_warned; 6496 6497 if (xcc->host_cpuid_required) { 6498 if (!accel_uses_host_cpuid()) { 6499 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6500 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6501 goto out; 6502 } 6503 } 6504 6505 if (cpu->max_features && accel_uses_host_cpuid()) { 6506 if (enable_cpu_pm) { 6507 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6508 &cpu->mwait.ecx, &cpu->mwait.edx); 6509 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6510 } 6511 if (kvm_enabled() && cpu->ucode_rev == 0) { 6512 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, 6513 MSR_IA32_UCODE_REV); 6514 } 6515 } 6516 6517 if (cpu->ucode_rev == 0) { 6518 /* The default is the same as KVM's. */ 6519 if (IS_AMD_CPU(env)) { 6520 cpu->ucode_rev = 0x01000065; 6521 } else { 6522 cpu->ucode_rev = 0x100000000ULL; 6523 } 6524 } 6525 6526 /* mwait extended info: needed for Core compatibility */ 6527 /* We always wake on interrupt even if host does not have the capability */ 6528 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 6529 6530 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 6531 error_setg(errp, "apic-id property was not initialized properly"); 6532 return; 6533 } 6534 6535 x86_cpu_expand_features(cpu, &local_err); 6536 if (local_err) { 6537 goto out; 6538 } 6539 6540 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); 6541 6542 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { 6543 error_setg(&local_err, 6544 accel_uses_host_cpuid() ? 
6545 "Host doesn't support requested features" : 6546 "TCG doesn't support requested features"); 6547 goto out; 6548 } 6549 6550 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 6551 * CPUID[1].EDX. 6552 */ 6553 if (IS_AMD_CPU(env)) { 6554 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 6555 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 6556 & CPUID_EXT2_AMD_ALIASES); 6557 } 6558 6559 /* For 64bit systems think about the number of physical bits to present. 6560 * ideally this should be the same as the host; anything other than matching 6561 * the host can cause incorrect guest behaviour. 6562 * QEMU used to pick the magic value of 40 bits that corresponds to 6563 * consumer AMD devices but nothing else. 6564 */ 6565 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 6566 if (accel_uses_host_cpuid()) { 6567 uint32_t host_phys_bits = x86_host_phys_bits(); 6568 static bool warned; 6569 6570 /* Print a warning if the user set it to a value that's not the 6571 * host value. 6572 */ 6573 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 6574 !warned) { 6575 warn_report("Host physical bits (%u)" 6576 " does not match phys-bits property (%u)", 6577 host_phys_bits, cpu->phys_bits); 6578 warned = true; 6579 } 6580 6581 if (cpu->host_phys_bits) { 6582 /* The user asked for us to use the host physical bits */ 6583 cpu->phys_bits = host_phys_bits; 6584 if (cpu->host_phys_bits_limit && 6585 cpu->phys_bits > cpu->host_phys_bits_limit) { 6586 cpu->phys_bits = cpu->host_phys_bits_limit; 6587 } 6588 } 6589 6590 if (cpu->phys_bits && 6591 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 6592 cpu->phys_bits < 32)) { 6593 error_setg(errp, "phys-bits should be between 32 and %u " 6594 " (but is %u)", 6595 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 6596 return; 6597 } 6598 } else { 6599 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 6600 error_setg(errp, "TCG only supports phys-bits=%u", 6601 TCG_PHYS_ADDR_BITS); 6602 return; 6603 } 6604 } 6605 /* 0 means it was not explicitly set by the user (or by machine 6606 * compat_props or by the host code above). In this case, the default 6607 * is the value used by TCG (40). 6608 */ 6609 if (cpu->phys_bits == 0) { 6610 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 6611 } 6612 } else { 6613 /* For 32 bit systems don't use the user set value, but keep 6614 * phys_bits consistent with what we tell the guest. 
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the inputs
     * (sockets, cores, threads), it is still better to give users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the check would be incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading (%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling the topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev);
}

typedef struct BitProperty {
    FeatureWord w;
    uint64_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint64_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}

/* Register a boolean property to get/set a single bit in a uint64_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint64_t mask = (1ULL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore; they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}

#if !defined(CONFIG_USER_ONLY)
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}
#endif /* !CONFIG_USER_ONLY */

static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    env->nr_dies = 1;
    env->nr_nodes = 1;
    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor);
object_property_add_str(obj, "model-id", 6932 x86_cpuid_get_model_id, 6933 x86_cpuid_set_model_id); 6934 object_property_add(obj, "tsc-frequency", "int", 6935 x86_cpuid_get_tsc_freq, 6936 x86_cpuid_set_tsc_freq, NULL, NULL); 6937 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 6938 x86_cpu_get_feature_words, 6939 NULL, NULL, (void *)env->features); 6940 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 6941 x86_cpu_get_feature_words, 6942 NULL, NULL, (void *)cpu->filtered_features); 6943 /* 6944 * The "unavailable-features" property has the same semantics as 6945 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions" 6946 * QMP command: they list the features that would have prevented the 6947 * CPU from running if the "enforce" flag was set. 6948 */ 6949 object_property_add(obj, "unavailable-features", "strList", 6950 x86_cpu_get_unavailable_features, 6951 NULL, NULL, NULL); 6952 6953 #if !defined(CONFIG_USER_ONLY) 6954 object_property_add(obj, "crash-information", "GuestPanicInformation", 6955 x86_cpu_get_crash_info_qom, NULL, NULL, NULL); 6956 #endif 6957 6958 for (w = 0; w < FEATURE_WORDS; w++) { 6959 int bitnr; 6960 6961 for (bitnr = 0; bitnr < 64; bitnr++) { 6962 x86_cpu_register_feature_bit_props(cpu, w, bitnr); 6963 } 6964 } 6965 6966 object_property_add_alias(obj, "sse3", obj, "pni"); 6967 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq"); 6968 object_property_add_alias(obj, "sse4-1", obj, "sse4.1"); 6969 object_property_add_alias(obj, "sse4-2", obj, "sse4.2"); 6970 object_property_add_alias(obj, "xd", obj, "nx"); 6971 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt"); 6972 object_property_add_alias(obj, "i64", obj, "lm"); 6973 6974 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl"); 6975 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust"); 6976 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt"); 6977 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm"); 6978 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy"); 6979 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr"); 6980 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core"); 6981 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb"); 6982 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay"); 6983 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu"); 6984 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf"); 6985 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time"); 6986 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi"); 6987 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt"); 6988 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control"); 6989 object_property_add_alias(obj, "svm_lock", obj, "svm-lock"); 6990 object_property_add_alias(obj, "nrip_save", obj, "nrip-save"); 6991 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale"); 6992 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean"); 6993 object_property_add_alias(obj, "pause_filter", obj, "pause-filter"); 6994 object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); 6995 object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); 6996 6997 if (xcc->model) { 6998 x86_cpu_load_model(cpu, xcc->model); 6999 } 7000 } 7001 7002 static int64_t x86_cpu_get_arch_id(CPUState *cs) 7003 { 7004 X86CPU *cpu = X86_CPU(cs); 7005 7006 return cpu->apic_id; 7007 } 7008 7009 static bool x86_cpu_get_paging_enabled(const 
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK
                      ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}

void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_RETRY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7178 HYPERV_FEAT_REENLIGHTENMENT, 0), 7179 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7180 HYPERV_FEAT_TLBFLUSH, 0), 7181 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7182 HYPERV_FEAT_EVMCS, 0), 7183 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7184 HYPERV_FEAT_IPI, 0), 7185 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7186 HYPERV_FEAT_STIMER_DIRECT, 0), 7187 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7188 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7189 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7190 7191 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7192 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7193 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7194 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7195 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7196 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7197 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7198 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7199 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7200 UINT32_MAX), 7201 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7202 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7203 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7204 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7205 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7206 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7207 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7208 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7209 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 7210 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7211 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7212 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7213 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7214 false), 7215 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7216 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7217 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7218 true), 7219 /* 7220 * lecacy_cache defaults to true unless the CPU model provides its 7221 * own cache information (see x86_cpu_load_def()). 7222 */ 7223 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7224 7225 /* 7226 * From "Requirements for Implementing the Microsoft 7227 * Hypervisor Interface": 7228 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7229 * 7230 * "Starting with Windows Server 2012 and Windows 8, if 7231 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7232 * the hypervisor imposes no specific limit to the number of VPs. 7233 * In this case, Windows Server 2012 guest VMs may use more than 7234 * 64 VPs, up to the maximum supported number of processors applicable 7235 * to the specific Windows version being used." 
7236 */ 7237 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1), 7238 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only, 7239 false), 7240 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level, 7241 true), 7242 DEFINE_PROP_END_OF_LIST() 7243 }; 7244 7245 static void x86_cpu_common_class_init(ObjectClass *oc, void *data) 7246 { 7247 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7248 CPUClass *cc = CPU_CLASS(oc); 7249 DeviceClass *dc = DEVICE_CLASS(oc); 7250 7251 device_class_set_parent_realize(dc, x86_cpu_realizefn, 7252 &xcc->parent_realize); 7253 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn, 7254 &xcc->parent_unrealize); 7255 device_class_set_props(dc, x86_cpu_properties); 7256 7257 device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset); 7258 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP; 7259 7260 cc->class_by_name = x86_cpu_class_by_name; 7261 cc->parse_features = x86_cpu_parse_featurestr; 7262 cc->has_work = x86_cpu_has_work; 7263 #ifdef CONFIG_TCG 7264 cc->do_interrupt = x86_cpu_do_interrupt; 7265 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt; 7266 #endif 7267 cc->dump_state = x86_cpu_dump_state; 7268 cc->set_pc = x86_cpu_set_pc; 7269 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb; 7270 cc->gdb_read_register = x86_cpu_gdb_read_register; 7271 cc->gdb_write_register = x86_cpu_gdb_write_register; 7272 cc->get_arch_id = x86_cpu_get_arch_id; 7273 cc->get_paging_enabled = x86_cpu_get_paging_enabled; 7274 #ifndef CONFIG_USER_ONLY 7275 cc->asidx_from_attrs = x86_asidx_from_attrs; 7276 cc->get_memory_mapping = x86_cpu_get_memory_mapping; 7277 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug; 7278 cc->get_crash_info = x86_cpu_get_crash_info; 7279 cc->write_elf64_note = x86_cpu_write_elf64_note; 7280 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote; 7281 cc->write_elf32_note = x86_cpu_write_elf32_note; 7282 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote; 7283 cc->vmsd = &vmstate_x86_cpu; 7284 #endif 7285 cc->gdb_arch_name = x86_gdb_arch_name; 7286 #ifdef TARGET_X86_64 7287 cc->gdb_core_xml_file = "i386-64bit.xml"; 7288 cc->gdb_num_core_regs = 66; 7289 #else 7290 cc->gdb_core_xml_file = "i386-32bit.xml"; 7291 cc->gdb_num_core_regs = 50; 7292 #endif 7293 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) 7294 cc->debug_excp_handler = breakpoint_handler; 7295 #endif 7296 cc->cpu_exec_enter = x86_cpu_exec_enter; 7297 cc->cpu_exec_exit = x86_cpu_exec_exit; 7298 #ifdef CONFIG_TCG 7299 cc->tcg_initialize = tcg_x86_init; 7300 cc->tlb_fill = x86_cpu_tlb_fill; 7301 #endif 7302 cc->disas_set_info = x86_disas_set_info; 7303 7304 dc->user_creatable = true; 7305 } 7306 7307 static const TypeInfo x86_cpu_type_info = { 7308 .name = TYPE_X86_CPU, 7309 .parent = TYPE_CPU, 7310 .instance_size = sizeof(X86CPU), 7311 .instance_init = x86_cpu_initfn, 7312 .abstract = true, 7313 .class_size = sizeof(X86CPUClass), 7314 .class_init = x86_cpu_common_class_init, 7315 }; 7316 7317 7318 /* "base" CPU model, used by query-cpu-model-expansion */ 7319 static void x86_cpu_base_class_init(ObjectClass *oc, void *data) 7320 { 7321 X86CPUClass *xcc = X86_CPU_CLASS(oc); 7322 7323 xcc->static_model = true; 7324 xcc->migration_safe = true; 7325 xcc->model_description = "base CPU model type with no features enabled"; 7326 xcc->ordering = 8; 7327 } 7328 7329 static const TypeInfo x86_base_cpu_type_info = { 7330 .name = X86_CPU_TYPE_NAME("base"), 7331 .parent = TYPE_X86_CPU, 7332 .class_init = x86_cpu_base_class_init, 7333 }; 
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)
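
/*
 * Illustrative -cpu command lines exercising the properties defined above
 * (model names are examples only; the authoritative set of knobs is
 * x86_cpu_properties[] and the per-bit feature properties):
 *
 *   -cpu max,enforce=on                fail instead of silently filtering
 *                                      features the accelerator can't provide
 *   -cpu host,host-phys-bits=on        mirror the host's physical address width
 *   -cpu qemu64,+intel-pt,level=0x14   raise the CPUID level for Intel PT, as
 *                                      suggested by the Intel PT error message
 *                                      above
 */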