1 /* 2 * i386 CPUID helper functions 3 * 4 * Copyright (c) 2003 Fabrice Bellard 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 20 #include "qemu/osdep.h" 21 #include "qemu/units.h" 22 #include "qemu/cutils.h" 23 #include "qemu/bitops.h" 24 #include "qemu/qemu-print.h" 25 26 #include "cpu.h" 27 #include "exec/exec-all.h" 28 #include "sysemu/kvm.h" 29 #include "sysemu/reset.h" 30 #include "sysemu/hvf.h" 31 #include "sysemu/cpus.h" 32 #include "sysemu/xen.h" 33 #include "kvm_i386.h" 34 #include "sev_i386.h" 35 36 #include "qemu/error-report.h" 37 #include "qemu/module.h" 38 #include "qemu/option.h" 39 #include "qemu/config-file.h" 40 #include "qapi/error.h" 41 #include "qapi/qapi-visit-machine.h" 42 #include "qapi/qapi-visit-run-state.h" 43 #include "qapi/qmp/qdict.h" 44 #include "qapi/qmp/qerror.h" 45 #include "qapi/visitor.h" 46 #include "qom/qom-qobject.h" 47 #include "sysemu/arch_init.h" 48 #include "qapi/qapi-commands-machine-target.h" 49 50 #include "standard-headers/asm-x86/kvm_para.h" 51 52 #include "sysemu/sysemu.h" 53 #include "sysemu/tcg.h" 54 #include "hw/qdev-properties.h" 55 #include "hw/i386/topology.h" 56 #ifndef CONFIG_USER_ONLY 57 #include "exec/address-spaces.h" 58 #include "hw/i386/apic_internal.h" 59 #include "hw/boards.h" 60 #endif 61 62 #include "disas/capstone.h" 63 64 /* Helpers for building CPUID[2] descriptors: */ 65 66 struct CPUID2CacheDescriptorInfo { 67 enum CacheType type; 68 int level; 69 int size; 70 int line_size; 71 int associativity; 72 }; 73 74 /* 75 * Known CPUID 2 cache descriptors. 
76 * From Intel SDM Volume 2A, CPUID instruction 77 */ 78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = { 79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB, 80 .associativity = 4, .line_size = 32, }, 81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB, 82 .associativity = 4, .line_size = 32, }, 83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 84 .associativity = 4, .line_size = 64, }, 85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 86 .associativity = 2, .line_size = 32, }, 87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 88 .associativity = 4, .line_size = 32, }, 89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 90 .associativity = 4, .line_size = 64, }, 91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB, 92 .associativity = 6, .line_size = 64, }, 93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 94 .associativity = 2, .line_size = 64, }, 95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 96 .associativity = 8, .line_size = 64, }, 97 /* lines per sector is not supported cpuid2_cache_descriptor(), 98 * so descriptors 0x22, 0x23 are not included 99 */ 100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 101 .associativity = 16, .line_size = 64, }, 102 /* lines per sector is not supported cpuid2_cache_descriptor(), 103 * so descriptors 0x25, 0x20 are not included 104 */ 105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 106 .associativity = 8, .line_size = 64, }, 107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB, 108 .associativity = 8, .line_size = 64, }, 109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB, 110 .associativity = 4, .line_size = 32, }, 111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 112 .associativity = 4, .line_size = 32, }, 113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 114 .associativity = 4, .line_size = 32, }, 115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 116 .associativity = 4, .line_size = 32, }, 117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 118 .associativity = 4, .line_size = 32, }, 119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 120 .associativity = 4, .line_size = 64, }, 121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 122 .associativity = 8, .line_size = 64, }, 123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB, 124 .associativity = 12, .line_size = 64, }, 125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */ 126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 127 .associativity = 12, .line_size = 64, }, 128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 129 .associativity = 16, .line_size = 64, }, 130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 131 .associativity = 12, .line_size = 64, }, 132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB, 133 .associativity = 16, .line_size = 64, }, 134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB, 135 .associativity = 24, .line_size = 64, }, 136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 137 .associativity = 8, .line_size = 64, }, 138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB, 139 .associativity = 4, .line_size = 64, }, 140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB, 141 .associativity = 4, .line_size = 64, }, 
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB, 143 .associativity = 4, .line_size = 64, }, 144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 145 .associativity = 4, .line_size = 64, }, 146 /* lines per sector is not supported cpuid2_cache_descriptor(), 147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included. 148 */ 149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 150 .associativity = 8, .line_size = 64, }, 151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 152 .associativity = 2, .line_size = 64, }, 153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 154 .associativity = 8, .line_size = 64, }, 155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB, 156 .associativity = 8, .line_size = 32, }, 157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 158 .associativity = 8, .line_size = 32, }, 159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 160 .associativity = 8, .line_size = 32, }, 161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB, 162 .associativity = 8, .line_size = 32, }, 163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB, 164 .associativity = 4, .line_size = 64, }, 165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB, 166 .associativity = 8, .line_size = 64, }, 167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB, 168 .associativity = 4, .line_size = 64, }, 169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 170 .associativity = 4, .line_size = 64, }, 171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 172 .associativity = 4, .line_size = 64, }, 173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB, 174 .associativity = 8, .line_size = 64, }, 175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 176 .associativity = 8, .line_size = 64, }, 177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 178 .associativity = 8, .line_size = 64, }, 179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB, 180 .associativity = 12, .line_size = 64, }, 181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB, 182 .associativity = 12, .line_size = 64, }, 183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB, 184 .associativity = 12, .line_size = 64, }, 185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB, 186 .associativity = 16, .line_size = 64, }, 187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB, 188 .associativity = 16, .line_size = 64, }, 189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB, 190 .associativity = 16, .line_size = 64, }, 191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB, 192 .associativity = 24, .line_size = 64, }, 193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB, 194 .associativity = 24, .line_size = 64, }, 195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB, 196 .associativity = 24, .line_size = 64, }, 197 }; 198 199 /* 200 * "CPUID leaf 2 does not report cache descriptor information, 201 * use CPUID leaf 4 to query cache parameters" 202 */ 203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF 204 205 /* 206 * Return a CPUID 2 cache descriptor for a given cache. 
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE 208 */ 209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache) 210 { 211 int i; 212 213 assert(cache->size > 0); 214 assert(cache->level > 0); 215 assert(cache->line_size > 0); 216 assert(cache->associativity > 0); 217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) { 218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i]; 219 if (d->level == cache->level && d->type == cache->type && 220 d->size == cache->size && d->line_size == cache->line_size && 221 d->associativity == cache->associativity) { 222 return i; 223 } 224 } 225 226 return CACHE_DESCRIPTOR_UNAVAILABLE; 227 } 228 229 /* CPUID Leaf 4 constants: */ 230 231 /* EAX: */ 232 #define CACHE_TYPE_D 1 233 #define CACHE_TYPE_I 2 234 #define CACHE_TYPE_UNIFIED 3 235 236 #define CACHE_LEVEL(l) (l << 5) 237 238 #define CACHE_SELF_INIT_LEVEL (1 << 8) 239 240 /* EDX: */ 241 #define CACHE_NO_INVD_SHARING (1 << 0) 242 #define CACHE_INCLUSIVE (1 << 1) 243 #define CACHE_COMPLEX_IDX (1 << 2) 244 245 /* Encode CacheType for CPUID[4].EAX */ 246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \ 247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \ 248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \ 249 0 /* Invalid value */) 250 251 252 /* Encode cache info for CPUID[4] */ 253 static void encode_cache_cpuid4(CPUCacheInfo *cache, 254 int num_apic_ids, int num_cores, 255 uint32_t *eax, uint32_t *ebx, 256 uint32_t *ecx, uint32_t *edx) 257 { 258 assert(cache->size == cache->line_size * cache->associativity * 259 cache->partitions * cache->sets); 260 261 assert(num_apic_ids > 0); 262 *eax = CACHE_TYPE(cache->type) | 263 CACHE_LEVEL(cache->level) | 264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) | 265 ((num_cores - 1) << 26) | 266 ((num_apic_ids - 1) << 14); 267 268 assert(cache->line_size > 0); 269 assert(cache->partitions > 0); 270 assert(cache->associativity > 0); 271 /* We don't implement fully-associative caches */ 272 assert(cache->associativity < cache->sets); 273 *ebx = (cache->line_size - 1) | 274 ((cache->partitions - 1) << 12) | 275 ((cache->associativity - 1) << 22); 276 277 assert(cache->sets > 0); 278 *ecx = cache->sets - 1; 279 280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 281 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 283 } 284 285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */ 286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache) 287 { 288 assert(cache->size % 1024 == 0); 289 assert(cache->lines_per_tag > 0); 290 assert(cache->associativity > 0); 291 assert(cache->line_size > 0); 292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) | 293 (cache->lines_per_tag << 8) | (cache->line_size); 294 } 295 296 #define ASSOC_FULL 0xFF 297 298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */ 299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \ 300 a == 2 ? 0x2 : \ 301 a == 4 ? 0x4 : \ 302 a == 8 ? 0x6 : \ 303 a == 16 ? 0x8 : \ 304 a == 32 ? 0xA : \ 305 a == 48 ? 0xB : \ 306 a == 64 ? 0xC : \ 307 a == 96 ? 0xD : \ 308 a == 128 ? 0xE : \ 309 a == ASSOC_FULL ? 0xF : \ 310 0 /* invalid value */) 311 312 /* 313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX 314 * @l3 can be NULL. 
315 */ 316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2, 317 CPUCacheInfo *l3, 318 uint32_t *ecx, uint32_t *edx) 319 { 320 assert(l2->size % 1024 == 0); 321 assert(l2->associativity > 0); 322 assert(l2->lines_per_tag > 0); 323 assert(l2->line_size > 0); 324 *ecx = ((l2->size / 1024) << 16) | 325 (AMD_ENC_ASSOC(l2->associativity) << 12) | 326 (l2->lines_per_tag << 8) | (l2->line_size); 327 328 if (l3) { 329 assert(l3->size % (512 * 1024) == 0); 330 assert(l3->associativity > 0); 331 assert(l3->lines_per_tag > 0); 332 assert(l3->line_size > 0); 333 *edx = ((l3->size / (512 * 1024)) << 18) | 334 (AMD_ENC_ASSOC(l3->associativity) << 12) | 335 (l3->lines_per_tag << 8) | (l3->line_size); 336 } else { 337 *edx = 0; 338 } 339 } 340 341 /* Encode cache info for CPUID[8000001D] */ 342 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, 343 X86CPUTopoInfo *topo_info, 344 uint32_t *eax, uint32_t *ebx, 345 uint32_t *ecx, uint32_t *edx) 346 { 347 uint32_t l3_cores; 348 unsigned nodes = MAX(topo_info->nodes_per_pkg, 1); 349 350 assert(cache->size == cache->line_size * cache->associativity * 351 cache->partitions * cache->sets); 352 353 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) | 354 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0); 355 356 /* L3 is shared among multiple cores */ 357 if (cache->level == 3) { 358 l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg * 359 topo_info->cores_per_die * 360 topo_info->threads_per_core), 361 nodes); 362 *eax |= (l3_cores - 1) << 14; 363 } else { 364 *eax |= ((topo_info->threads_per_core - 1) << 14); 365 } 366 367 assert(cache->line_size > 0); 368 assert(cache->partitions > 0); 369 assert(cache->associativity > 0); 370 /* We don't implement fully-associative caches */ 371 assert(cache->associativity < cache->sets); 372 *ebx = (cache->line_size - 1) | 373 ((cache->partitions - 1) << 12) | 374 ((cache->associativity - 1) << 22); 375 376 assert(cache->sets > 0); 377 *ecx = cache->sets - 1; 378 379 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) | 380 (cache->inclusive ? CACHE_INCLUSIVE : 0) | 381 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0); 382 } 383 384 /* Encode cache info for CPUID[8000001E] */ 385 static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu, 386 uint32_t *eax, uint32_t *ebx, 387 uint32_t *ecx, uint32_t *edx) 388 { 389 X86CPUTopoIDs topo_ids = {0}; 390 unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1); 391 int shift; 392 393 x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids); 394 395 *eax = cpu->apic_id; 396 /* 397 * CPUID_Fn8000001E_EBX 398 * 31:16 Reserved 399 * 15:8 Threads per core (The number of threads per core is 400 * Threads per core + 1) 401 * 7:0 Core id (see bit decoding below) 402 * SMT: 403 * 4:3 node id 404 * 2 Core complex id 405 * 1:0 Core id 406 * Non SMT: 407 * 5:4 node id 408 * 3 Core complex id 409 * 1:0 Core id 410 */ 411 *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) | 412 (topo_ids.core_id); 413 /* 414 * CPUID_Fn8000001E_ECX 415 * 31:11 Reserved 416 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1) 417 * 7:0 Node id (see bit decoding below) 418 * 2 Socket id 419 * 1:0 Node id 420 */ 421 if (nodes <= 4) { 422 *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id; 423 } else { 424 /* 425 * Node id fix up. Actual hardware supports up to 4 nodes. But with 426 * more than 32 cores, we may end up with more than 4 nodes. 427 * Node id is a combination of socket id and node id. 
Only requirement 428 * here is that this number should be unique accross the system. 429 * Shift the socket id to accommodate more nodes. We dont expect both 430 * socket id and node id to be big number at the same time. This is not 431 * an ideal config but we need to to support it. Max nodes we can have 432 * is 32 (255/8) with 8 cores per node and 255 max cores. We only need 433 * 5 bits for nodes. Find the left most set bit to represent the total 434 * number of nodes. find_last_bit returns last set bit(0 based). Left 435 * shift(+1) the socket id to represent all the nodes. 436 */ 437 nodes -= 1; 438 shift = find_last_bit(&nodes, 8); 439 *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) | 440 topo_ids.node_id; 441 } 442 *edx = 0; 443 } 444 445 /* 446 * Definitions of the hardcoded cache entries we expose: 447 * These are legacy cache values. If there is a need to change any 448 * of these values please use builtin_x86_defs 449 */ 450 451 /* L1 data cache: */ 452 static CPUCacheInfo legacy_l1d_cache = { 453 .type = DATA_CACHE, 454 .level = 1, 455 .size = 32 * KiB, 456 .self_init = 1, 457 .line_size = 64, 458 .associativity = 8, 459 .sets = 64, 460 .partitions = 1, 461 .no_invd_sharing = true, 462 }; 463 464 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 465 static CPUCacheInfo legacy_l1d_cache_amd = { 466 .type = DATA_CACHE, 467 .level = 1, 468 .size = 64 * KiB, 469 .self_init = 1, 470 .line_size = 64, 471 .associativity = 2, 472 .sets = 512, 473 .partitions = 1, 474 .lines_per_tag = 1, 475 .no_invd_sharing = true, 476 }; 477 478 /* L1 instruction cache: */ 479 static CPUCacheInfo legacy_l1i_cache = { 480 .type = INSTRUCTION_CACHE, 481 .level = 1, 482 .size = 32 * KiB, 483 .self_init = 1, 484 .line_size = 64, 485 .associativity = 8, 486 .sets = 64, 487 .partitions = 1, 488 .no_invd_sharing = true, 489 }; 490 491 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */ 492 static CPUCacheInfo legacy_l1i_cache_amd = { 493 .type = INSTRUCTION_CACHE, 494 .level = 1, 495 .size = 64 * KiB, 496 .self_init = 1, 497 .line_size = 64, 498 .associativity = 2, 499 .sets = 512, 500 .partitions = 1, 501 .lines_per_tag = 1, 502 .no_invd_sharing = true, 503 }; 504 505 /* Level 2 unified cache: */ 506 static CPUCacheInfo legacy_l2_cache = { 507 .type = UNIFIED_CACHE, 508 .level = 2, 509 .size = 4 * MiB, 510 .self_init = 1, 511 .line_size = 64, 512 .associativity = 16, 513 .sets = 4096, 514 .partitions = 1, 515 .no_invd_sharing = true, 516 }; 517 518 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */ 519 static CPUCacheInfo legacy_l2_cache_cpuid2 = { 520 .type = UNIFIED_CACHE, 521 .level = 2, 522 .size = 2 * MiB, 523 .line_size = 64, 524 .associativity = 8, 525 }; 526 527 528 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */ 529 static CPUCacheInfo legacy_l2_cache_amd = { 530 .type = UNIFIED_CACHE, 531 .level = 2, 532 .size = 512 * KiB, 533 .line_size = 64, 534 .lines_per_tag = 1, 535 .associativity = 16, 536 .sets = 512, 537 .partitions = 1, 538 }; 539 540 /* Level 3 unified cache: */ 541 static CPUCacheInfo legacy_l3_cache = { 542 .type = UNIFIED_CACHE, 543 .level = 3, 544 .size = 16 * MiB, 545 .line_size = 64, 546 .associativity = 16, 547 .sets = 16384, 548 .partitions = 1, 549 .lines_per_tag = 1, 550 .self_init = true, 551 .inclusive = true, 552 .complex_indexing = true, 553 }; 554 555 /* TLB definitions: */ 556 557 #define L1_DTLB_2M_ASSOC 1 558 #define L1_DTLB_2M_ENTRIES 255 559 #define L1_DTLB_4K_ASSOC 1 560 #define 
L1_DTLB_4K_ENTRIES 255 561 562 #define L1_ITLB_2M_ASSOC 1 563 #define L1_ITLB_2M_ENTRIES 255 564 #define L1_ITLB_4K_ASSOC 1 565 #define L1_ITLB_4K_ENTRIES 255 566 567 #define L2_DTLB_2M_ASSOC 0 /* disabled */ 568 #define L2_DTLB_2M_ENTRIES 0 /* disabled */ 569 #define L2_DTLB_4K_ASSOC 4 570 #define L2_DTLB_4K_ENTRIES 512 571 572 #define L2_ITLB_2M_ASSOC 0 /* disabled */ 573 #define L2_ITLB_2M_ENTRIES 0 /* disabled */ 574 #define L2_ITLB_4K_ASSOC 4 575 #define L2_ITLB_4K_ENTRIES 512 576 577 /* CPUID Leaf 0x14 constants: */ 578 #define INTEL_PT_MAX_SUBLEAF 0x1 579 /* 580 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH 581 * MSR can be accessed; 582 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode; 583 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation 584 * of Intel PT MSRs across warm reset; 585 * bit[03]: Support MTC timing packet and suppression of COFI-based packets; 586 */ 587 #define INTEL_PT_MINIMAL_EBX 0xf 588 /* 589 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and 590 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be 591 * accessed; 592 * bit[01]: ToPA tables can hold any number of output entries, up to the 593 * maximum allowed by the MaskOrTableOffset field of 594 * IA32_RTIT_OUTPUT_MASK_PTRS; 595 * bit[02]: Support Single-Range Output scheme; 596 */ 597 #define INTEL_PT_MINIMAL_ECX 0x7 598 /* generated packets which contain IP payloads have LIP values */ 599 #define INTEL_PT_IP_LIP (1 << 31) 600 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */ 601 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3 602 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */ 603 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ 604 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ 605 606 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, 607 uint32_t vendor2, uint32_t vendor3) 608 { 609 int i; 610 for (i = 0; i < 4; i++) { 611 dst[i] = vendor1 >> (8 * i); 612 dst[i + 4] = vendor2 >> (8 * i); 613 dst[i + 8] = vendor3 >> (8 * i); 614 } 615 dst[CPUID_VENDOR_SZ] = '\0'; 616 } 617 618 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE) 619 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \ 620 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC) 621 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \ 622 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 623 CPUID_PSE36 | CPUID_FXSR) 624 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE) 625 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \ 626 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \ 627 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \ 628 CPUID_PAE | CPUID_SEP | CPUID_APIC) 629 630 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \ 631 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \ 632 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \ 633 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \ 634 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE) 635 /* partly implemented: 636 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */ 637 /* missing: 638 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */ 639 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \ 640 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \ 641 CPUID_EXT_SSE41 | 
CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \ 642 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \ 643 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \ 644 CPUID_EXT_RDRAND) 645 /* missing: 646 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX, 647 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA, 648 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA, 649 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX, 650 CPUID_EXT_F16C */ 651 652 #ifdef TARGET_X86_64 653 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM) 654 #else 655 #define TCG_EXT2_X86_64_FEATURES 0 656 #endif 657 658 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \ 659 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \ 660 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \ 661 TCG_EXT2_X86_64_FEATURES) 662 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \ 663 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A) 664 #define TCG_EXT4_FEATURES 0 665 #define TCG_SVM_FEATURES CPUID_SVM_NPT 666 #define TCG_KVM_FEATURES 0 667 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \ 668 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \ 669 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \ 670 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \ 671 CPUID_7_0_EBX_ERMS) 672 /* missing: 673 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2, 674 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM, 675 CPUID_7_0_EBX_RDSEED */ 676 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ 677 /* CPUID_7_0_ECX_OSPKE is dynamic */ \ 678 CPUID_7_0_ECX_LA57) 679 #define TCG_7_0_EDX_FEATURES 0 680 #define TCG_7_1_EAX_FEATURES 0 681 #define TCG_APM_FEATURES 0 682 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT 683 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) 684 /* missing: 685 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ 686 687 typedef enum FeatureWordType { 688 CPUID_FEATURE_WORD, 689 MSR_FEATURE_WORD, 690 } FeatureWordType; 691 692 typedef struct FeatureWordInfo { 693 FeatureWordType type; 694 /* feature flags names are taken from "Intel Processor Identification and 695 * the CPUID Instruction" and AMD's "CPUID Specification". 696 * In cases of disagreement between feature naming conventions, 697 * aliases may be added. 
698 */ 699 const char *feat_names[64]; 700 union { 701 /* If type==CPUID_FEATURE_WORD */ 702 struct { 703 uint32_t eax; /* Input EAX for CPUID */ 704 bool needs_ecx; /* CPUID instruction uses ECX as input */ 705 uint32_t ecx; /* Input ECX value for CPUID */ 706 int reg; /* output register (R_* constant) */ 707 } cpuid; 708 /* If type==MSR_FEATURE_WORD */ 709 struct { 710 uint32_t index; 711 } msr; 712 }; 713 uint64_t tcg_features; /* Feature flags supported by TCG */ 714 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */ 715 uint64_t migratable_flags; /* Feature flags known to be migratable */ 716 /* Features that shouldn't be auto-enabled by "-cpu host" */ 717 uint64_t no_autoenable_flags; 718 } FeatureWordInfo; 719 720 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = { 721 [FEAT_1_EDX] = { 722 .type = CPUID_FEATURE_WORD, 723 .feat_names = { 724 "fpu", "vme", "de", "pse", 725 "tsc", "msr", "pae", "mce", 726 "cx8", "apic", NULL, "sep", 727 "mtrr", "pge", "mca", "cmov", 728 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, 729 NULL, "ds" /* Intel dts */, "acpi", "mmx", 730 "fxsr", "sse", "sse2", "ss", 731 "ht" /* Intel htt */, "tm", "ia64", "pbe", 732 }, 733 .cpuid = {.eax = 1, .reg = R_EDX, }, 734 .tcg_features = TCG_FEATURES, 735 }, 736 [FEAT_1_ECX] = { 737 .type = CPUID_FEATURE_WORD, 738 .feat_names = { 739 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor", 740 "ds-cpl", "vmx", "smx", "est", 741 "tm2", "ssse3", "cid", NULL, 742 "fma", "cx16", "xtpr", "pdcm", 743 NULL, "pcid", "dca", "sse4.1", 744 "sse4.2", "x2apic", "movbe", "popcnt", 745 "tsc-deadline", "aes", "xsave", NULL /* osxsave */, 746 "avx", "f16c", "rdrand", "hypervisor", 747 }, 748 .cpuid = { .eax = 1, .reg = R_ECX, }, 749 .tcg_features = TCG_EXT_FEATURES, 750 }, 751 /* Feature names that are already defined on feature_name[] but 752 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their 753 * names on feat_names below. They are copied automatically 754 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD. 755 */ 756 [FEAT_8000_0001_EDX] = { 757 .type = CPUID_FEATURE_WORD, 758 .feat_names = { 759 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */, 760 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */, 761 NULL /* cx8 */, NULL /* apic */, NULL, "syscall", 762 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */, 763 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */, 764 "nx", NULL, "mmxext", NULL /* mmx */, 765 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp", 766 NULL, "lm", "3dnowext", "3dnow", 767 }, 768 .cpuid = { .eax = 0x80000001, .reg = R_EDX, }, 769 .tcg_features = TCG_EXT2_FEATURES, 770 }, 771 [FEAT_8000_0001_ECX] = { 772 .type = CPUID_FEATURE_WORD, 773 .feat_names = { 774 "lahf-lm", "cmp-legacy", "svm", "extapic", 775 "cr8legacy", "abm", "sse4a", "misalignsse", 776 "3dnowprefetch", "osvw", "ibs", "xop", 777 "skinit", "wdt", NULL, "lwp", 778 "fma4", "tce", NULL, "nodeid-msr", 779 NULL, "tbm", "topoext", "perfctr-core", 780 "perfctr-nb", NULL, NULL, NULL, 781 NULL, NULL, NULL, NULL, 782 }, 783 .cpuid = { .eax = 0x80000001, .reg = R_ECX, }, 784 .tcg_features = TCG_EXT3_FEATURES, 785 /* 786 * TOPOEXT is always allowed but can't be enabled blindly by 787 * "-cpu host", as it requires consistent cache topology info 788 * to be provided so it doesn't confuse guests. 
789 */ 790 .no_autoenable_flags = CPUID_EXT3_TOPOEXT, 791 }, 792 [FEAT_C000_0001_EDX] = { 793 .type = CPUID_FEATURE_WORD, 794 .feat_names = { 795 NULL, NULL, "xstore", "xstore-en", 796 NULL, NULL, "xcrypt", "xcrypt-en", 797 "ace2", "ace2-en", "phe", "phe-en", 798 "pmm", "pmm-en", NULL, NULL, 799 NULL, NULL, NULL, NULL, 800 NULL, NULL, NULL, NULL, 801 NULL, NULL, NULL, NULL, 802 NULL, NULL, NULL, NULL, 803 }, 804 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, }, 805 .tcg_features = TCG_EXT4_FEATURES, 806 }, 807 [FEAT_KVM] = { 808 .type = CPUID_FEATURE_WORD, 809 .feat_names = { 810 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock", 811 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt", 812 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi", 813 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL, 814 NULL, NULL, NULL, NULL, 815 NULL, NULL, NULL, NULL, 816 "kvmclock-stable-bit", NULL, NULL, NULL, 817 NULL, NULL, NULL, NULL, 818 }, 819 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, }, 820 .tcg_features = TCG_KVM_FEATURES, 821 }, 822 [FEAT_KVM_HINTS] = { 823 .type = CPUID_FEATURE_WORD, 824 .feat_names = { 825 "kvm-hint-dedicated", NULL, NULL, NULL, 826 NULL, NULL, NULL, NULL, 827 NULL, NULL, NULL, NULL, 828 NULL, NULL, NULL, NULL, 829 NULL, NULL, NULL, NULL, 830 NULL, NULL, NULL, NULL, 831 NULL, NULL, NULL, NULL, 832 NULL, NULL, NULL, NULL, 833 }, 834 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, }, 835 .tcg_features = TCG_KVM_FEATURES, 836 /* 837 * KVM hints aren't auto-enabled by -cpu host, they need to be 838 * explicitly enabled in the command-line. 839 */ 840 .no_autoenable_flags = ~0U, 841 }, 842 /* 843 * .feat_names are commented out for Hyper-V enlightenments because we 844 * don't want to have two different ways for enabling them on QEMU command 845 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require 846 * enabling several feature bits simultaneously, exposing these bits 847 * individually may just confuse guests. 
848 */ 849 [FEAT_HYPERV_EAX] = { 850 .type = CPUID_FEATURE_WORD, 851 .feat_names = { 852 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */, 853 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */, 854 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */, 855 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */, 856 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */, 857 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */, 858 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */, 859 NULL, NULL, 860 NULL, NULL, NULL, NULL, 861 NULL, NULL, NULL, NULL, 862 NULL, NULL, NULL, NULL, 863 NULL, NULL, NULL, NULL, 864 }, 865 .cpuid = { .eax = 0x40000003, .reg = R_EAX, }, 866 }, 867 [FEAT_HYPERV_EBX] = { 868 .type = CPUID_FEATURE_WORD, 869 .feat_names = { 870 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */, 871 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */, 872 NULL /* hv_post_messages */, NULL /* hv_signal_events */, 873 NULL /* hv_create_port */, NULL /* hv_connect_port */, 874 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */, 875 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */, 876 NULL, NULL, 877 NULL, NULL, NULL, NULL, 878 NULL, NULL, NULL, NULL, 879 NULL, NULL, NULL, NULL, 880 NULL, NULL, NULL, NULL, 881 }, 882 .cpuid = { .eax = 0x40000003, .reg = R_EBX, }, 883 }, 884 [FEAT_HYPERV_EDX] = { 885 .type = CPUID_FEATURE_WORD, 886 .feat_names = { 887 NULL /* hv_mwait */, NULL /* hv_guest_debugging */, 888 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */, 889 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */, 890 NULL, NULL, 891 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL, 892 NULL, NULL, NULL, NULL, 893 NULL, NULL, NULL, NULL, 894 NULL, NULL, NULL, NULL, 895 NULL, NULL, NULL, NULL, 896 NULL, NULL, NULL, NULL, 897 }, 898 .cpuid = { .eax = 0x40000003, .reg = R_EDX, }, 899 }, 900 [FEAT_HV_RECOMM_EAX] = { 901 .type = CPUID_FEATURE_WORD, 902 .feat_names = { 903 NULL /* hv_recommend_pv_as_switch */, 904 NULL /* hv_recommend_pv_tlbflush_local */, 905 NULL /* hv_recommend_pv_tlbflush_remote */, 906 NULL /* hv_recommend_msr_apic_access */, 907 NULL /* hv_recommend_msr_reset */, 908 NULL /* hv_recommend_relaxed_timing */, 909 NULL /* hv_recommend_dma_remapping */, 910 NULL /* hv_recommend_int_remapping */, 911 NULL /* hv_recommend_x2apic_msrs */, 912 NULL /* hv_recommend_autoeoi_deprecation */, 913 NULL /* hv_recommend_pv_ipi */, 914 NULL /* hv_recommend_ex_hypercalls */, 915 NULL /* hv_hypervisor_is_nested */, 916 NULL /* hv_recommend_int_mbec */, 917 NULL /* hv_recommend_evmcs */, 918 NULL, 919 NULL, NULL, NULL, NULL, 920 NULL, NULL, NULL, NULL, 921 NULL, NULL, NULL, NULL, 922 NULL, NULL, NULL, NULL, 923 }, 924 .cpuid = { .eax = 0x40000004, .reg = R_EAX, }, 925 }, 926 [FEAT_HV_NESTED_EAX] = { 927 .type = CPUID_FEATURE_WORD, 928 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, }, 929 }, 930 [FEAT_SVM] = { 931 .type = CPUID_FEATURE_WORD, 932 .feat_names = { 933 "npt", "lbrv", "svm-lock", "nrip-save", 934 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists", 935 NULL, NULL, "pause-filter", NULL, 936 "pfthreshold", NULL, NULL, NULL, 937 NULL, NULL, NULL, NULL, 938 NULL, NULL, NULL, NULL, 939 NULL, NULL, NULL, NULL, 940 NULL, NULL, NULL, NULL, 941 }, 942 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, 943 .tcg_features = TCG_SVM_FEATURES, 944 }, 945 [FEAT_7_0_EBX] = { 946 .type = CPUID_FEATURE_WORD, 947 
.feat_names = { 948 "fsgsbase", "tsc-adjust", NULL, "bmi1", 949 "hle", "avx2", NULL, "smep", 950 "bmi2", "erms", "invpcid", "rtm", 951 NULL, NULL, "mpx", NULL, 952 "avx512f", "avx512dq", "rdseed", "adx", 953 "smap", "avx512ifma", "pcommit", "clflushopt", 954 "clwb", "intel-pt", "avx512pf", "avx512er", 955 "avx512cd", "sha-ni", "avx512bw", "avx512vl", 956 }, 957 .cpuid = { 958 .eax = 7, 959 .needs_ecx = true, .ecx = 0, 960 .reg = R_EBX, 961 }, 962 .tcg_features = TCG_7_0_EBX_FEATURES, 963 }, 964 [FEAT_7_0_ECX] = { 965 .type = CPUID_FEATURE_WORD, 966 .feat_names = { 967 NULL, "avx512vbmi", "umip", "pku", 968 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL, 969 "gfni", "vaes", "vpclmulqdq", "avx512vnni", 970 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL, 971 "la57", NULL, NULL, NULL, 972 NULL, NULL, "rdpid", NULL, 973 NULL, "cldemote", NULL, "movdiri", 974 "movdir64b", NULL, NULL, NULL, 975 }, 976 .cpuid = { 977 .eax = 7, 978 .needs_ecx = true, .ecx = 0, 979 .reg = R_ECX, 980 }, 981 .tcg_features = TCG_7_0_ECX_FEATURES, 982 }, 983 [FEAT_7_0_EDX] = { 984 .type = CPUID_FEATURE_WORD, 985 .feat_names = { 986 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps", 987 NULL, NULL, NULL, NULL, 988 "avx512-vp2intersect", NULL, "md-clear", NULL, 989 NULL, NULL, NULL, NULL, 990 NULL, NULL, NULL /* pconfig */, NULL, 991 NULL, NULL, NULL, NULL, 992 NULL, NULL, "spec-ctrl", "stibp", 993 NULL, "arch-capabilities", "core-capability", "ssbd", 994 }, 995 .cpuid = { 996 .eax = 7, 997 .needs_ecx = true, .ecx = 0, 998 .reg = R_EDX, 999 }, 1000 .tcg_features = TCG_7_0_EDX_FEATURES, 1001 }, 1002 [FEAT_7_1_EAX] = { 1003 .type = CPUID_FEATURE_WORD, 1004 .feat_names = { 1005 NULL, NULL, NULL, NULL, 1006 NULL, "avx512-bf16", NULL, NULL, 1007 NULL, NULL, NULL, NULL, 1008 NULL, NULL, NULL, NULL, 1009 NULL, NULL, NULL, NULL, 1010 NULL, NULL, NULL, NULL, 1011 NULL, NULL, NULL, NULL, 1012 NULL, NULL, NULL, NULL, 1013 }, 1014 .cpuid = { 1015 .eax = 7, 1016 .needs_ecx = true, .ecx = 1, 1017 .reg = R_EAX, 1018 }, 1019 .tcg_features = TCG_7_1_EAX_FEATURES, 1020 }, 1021 [FEAT_8000_0007_EDX] = { 1022 .type = CPUID_FEATURE_WORD, 1023 .feat_names = { 1024 NULL, NULL, NULL, NULL, 1025 NULL, NULL, NULL, NULL, 1026 "invtsc", NULL, NULL, NULL, 1027 NULL, NULL, NULL, NULL, 1028 NULL, NULL, NULL, NULL, 1029 NULL, NULL, NULL, NULL, 1030 NULL, NULL, NULL, NULL, 1031 NULL, NULL, NULL, NULL, 1032 }, 1033 .cpuid = { .eax = 0x80000007, .reg = R_EDX, }, 1034 .tcg_features = TCG_APM_FEATURES, 1035 .unmigratable_flags = CPUID_APM_INVTSC, 1036 }, 1037 [FEAT_8000_0008_EBX] = { 1038 .type = CPUID_FEATURE_WORD, 1039 .feat_names = { 1040 "clzero", NULL, "xsaveerptr", NULL, 1041 NULL, NULL, NULL, NULL, 1042 NULL, "wbnoinvd", NULL, NULL, 1043 "ibpb", NULL, NULL, "amd-stibp", 1044 NULL, NULL, NULL, NULL, 1045 NULL, NULL, NULL, NULL, 1046 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, 1047 NULL, NULL, NULL, NULL, 1048 }, 1049 .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, 1050 .tcg_features = 0, 1051 .unmigratable_flags = 0, 1052 }, 1053 [FEAT_XSAVE] = { 1054 .type = CPUID_FEATURE_WORD, 1055 .feat_names = { 1056 "xsaveopt", "xsavec", "xgetbv1", "xsaves", 1057 NULL, NULL, NULL, NULL, 1058 NULL, NULL, NULL, NULL, 1059 NULL, NULL, NULL, NULL, 1060 NULL, NULL, NULL, NULL, 1061 NULL, NULL, NULL, NULL, 1062 NULL, NULL, NULL, NULL, 1063 NULL, NULL, NULL, NULL, 1064 }, 1065 .cpuid = { 1066 .eax = 0xd, 1067 .needs_ecx = true, .ecx = 1, 1068 .reg = R_EAX, 1069 }, 1070 .tcg_features = TCG_XSAVE_FEATURES, 1071 }, 1072 [FEAT_6_EAX] = { 1073 .type = CPUID_FEATURE_WORD, 1074 
.feat_names = { 1075 NULL, NULL, "arat", NULL, 1076 NULL, NULL, NULL, NULL, 1077 NULL, NULL, NULL, NULL, 1078 NULL, NULL, NULL, NULL, 1079 NULL, NULL, NULL, NULL, 1080 NULL, NULL, NULL, NULL, 1081 NULL, NULL, NULL, NULL, 1082 NULL, NULL, NULL, NULL, 1083 }, 1084 .cpuid = { .eax = 6, .reg = R_EAX, }, 1085 .tcg_features = TCG_6_EAX_FEATURES, 1086 }, 1087 [FEAT_XSAVE_COMP_LO] = { 1088 .type = CPUID_FEATURE_WORD, 1089 .cpuid = { 1090 .eax = 0xD, 1091 .needs_ecx = true, .ecx = 0, 1092 .reg = R_EAX, 1093 }, 1094 .tcg_features = ~0U, 1095 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK | 1096 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK | 1097 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | 1098 XSTATE_PKRU_MASK, 1099 }, 1100 [FEAT_XSAVE_COMP_HI] = { 1101 .type = CPUID_FEATURE_WORD, 1102 .cpuid = { 1103 .eax = 0xD, 1104 .needs_ecx = true, .ecx = 0, 1105 .reg = R_EDX, 1106 }, 1107 .tcg_features = ~0U, 1108 }, 1109 /*Below are MSR exposed features*/ 1110 [FEAT_ARCH_CAPABILITIES] = { 1111 .type = MSR_FEATURE_WORD, 1112 .feat_names = { 1113 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", 1114 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", 1115 "taa-no", NULL, NULL, NULL, 1116 NULL, NULL, NULL, NULL, 1117 NULL, NULL, NULL, NULL, 1118 NULL, NULL, NULL, NULL, 1119 NULL, NULL, NULL, NULL, 1120 NULL, NULL, NULL, NULL, 1121 }, 1122 .msr = { 1123 .index = MSR_IA32_ARCH_CAPABILITIES, 1124 }, 1125 }, 1126 [FEAT_CORE_CAPABILITY] = { 1127 .type = MSR_FEATURE_WORD, 1128 .feat_names = { 1129 NULL, NULL, NULL, NULL, 1130 NULL, "split-lock-detect", NULL, NULL, 1131 NULL, NULL, NULL, NULL, 1132 NULL, NULL, NULL, NULL, 1133 NULL, NULL, NULL, NULL, 1134 NULL, NULL, NULL, NULL, 1135 NULL, NULL, NULL, NULL, 1136 NULL, NULL, NULL, NULL, 1137 }, 1138 .msr = { 1139 .index = MSR_IA32_CORE_CAPABILITY, 1140 }, 1141 }, 1142 [FEAT_PERF_CAPABILITIES] = { 1143 .type = MSR_FEATURE_WORD, 1144 .feat_names = { 1145 NULL, NULL, NULL, NULL, 1146 NULL, NULL, NULL, NULL, 1147 NULL, NULL, NULL, NULL, 1148 NULL, "full-width-write", NULL, NULL, 1149 NULL, NULL, NULL, NULL, 1150 NULL, NULL, NULL, NULL, 1151 NULL, NULL, NULL, NULL, 1152 NULL, NULL, NULL, NULL, 1153 }, 1154 .msr = { 1155 .index = MSR_IA32_PERF_CAPABILITIES, 1156 }, 1157 }, 1158 1159 [FEAT_VMX_PROCBASED_CTLS] = { 1160 .type = MSR_FEATURE_WORD, 1161 .feat_names = { 1162 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset", 1163 NULL, NULL, NULL, "vmx-hlt-exit", 1164 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit", 1165 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit", 1166 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit", 1167 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit", 1168 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf", 1169 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls", 1170 }, 1171 .msr = { 1172 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS, 1173 } 1174 }, 1175 1176 [FEAT_VMX_SECONDARY_CTLS] = { 1177 .type = MSR_FEATURE_WORD, 1178 .feat_names = { 1179 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit", 1180 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest", 1181 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit", 1182 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit", 1183 "vmx-rdseed-exit", "vmx-pml", NULL, NULL, 1184 "vmx-xsaves", NULL, NULL, NULL, 1185 NULL, NULL, NULL, NULL, 1186 NULL, NULL, NULL, NULL, 1187 }, 1188 .msr = { 1189 .index = 
MSR_IA32_VMX_PROCBASED_CTLS2, 1190 } 1191 }, 1192 1193 [FEAT_VMX_PINBASED_CTLS] = { 1194 .type = MSR_FEATURE_WORD, 1195 .feat_names = { 1196 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit", 1197 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr", 1198 NULL, NULL, NULL, NULL, 1199 NULL, NULL, NULL, NULL, 1200 NULL, NULL, NULL, NULL, 1201 NULL, NULL, NULL, NULL, 1202 NULL, NULL, NULL, NULL, 1203 NULL, NULL, NULL, NULL, 1204 }, 1205 .msr = { 1206 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS, 1207 } 1208 }, 1209 1210 [FEAT_VMX_EXIT_CTLS] = { 1211 .type = MSR_FEATURE_WORD, 1212 /* 1213 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from 1214 * the LM CPUID bit. 1215 */ 1216 .feat_names = { 1217 NULL, NULL, "vmx-exit-nosave-debugctl", NULL, 1218 NULL, NULL, NULL, NULL, 1219 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL, 1220 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr", 1221 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat", 1222 "vmx-exit-save-efer", "vmx-exit-load-efer", 1223 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs", 1224 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL, 1225 NULL, NULL, NULL, NULL, 1226 }, 1227 .msr = { 1228 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS, 1229 } 1230 }, 1231 1232 [FEAT_VMX_ENTRY_CTLS] = { 1233 .type = MSR_FEATURE_WORD, 1234 .feat_names = { 1235 NULL, NULL, "vmx-entry-noload-debugctl", NULL, 1236 NULL, NULL, NULL, NULL, 1237 NULL, "vmx-entry-ia32e-mode", NULL, NULL, 1238 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer", 1239 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL, 1240 NULL, NULL, NULL, NULL, 1241 NULL, NULL, NULL, NULL, 1242 NULL, NULL, NULL, NULL, 1243 }, 1244 .msr = { 1245 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS, 1246 } 1247 }, 1248 1249 [FEAT_VMX_MISC] = { 1250 .type = MSR_FEATURE_WORD, 1251 .feat_names = { 1252 NULL, NULL, NULL, NULL, 1253 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown", 1254 "vmx-activity-wait-sipi", NULL, NULL, NULL, 1255 NULL, NULL, NULL, NULL, 1256 NULL, NULL, NULL, NULL, 1257 NULL, NULL, NULL, NULL, 1258 NULL, NULL, NULL, NULL, 1259 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL, 1260 }, 1261 .msr = { 1262 .index = MSR_IA32_VMX_MISC, 1263 } 1264 }, 1265 1266 [FEAT_VMX_EPT_VPID_CAPS] = { 1267 .type = MSR_FEATURE_WORD, 1268 .feat_names = { 1269 "vmx-ept-execonly", NULL, NULL, NULL, 1270 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5", 1271 NULL, NULL, NULL, NULL, 1272 NULL, NULL, NULL, NULL, 1273 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL, 1274 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL, 1275 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL, 1276 NULL, NULL, NULL, NULL, 1277 "vmx-invvpid", NULL, NULL, NULL, 1278 NULL, NULL, NULL, NULL, 1279 "vmx-invvpid-single-addr", "vmx-invept-single-context", 1280 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals", 1281 NULL, NULL, NULL, NULL, 1282 NULL, NULL, NULL, NULL, 1283 NULL, NULL, NULL, NULL, 1284 NULL, NULL, NULL, NULL, 1285 NULL, NULL, NULL, NULL, 1286 }, 1287 .msr = { 1288 .index = MSR_IA32_VMX_EPT_VPID_CAP, 1289 } 1290 }, 1291 1292 [FEAT_VMX_BASIC] = { 1293 .type = MSR_FEATURE_WORD, 1294 .feat_names = { 1295 [54] = "vmx-ins-outs", 1296 [55] = "vmx-true-ctls", 1297 }, 1298 .msr = { 1299 .index = MSR_IA32_VMX_BASIC, 1300 }, 1301 /* Just to be safe - we don't support setting the MSEG version field. 
*/ 1302 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR, 1303 }, 1304 1305 [FEAT_VMX_VMFUNC] = { 1306 .type = MSR_FEATURE_WORD, 1307 .feat_names = { 1308 [0] = "vmx-eptp-switching", 1309 }, 1310 .msr = { 1311 .index = MSR_IA32_VMX_VMFUNC, 1312 } 1313 }, 1314 1315 }; 1316 1317 typedef struct FeatureMask { 1318 FeatureWord index; 1319 uint64_t mask; 1320 } FeatureMask; 1321 1322 typedef struct FeatureDep { 1323 FeatureMask from, to; 1324 } FeatureDep; 1325 1326 static FeatureDep feature_dependencies[] = { 1327 { 1328 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES }, 1329 .to = { FEAT_ARCH_CAPABILITIES, ~0ull }, 1330 }, 1331 { 1332 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY }, 1333 .to = { FEAT_CORE_CAPABILITY, ~0ull }, 1334 }, 1335 { 1336 .from = { FEAT_1_ECX, CPUID_EXT_PDCM }, 1337 .to = { FEAT_PERF_CAPABILITIES, ~0ull }, 1338 }, 1339 { 1340 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1341 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull }, 1342 }, 1343 { 1344 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1345 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull }, 1346 }, 1347 { 1348 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1349 .to = { FEAT_VMX_EXIT_CTLS, ~0ull }, 1350 }, 1351 { 1352 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1353 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull }, 1354 }, 1355 { 1356 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1357 .to = { FEAT_VMX_MISC, ~0ull }, 1358 }, 1359 { 1360 .from = { FEAT_1_ECX, CPUID_EXT_VMX }, 1361 .to = { FEAT_VMX_BASIC, ~0ull }, 1362 }, 1363 { 1364 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM }, 1365 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE }, 1366 }, 1367 { 1368 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS }, 1369 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull }, 1370 }, 1371 { 1372 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES }, 1373 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES }, 1374 }, 1375 { 1376 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND }, 1377 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING }, 1378 }, 1379 { 1380 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID }, 1381 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID }, 1382 }, 1383 { 1384 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED }, 1385 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING }, 1386 }, 1387 { 1388 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP }, 1389 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP }, 1390 }, 1391 { 1392 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1393 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull }, 1394 }, 1395 { 1396 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT }, 1397 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST }, 1398 }, 1399 { 1400 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID }, 1401 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 }, 1402 }, 1403 { 1404 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC }, 1405 .to = { FEAT_VMX_VMFUNC, ~0ull }, 1406 }, 1407 { 1408 .from = { FEAT_8000_0001_ECX, CPUID_EXT3_SVM }, 1409 .to = { FEAT_SVM, ~0ull }, 1410 }, 1411 }; 1412 1413 typedef struct X86RegisterInfo32 { 1414 /* Name of register */ 1415 const char *name; 1416 /* QAPI enum value register */ 1417 X86CPURegister32 qapi_enum; 1418 } X86RegisterInfo32; 1419 1420 #define REGISTER(reg) \ 1421 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg } 1422 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { 1423 REGISTER(EAX), 1424 REGISTER(ECX), 1425 
REGISTER(EDX), 1426 REGISTER(EBX), 1427 REGISTER(ESP), 1428 REGISTER(EBP), 1429 REGISTER(ESI), 1430 REGISTER(EDI), 1431 }; 1432 #undef REGISTER 1433 1434 typedef struct ExtSaveArea { 1435 uint32_t feature, bits; 1436 uint32_t offset, size; 1437 } ExtSaveArea; 1438 1439 static const ExtSaveArea x86_ext_save_areas[] = { 1440 [XSTATE_FP_BIT] = { 1441 /* x87 FP state component is always enabled if XSAVE is supported */ 1442 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1443 /* x87 state is in the legacy region of the XSAVE area */ 1444 .offset = 0, 1445 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1446 }, 1447 [XSTATE_SSE_BIT] = { 1448 /* SSE state component is always enabled if XSAVE is supported */ 1449 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE, 1450 /* SSE state is in the legacy region of the XSAVE area */ 1451 .offset = 0, 1452 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), 1453 }, 1454 [XSTATE_YMM_BIT] = 1455 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX, 1456 .offset = offsetof(X86XSaveArea, avx_state), 1457 .size = sizeof(XSaveAVX) }, 1458 [XSTATE_BNDREGS_BIT] = 1459 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1460 .offset = offsetof(X86XSaveArea, bndreg_state), 1461 .size = sizeof(XSaveBNDREG) }, 1462 [XSTATE_BNDCSR_BIT] = 1463 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX, 1464 .offset = offsetof(X86XSaveArea, bndcsr_state), 1465 .size = sizeof(XSaveBNDCSR) }, 1466 [XSTATE_OPMASK_BIT] = 1467 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1468 .offset = offsetof(X86XSaveArea, opmask_state), 1469 .size = sizeof(XSaveOpmask) }, 1470 [XSTATE_ZMM_Hi256_BIT] = 1471 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1472 .offset = offsetof(X86XSaveArea, zmm_hi256_state), 1473 .size = sizeof(XSaveZMM_Hi256) }, 1474 [XSTATE_Hi16_ZMM_BIT] = 1475 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F, 1476 .offset = offsetof(X86XSaveArea, hi16_zmm_state), 1477 .size = sizeof(XSaveHi16_ZMM) }, 1478 [XSTATE_PKRU_BIT] = 1479 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, 1480 .offset = offsetof(X86XSaveArea, pkru_state), 1481 .size = sizeof(XSavePKRU) }, 1482 }; 1483 1484 static uint32_t xsave_area_size(uint64_t mask) 1485 { 1486 int i; 1487 uint64_t ret = 0; 1488 1489 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 1490 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 1491 if ((mask >> i) & 1) { 1492 ret = MAX(ret, esa->offset + esa->size); 1493 } 1494 } 1495 return ret; 1496 } 1497 1498 static inline bool accel_uses_host_cpuid(void) 1499 { 1500 return kvm_enabled() || hvf_enabled(); 1501 } 1502 1503 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) 1504 { 1505 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | 1506 cpu->env.features[FEAT_XSAVE_COMP_LO]; 1507 } 1508 1509 const char *get_register_name_32(unsigned int reg) 1510 { 1511 if (reg >= CPU_NB_REGS32) { 1512 return NULL; 1513 } 1514 return x86_reg_info_32[reg].name; 1515 } 1516 1517 /* 1518 * Returns the set of feature flags that are supported and migratable by 1519 * QEMU, for a given FeatureWord. 
1520 */ 1521 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w) 1522 { 1523 FeatureWordInfo *wi = &feature_word_info[w]; 1524 uint64_t r = 0; 1525 int i; 1526 1527 for (i = 0; i < 64; i++) { 1528 uint64_t f = 1ULL << i; 1529 1530 /* If the feature name is known, it is implicitly considered migratable, 1531 * unless it is explicitly set in unmigratable_flags */ 1532 if ((wi->migratable_flags & f) || 1533 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) { 1534 r |= f; 1535 } 1536 } 1537 return r; 1538 } 1539 1540 void host_cpuid(uint32_t function, uint32_t count, 1541 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) 1542 { 1543 uint32_t vec[4]; 1544 1545 #ifdef __x86_64__ 1546 asm volatile("cpuid" 1547 : "=a"(vec[0]), "=b"(vec[1]), 1548 "=c"(vec[2]), "=d"(vec[3]) 1549 : "0"(function), "c"(count) : "cc"); 1550 #elif defined(__i386__) 1551 asm volatile("pusha \n\t" 1552 "cpuid \n\t" 1553 "mov %%eax, 0(%2) \n\t" 1554 "mov %%ebx, 4(%2) \n\t" 1555 "mov %%ecx, 8(%2) \n\t" 1556 "mov %%edx, 12(%2) \n\t" 1557 "popa" 1558 : : "a"(function), "c"(count), "S"(vec) 1559 : "memory", "cc"); 1560 #else 1561 abort(); 1562 #endif 1563 1564 if (eax) 1565 *eax = vec[0]; 1566 if (ebx) 1567 *ebx = vec[1]; 1568 if (ecx) 1569 *ecx = vec[2]; 1570 if (edx) 1571 *edx = vec[3]; 1572 } 1573 1574 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping) 1575 { 1576 uint32_t eax, ebx, ecx, edx; 1577 1578 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx); 1579 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx); 1580 1581 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx); 1582 if (family) { 1583 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF); 1584 } 1585 if (model) { 1586 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); 1587 } 1588 if (stepping) { 1589 *stepping = eax & 0x0F; 1590 } 1591 } 1592 1593 /* CPU class name definitions: */ 1594 1595 /* Return type name for a given CPU model name 1596 * Caller is responsible for freeing the returned string. 1597 */ 1598 static char *x86_cpu_type_name(const char *model_name) 1599 { 1600 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name); 1601 } 1602 1603 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model) 1604 { 1605 g_autofree char *typename = x86_cpu_type_name(cpu_model); 1606 return object_class_by_name(typename); 1607 } 1608 1609 static char *x86_cpu_class_get_model_name(X86CPUClass *cc) 1610 { 1611 const char *class_name = object_class_get_name(OBJECT_CLASS(cc)); 1612 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX)); 1613 return g_strndup(class_name, 1614 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX)); 1615 } 1616 1617 typedef struct PropValue { 1618 const char *prop, *value; 1619 } PropValue; 1620 1621 typedef struct X86CPUVersionDefinition { 1622 X86CPUVersion version; 1623 const char *alias; 1624 const char *note; 1625 PropValue *props; 1626 } X86CPUVersionDefinition; 1627 1628 /* Base definition for a CPU model */ 1629 typedef struct X86CPUDefinition { 1630 const char *name; 1631 uint32_t level; 1632 uint32_t xlevel; 1633 /* vendor is zero-terminated, 12 character ASCII string */ 1634 char vendor[CPUID_VENDOR_SZ + 1]; 1635 int family; 1636 int model; 1637 int stepping; 1638 FeatureWordArray features; 1639 const char *model_id; 1640 CPUCaches *cache_info; 1641 1642 /* Use AMD EPYC encoding for apic id */ 1643 bool use_epyc_apic_id_encoding; 1644 1645 /* 1646 * Definitions for alternative versions of CPU model. 1647 * List is terminated by item with version == 0. 
1648 * If NULL, version 1 will be registered automatically. 1649 */ 1650 const X86CPUVersionDefinition *versions; 1651 } X86CPUDefinition; 1652 1653 /* Reference to a specific CPU model version */ 1654 struct X86CPUModel { 1655 /* Base CPU definition */ 1656 X86CPUDefinition *cpudef; 1657 /* CPU model version */ 1658 X86CPUVersion version; 1659 const char *note; 1660 /* 1661 * If true, this is an alias CPU model. 1662 * This matters only for "-cpu help" and query-cpu-definitions 1663 */ 1664 bool is_alias; 1665 }; 1666 1667 /* Get full model name for CPU version */ 1668 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef, 1669 X86CPUVersion version) 1670 { 1671 assert(version > 0); 1672 return g_strdup_printf("%s-v%d", cpudef->name, (int)version); 1673 } 1674 1675 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def) 1676 { 1677 /* When X86CPUDefinition::versions is NULL, we register only v1 */ 1678 static const X86CPUVersionDefinition default_version_list[] = { 1679 { 1 }, 1680 { /* end of list */ } 1681 }; 1682 1683 return def->versions ?: default_version_list; 1684 } 1685 1686 bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type) 1687 { 1688 X86CPUClass *xcc = X86_CPU_CLASS(object_class_by_name(cpu_type)); 1689 1690 assert(xcc); 1691 if (xcc->model && xcc->model->cpudef) { 1692 return xcc->model->cpudef->use_epyc_apic_id_encoding; 1693 } else { 1694 return false; 1695 } 1696 } 1697 1698 static CPUCaches epyc_cache_info = { 1699 .l1d_cache = &(CPUCacheInfo) { 1700 .type = DATA_CACHE, 1701 .level = 1, 1702 .size = 32 * KiB, 1703 .line_size = 64, 1704 .associativity = 8, 1705 .partitions = 1, 1706 .sets = 64, 1707 .lines_per_tag = 1, 1708 .self_init = 1, 1709 .no_invd_sharing = true, 1710 }, 1711 .l1i_cache = &(CPUCacheInfo) { 1712 .type = INSTRUCTION_CACHE, 1713 .level = 1, 1714 .size = 64 * KiB, 1715 .line_size = 64, 1716 .associativity = 4, 1717 .partitions = 1, 1718 .sets = 256, 1719 .lines_per_tag = 1, 1720 .self_init = 1, 1721 .no_invd_sharing = true, 1722 }, 1723 .l2_cache = &(CPUCacheInfo) { 1724 .type = UNIFIED_CACHE, 1725 .level = 2, 1726 .size = 512 * KiB, 1727 .line_size = 64, 1728 .associativity = 8, 1729 .partitions = 1, 1730 .sets = 1024, 1731 .lines_per_tag = 1, 1732 }, 1733 .l3_cache = &(CPUCacheInfo) { 1734 .type = UNIFIED_CACHE, 1735 .level = 3, 1736 .size = 8 * MiB, 1737 .line_size = 64, 1738 .associativity = 16, 1739 .partitions = 1, 1740 .sets = 8192, 1741 .lines_per_tag = 1, 1742 .self_init = true, 1743 .inclusive = true, 1744 .complex_indexing = true, 1745 }, 1746 }; 1747 1748 static CPUCaches epyc_rome_cache_info = { 1749 .l1d_cache = &(CPUCacheInfo) { 1750 .type = DATA_CACHE, 1751 .level = 1, 1752 .size = 32 * KiB, 1753 .line_size = 64, 1754 .associativity = 8, 1755 .partitions = 1, 1756 .sets = 64, 1757 .lines_per_tag = 1, 1758 .self_init = 1, 1759 .no_invd_sharing = true, 1760 }, 1761 .l1i_cache = &(CPUCacheInfo) { 1762 .type = INSTRUCTION_CACHE, 1763 .level = 1, 1764 .size = 32 * KiB, 1765 .line_size = 64, 1766 .associativity = 8, 1767 .partitions = 1, 1768 .sets = 64, 1769 .lines_per_tag = 1, 1770 .self_init = 1, 1771 .no_invd_sharing = true, 1772 }, 1773 .l2_cache = &(CPUCacheInfo) { 1774 .type = UNIFIED_CACHE, 1775 .level = 2, 1776 .size = 512 * KiB, 1777 .line_size = 64, 1778 .associativity = 8, 1779 .partitions = 1, 1780 .sets = 1024, 1781 .lines_per_tag = 1, 1782 }, 1783 .l3_cache = &(CPUCacheInfo) { 1784 .type = UNIFIED_CACHE, 1785 .level = 3, 1786 .size = 16 * MiB, 1787 .line_size = 64, 1788 
.associativity = 16, 1789 .partitions = 1, 1790 .sets = 16384, 1791 .lines_per_tag = 1, 1792 .self_init = true, 1793 .inclusive = true, 1794 .complex_indexing = true, 1795 }, 1796 }; 1797 1798 /* The following VMX features are not supported by KVM and are left out in the 1799 * CPU definitions: 1800 * 1801 * Dual-monitor support (all processors) 1802 * Entry to SMM 1803 * Deactivate dual-monitor treatment 1804 * Number of CR3-target values 1805 * Shutdown activity state 1806 * Wait-for-SIPI activity state 1807 * PAUSE-loop exiting (Westmere and newer) 1808 * EPT-violation #VE (Broadwell and newer) 1809 * Inject event with insn length=0 (Skylake and newer) 1810 * Conceal non-root operation from PT 1811 * Conceal VM exits from PT 1812 * Conceal VM entries from PT 1813 * Enable ENCLS exiting 1814 * Mode-based execute control (XS/XU) 1815 * TSC scaling (Skylake Server and newer) 1816 * GPA translation for PT (IceLake and newer) 1817 * User wait and pause 1818 * ENCLV exiting 1819 * Load IA32_RTIT_CTL 1820 * Clear IA32_RTIT_CTL 1821 * Advanced VM-exit information for EPT violations 1822 * Sub-page write permissions 1823 * PT in VMX operation 1824 */ 1825 1826 static X86CPUDefinition builtin_x86_defs[] = { 1827 { 1828 .name = "qemu64", 1829 .level = 0xd, 1830 .vendor = CPUID_VENDOR_AMD, 1831 .family = 6, 1832 .model = 6, 1833 .stepping = 3, 1834 .features[FEAT_1_EDX] = 1835 PPRO_FEATURES | 1836 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1837 CPUID_PSE36, 1838 .features[FEAT_1_ECX] = 1839 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1840 .features[FEAT_8000_0001_EDX] = 1841 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1842 .features[FEAT_8000_0001_ECX] = 1843 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM, 1844 .xlevel = 0x8000000A, 1845 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1846 }, 1847 { 1848 .name = "phenom", 1849 .level = 5, 1850 .vendor = CPUID_VENDOR_AMD, 1851 .family = 16, 1852 .model = 2, 1853 .stepping = 3, 1854 /* Missing: CPUID_HT */ 1855 .features[FEAT_1_EDX] = 1856 PPRO_FEATURES | 1857 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1858 CPUID_PSE36 | CPUID_VME, 1859 .features[FEAT_1_ECX] = 1860 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 | 1861 CPUID_EXT_POPCNT, 1862 .features[FEAT_8000_0001_EDX] = 1863 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | 1864 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT | 1865 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP, 1866 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1867 CPUID_EXT3_CR8LEG, 1868 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1869 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */ 1870 .features[FEAT_8000_0001_ECX] = 1871 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | 1872 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A, 1873 /* Missing: CPUID_SVM_LBRV */ 1874 .features[FEAT_SVM] = 1875 CPUID_SVM_NPT, 1876 .xlevel = 0x8000001A, 1877 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor" 1878 }, 1879 { 1880 .name = "core2duo", 1881 .level = 10, 1882 .vendor = CPUID_VENDOR_INTEL, 1883 .family = 6, 1884 .model = 15, 1885 .stepping = 11, 1886 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 1887 .features[FEAT_1_EDX] = 1888 PPRO_FEATURES | 1889 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1890 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS, 1891 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST, 1892 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */ 1893 .features[FEAT_1_ECX] = 1894 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 1895 CPUID_EXT_CX16, 1896 .features[FEAT_8000_0001_EDX] = 1897
CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1898 .features[FEAT_8000_0001_ECX] = 1899 CPUID_EXT3_LAHF_LM, 1900 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 1901 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1902 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1903 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1904 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1905 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 1906 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1907 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1908 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1909 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1910 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1911 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1912 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1913 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 1914 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 1915 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 1916 .features[FEAT_VMX_SECONDARY_CTLS] = 1917 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 1918 .xlevel = 0x80000008, 1919 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz", 1920 }, 1921 { 1922 .name = "kvm64", 1923 .level = 0xd, 1924 .vendor = CPUID_VENDOR_INTEL, 1925 .family = 15, 1926 .model = 6, 1927 .stepping = 1, 1928 /* Missing: CPUID_HT */ 1929 .features[FEAT_1_EDX] = 1930 PPRO_FEATURES | CPUID_VME | 1931 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | 1932 CPUID_PSE36, 1933 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */ 1934 .features[FEAT_1_ECX] = 1935 CPUID_EXT_SSE3 | CPUID_EXT_CX16, 1936 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */ 1937 .features[FEAT_8000_0001_EDX] = 1938 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 1939 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC, 1940 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A, 1941 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH, 1942 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */ 1943 .features[FEAT_8000_0001_ECX] = 1944 0, 1945 /* VMX features from Cedar Mill/Prescott */ 1946 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1947 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1948 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1949 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1950 VMX_PIN_BASED_NMI_EXITING, 1951 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1952 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1953 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1954 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 1955 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 1956 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 1957 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 1958 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING, 1959 .xlevel = 0x80000008, 1960 .model_id = "Common KVM processor" 1961 }, 1962 { 1963 .name = "qemu32", 1964 .level = 4, 1965 .vendor = CPUID_VENDOR_INTEL, 1966 .family = 6, 1967 .model = 6, 1968 .stepping = 3, 1969 .features[FEAT_1_EDX] = 1970 PPRO_FEATURES, 1971 .features[FEAT_1_ECX] = 1972 CPUID_EXT_SSE3, 1973 .xlevel = 0x80000004, 1974 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 1975 }, 1976 { 1977 .name = "kvm32", 1978 .level = 5, 
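        /*
         * Like "kvm64" above, this appears to be a deliberately minimal
         * feature set ("Common 32-bit KVM processor" per the model_id
         * below), i.e. a lowest-common-denominator model usable across a
         * wide range of KVM hosts.
         */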
1979 .vendor = CPUID_VENDOR_INTEL, 1980 .family = 15, 1981 .model = 6, 1982 .stepping = 1, 1983 .features[FEAT_1_EDX] = 1984 PPRO_FEATURES | CPUID_VME | 1985 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36, 1986 .features[FEAT_1_ECX] = 1987 CPUID_EXT_SSE3, 1988 .features[FEAT_8000_0001_ECX] = 1989 0, 1990 /* VMX features from Yonah */ 1991 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 1992 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 1993 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 1994 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 1995 VMX_PIN_BASED_NMI_EXITING, 1996 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 1997 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 1998 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 1999 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2000 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2001 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2002 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2003 .xlevel = 0x80000008, 2004 .model_id = "Common 32-bit KVM processor" 2005 }, 2006 { 2007 .name = "coreduo", 2008 .level = 10, 2009 .vendor = CPUID_VENDOR_INTEL, 2010 .family = 6, 2011 .model = 14, 2012 .stepping = 8, 2013 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2014 .features[FEAT_1_EDX] = 2015 PPRO_FEATURES | CPUID_VME | 2016 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI | 2017 CPUID_SS, 2018 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR, 2019 * CPUID_EXT_PDCM, CPUID_EXT_VMX */ 2020 .features[FEAT_1_ECX] = 2021 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR, 2022 .features[FEAT_8000_0001_EDX] = 2023 CPUID_EXT2_NX, 2024 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2025 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2026 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2027 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2028 VMX_PIN_BASED_NMI_EXITING, 2029 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2030 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2031 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2032 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2033 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | 2034 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | 2035 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS, 2036 .xlevel = 0x80000008, 2037 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz", 2038 }, 2039 { 2040 .name = "486", 2041 .level = 1, 2042 .vendor = CPUID_VENDOR_INTEL, 2043 .family = 4, 2044 .model = 8, 2045 .stepping = 0, 2046 .features[FEAT_1_EDX] = 2047 I486_FEATURES, 2048 .xlevel = 0, 2049 .model_id = "", 2050 }, 2051 { 2052 .name = "pentium", 2053 .level = 1, 2054 .vendor = CPUID_VENDOR_INTEL, 2055 .family = 5, 2056 .model = 4, 2057 .stepping = 3, 2058 .features[FEAT_1_EDX] = 2059 PENTIUM_FEATURES, 2060 .xlevel = 0, 2061 .model_id = "", 2062 }, 2063 { 2064 .name = "pentium2", 2065 .level = 2, 2066 .vendor = CPUID_VENDOR_INTEL, 2067 .family = 6, 2068 .model = 5, 2069 .stepping = 2, 2070 .features[FEAT_1_EDX] = 2071 PENTIUM2_FEATURES, 2072 .xlevel = 0, 2073 .model_id = "", 2074 }, 2075 { 2076 .name = "pentium3", 2077 .level = 3, 2078 .vendor = CPUID_VENDOR_INTEL, 2079 .family = 6, 2080 .model = 7, 2081 .stepping = 3, 2082 .features[FEAT_1_EDX] = 2083 PENTIUM3_FEATURES, 2084 .xlevel = 0, 2085 
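        /*
         * Roughly speaking, .level and .xlevel are the maximum basic and
         * extended CPUID leaves advertised to the guest (CPUID[0].EAX and
         * CPUID[0x80000000].EAX). The legacy models in this group leave
         * .xlevel at 0, so effectively no 0x8000xxxx leaves are exposed.
         */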
.model_id = "", 2086 }, 2087 { 2088 .name = "athlon", 2089 .level = 2, 2090 .vendor = CPUID_VENDOR_AMD, 2091 .family = 6, 2092 .model = 2, 2093 .stepping = 3, 2094 .features[FEAT_1_EDX] = 2095 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | 2096 CPUID_MCA, 2097 .features[FEAT_8000_0001_EDX] = 2098 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT, 2099 .xlevel = 0x80000008, 2100 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION, 2101 }, 2102 { 2103 .name = "n270", 2104 .level = 10, 2105 .vendor = CPUID_VENDOR_INTEL, 2106 .family = 6, 2107 .model = 28, 2108 .stepping = 2, 2109 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 2110 .features[FEAT_1_EDX] = 2111 PPRO_FEATURES | 2112 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME | 2113 CPUID_ACPI | CPUID_SS, 2114 /* Some CPUs got no CPUID_SEP */ 2115 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2, 2116 * CPUID_EXT_XTPR */ 2117 .features[FEAT_1_ECX] = 2118 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | 2119 CPUID_EXT_MOVBE, 2120 .features[FEAT_8000_0001_EDX] = 2121 CPUID_EXT2_NX, 2122 .features[FEAT_8000_0001_ECX] = 2123 CPUID_EXT3_LAHF_LM, 2124 .xlevel = 0x80000008, 2125 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz", 2126 }, 2127 { 2128 .name = "Conroe", 2129 .level = 10, 2130 .vendor = CPUID_VENDOR_INTEL, 2131 .family = 6, 2132 .model = 15, 2133 .stepping = 3, 2134 .features[FEAT_1_EDX] = 2135 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2136 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2137 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2138 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2139 CPUID_DE | CPUID_FP87, 2140 .features[FEAT_1_ECX] = 2141 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2142 .features[FEAT_8000_0001_EDX] = 2143 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2144 .features[FEAT_8000_0001_ECX] = 2145 CPUID_EXT3_LAHF_LM, 2146 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2147 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE, 2148 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT, 2149 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2150 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2151 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2152 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2153 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2154 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2155 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2156 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2157 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2158 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2159 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2160 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2161 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2162 .features[FEAT_VMX_SECONDARY_CTLS] = 2163 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES, 2164 .xlevel = 0x80000008, 2165 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)", 2166 }, 2167 { 2168 .name = "Penryn", 2169 .level = 10, 2170 .vendor = CPUID_VENDOR_INTEL, 2171 .family = 6, 2172 .model = 23, 2173 .stepping = 3, 2174 .features[FEAT_1_EDX] = 2175 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2176 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2177 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 
2178 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2179 CPUID_DE | CPUID_FP87, 2180 .features[FEAT_1_ECX] = 2181 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2182 CPUID_EXT_SSE3, 2183 .features[FEAT_8000_0001_EDX] = 2184 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2185 .features[FEAT_8000_0001_ECX] = 2186 CPUID_EXT3_LAHF_LM, 2187 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS, 2188 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2189 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL, 2190 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT | 2191 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL, 2192 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2193 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2194 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS, 2195 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2196 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2197 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2198 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2199 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2200 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2201 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2202 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2203 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2204 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2205 .features[FEAT_VMX_SECONDARY_CTLS] = 2206 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2207 VMX_SECONDARY_EXEC_WBINVD_EXITING, 2208 .xlevel = 0x80000008, 2209 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)", 2210 }, 2211 { 2212 .name = "Nehalem", 2213 .level = 11, 2214 .vendor = CPUID_VENDOR_INTEL, 2215 .family = 6, 2216 .model = 26, 2217 .stepping = 3, 2218 .features[FEAT_1_EDX] = 2219 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2220 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2221 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2222 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2223 CPUID_DE | CPUID_FP87, 2224 .features[FEAT_1_ECX] = 2225 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2226 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3, 2227 .features[FEAT_8000_0001_EDX] = 2228 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2229 .features[FEAT_8000_0001_ECX] = 2230 CPUID_EXT3_LAHF_LM, 2231 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2232 MSR_VMX_BASIC_TRUE_CTLS, 2233 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2234 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2235 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2236 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2237 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2238 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2239 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2240 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2241 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2242 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2243 .features[FEAT_VMX_EXIT_CTLS] = 2244 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2245 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2246 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2247 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2248 
VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2249 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT, 2250 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2251 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2252 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2253 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2254 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2255 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2256 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2257 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2258 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2259 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2260 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2261 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2262 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2263 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2264 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2265 .features[FEAT_VMX_SECONDARY_CTLS] = 2266 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2267 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2268 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2269 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2270 VMX_SECONDARY_EXEC_ENABLE_VPID, 2271 .xlevel = 0x80000008, 2272 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)", 2273 .versions = (X86CPUVersionDefinition[]) { 2274 { .version = 1 }, 2275 { 2276 .version = 2, 2277 .alias = "Nehalem-IBRS", 2278 .props = (PropValue[]) { 2279 { "spec-ctrl", "on" }, 2280 { "model-id", 2281 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" }, 2282 { /* end of list */ } 2283 } 2284 }, 2285 { /* end of list */ } 2286 } 2287 }, 2288 { 2289 .name = "Westmere", 2290 .level = 11, 2291 .vendor = CPUID_VENDOR_INTEL, 2292 .family = 6, 2293 .model = 44, 2294 .stepping = 1, 2295 .features[FEAT_1_EDX] = 2296 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2297 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2298 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2299 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2300 CPUID_DE | CPUID_FP87, 2301 .features[FEAT_1_ECX] = 2302 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 2303 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2304 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 2305 .features[FEAT_8000_0001_EDX] = 2306 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX, 2307 .features[FEAT_8000_0001_ECX] = 2308 CPUID_EXT3_LAHF_LM, 2309 .features[FEAT_6_EAX] = 2310 CPUID_6_EAX_ARAT, 2311 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2312 MSR_VMX_BASIC_TRUE_CTLS, 2313 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2314 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2315 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2316 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2317 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2318 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2319 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2320 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2321 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2322 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2323 .features[FEAT_VMX_EXIT_CTLS] = 2324 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2325 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 
| 2326 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2327 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2328 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2329 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2330 MSR_VMX_MISC_STORE_LMA, 2331 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2332 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2333 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2334 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2335 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2336 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2337 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2338 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2339 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2340 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2341 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2342 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2343 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2344 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2345 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2346 .features[FEAT_VMX_SECONDARY_CTLS] = 2347 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2348 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2349 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2350 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2351 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2352 .xlevel = 0x80000008, 2353 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)", 2354 .versions = (X86CPUVersionDefinition[]) { 2355 { .version = 1 }, 2356 { 2357 .version = 2, 2358 .alias = "Westmere-IBRS", 2359 .props = (PropValue[]) { 2360 { "spec-ctrl", "on" }, 2361 { "model-id", 2362 "Westmere E56xx/L56xx/X56xx (IBRS update)" }, 2363 { /* end of list */ } 2364 } 2365 }, 2366 { /* end of list */ } 2367 } 2368 }, 2369 { 2370 .name = "SandyBridge", 2371 .level = 0xd, 2372 .vendor = CPUID_VENDOR_INTEL, 2373 .family = 6, 2374 .model = 42, 2375 .stepping = 1, 2376 .features[FEAT_1_EDX] = 2377 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2378 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2379 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2380 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2381 CPUID_DE | CPUID_FP87, 2382 .features[FEAT_1_ECX] = 2383 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2384 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2385 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2386 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2387 CPUID_EXT_SSE3, 2388 .features[FEAT_8000_0001_EDX] = 2389 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2390 CPUID_EXT2_SYSCALL, 2391 .features[FEAT_8000_0001_ECX] = 2392 CPUID_EXT3_LAHF_LM, 2393 .features[FEAT_XSAVE] = 2394 CPUID_XSAVE_XSAVEOPT, 2395 .features[FEAT_6_EAX] = 2396 CPUID_6_EAX_ARAT, 2397 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2398 MSR_VMX_BASIC_TRUE_CTLS, 2399 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2400 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2401 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2402 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2403 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2404 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2405 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | 
MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2406 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2407 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2408 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2409 .features[FEAT_VMX_EXIT_CTLS] = 2410 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2411 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2412 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2413 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2414 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2415 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2416 MSR_VMX_MISC_STORE_LMA, 2417 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2418 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2419 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2420 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2421 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2422 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2423 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2424 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2425 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2426 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2427 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2428 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2429 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2430 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2431 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2432 .features[FEAT_VMX_SECONDARY_CTLS] = 2433 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2434 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2435 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2436 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2437 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST, 2438 .xlevel = 0x80000008, 2439 .model_id = "Intel Xeon E312xx (Sandy Bridge)", 2440 .versions = (X86CPUVersionDefinition[]) { 2441 { .version = 1 }, 2442 { 2443 .version = 2, 2444 .alias = "SandyBridge-IBRS", 2445 .props = (PropValue[]) { 2446 { "spec-ctrl", "on" }, 2447 { "model-id", 2448 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" }, 2449 { /* end of list */ } 2450 } 2451 }, 2452 { /* end of list */ } 2453 } 2454 }, 2455 { 2456 .name = "IvyBridge", 2457 .level = 0xd, 2458 .vendor = CPUID_VENDOR_INTEL, 2459 .family = 6, 2460 .model = 58, 2461 .stepping = 9, 2462 .features[FEAT_1_EDX] = 2463 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2464 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2465 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2466 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2467 CPUID_DE | CPUID_FP87, 2468 .features[FEAT_1_ECX] = 2469 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2470 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT | 2471 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 2472 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 2473 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2474 .features[FEAT_7_0_EBX] = 2475 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | 2476 CPUID_7_0_EBX_ERMS, 2477 .features[FEAT_8000_0001_EDX] = 2478 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2479 CPUID_EXT2_SYSCALL, 2480 .features[FEAT_8000_0001_ECX] = 2481 CPUID_EXT3_LAHF_LM, 2482 .features[FEAT_XSAVE] = 2483 CPUID_XSAVE_XSAVEOPT, 2484 .features[FEAT_6_EAX] = 2485 CPUID_6_EAX_ARAT, 2486 
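        /*
         * The FEAT_VMX_* words below (here and in the other definitions)
         * correspond, by name, to the IA32_VMX_* capability MSRs exposed to
         * a guest hypervisor when nested VMX is enabled; they are not CPUID
         * leaves.
         */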
.features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2487 MSR_VMX_BASIC_TRUE_CTLS, 2488 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2489 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2490 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2491 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2492 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2493 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2494 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2495 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2496 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2497 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, 2498 .features[FEAT_VMX_EXIT_CTLS] = 2499 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2500 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2501 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2502 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2503 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2504 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2505 MSR_VMX_MISC_STORE_LMA, 2506 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2507 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2508 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2509 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2510 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2511 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2512 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2513 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2514 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2515 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2516 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2517 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2518 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2519 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2520 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2521 .features[FEAT_VMX_SECONDARY_CTLS] = 2522 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2523 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2524 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2525 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2526 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2527 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2528 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2529 VMX_SECONDARY_EXEC_RDRAND_EXITING, 2530 .xlevel = 0x80000008, 2531 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)", 2532 .versions = (X86CPUVersionDefinition[]) { 2533 { .version = 1 }, 2534 { 2535 .version = 2, 2536 .alias = "IvyBridge-IBRS", 2537 .props = (PropValue[]) { 2538 { "spec-ctrl", "on" }, 2539 { "model-id", 2540 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" }, 2541 { /* end of list */ } 2542 } 2543 }, 2544 { /* end of list */ } 2545 } 2546 }, 2547 { 2548 .name = "Haswell", 2549 .level = 0xd, 2550 .vendor = CPUID_VENDOR_INTEL, 2551 .family = 6, 2552 .model = 60, 2553 .stepping = 4, 2554 .features[FEAT_1_EDX] = 2555 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2556 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2557 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2558 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2559 CPUID_DE | CPUID_FP87, 2560 .features[FEAT_1_ECX] = 2561 CPUID_EXT_AVX | 
CPUID_EXT_XSAVE | CPUID_EXT_AES | 2562 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2563 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2564 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2565 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2566 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2567 .features[FEAT_8000_0001_EDX] = 2568 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2569 CPUID_EXT2_SYSCALL, 2570 .features[FEAT_8000_0001_ECX] = 2571 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM, 2572 .features[FEAT_7_0_EBX] = 2573 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2574 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2575 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2576 CPUID_7_0_EBX_RTM, 2577 .features[FEAT_XSAVE] = 2578 CPUID_XSAVE_XSAVEOPT, 2579 .features[FEAT_6_EAX] = 2580 CPUID_6_EAX_ARAT, 2581 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2582 MSR_VMX_BASIC_TRUE_CTLS, 2583 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2584 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2585 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2586 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2587 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2588 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2589 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2590 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2591 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2592 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2593 .features[FEAT_VMX_EXIT_CTLS] = 2594 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2595 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2596 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2597 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2598 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2599 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2600 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2601 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2602 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2603 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2604 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2605 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2606 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2607 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2608 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2609 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2610 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2611 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2612 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2613 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2614 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2615 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2616 .features[FEAT_VMX_SECONDARY_CTLS] = 2617 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2618 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2619 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2620 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2621 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2622 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2623 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2624 VMX_SECONDARY_EXEC_RDRAND_EXITING | 
VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2625 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 2626 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2627 .xlevel = 0x80000008, 2628 .model_id = "Intel Core Processor (Haswell)", 2629 .versions = (X86CPUVersionDefinition[]) { 2630 { .version = 1 }, 2631 { 2632 .version = 2, 2633 .alias = "Haswell-noTSX", 2634 .props = (PropValue[]) { 2635 { "hle", "off" }, 2636 { "rtm", "off" }, 2637 { "stepping", "1" }, 2638 { "model-id", "Intel Core Processor (Haswell, no TSX)", }, 2639 { /* end of list */ } 2640 }, 2641 }, 2642 { 2643 .version = 3, 2644 .alias = "Haswell-IBRS", 2645 .props = (PropValue[]) { 2646 /* Restore TSX features removed by -v2 above */ 2647 { "hle", "on" }, 2648 { "rtm", "on" }, 2649 /* 2650 * Haswell and Haswell-IBRS had stepping=4 in 2651 * QEMU 4.0 and older 2652 */ 2653 { "stepping", "4" }, 2654 { "spec-ctrl", "on" }, 2655 { "model-id", 2656 "Intel Core Processor (Haswell, IBRS)" }, 2657 { /* end of list */ } 2658 } 2659 }, 2660 { 2661 .version = 4, 2662 .alias = "Haswell-noTSX-IBRS", 2663 .props = (PropValue[]) { 2664 { "hle", "off" }, 2665 { "rtm", "off" }, 2666 /* spec-ctrl was already enabled by -v3 above */ 2667 { "stepping", "1" }, 2668 { "model-id", 2669 "Intel Core Processor (Haswell, no TSX, IBRS)" }, 2670 { /* end of list */ } 2671 } 2672 }, 2673 { /* end of list */ } 2674 } 2675 }, 2676 { 2677 .name = "Broadwell", 2678 .level = 0xd, 2679 .vendor = CPUID_VENDOR_INTEL, 2680 .family = 6, 2681 .model = 61, 2682 .stepping = 2, 2683 .features[FEAT_1_EDX] = 2684 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2685 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2686 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2687 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2688 CPUID_DE | CPUID_FP87, 2689 .features[FEAT_1_ECX] = 2690 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2691 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2692 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2693 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2694 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2695 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2696 .features[FEAT_8000_0001_EDX] = 2697 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2698 CPUID_EXT2_SYSCALL, 2699 .features[FEAT_8000_0001_ECX] = 2700 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2701 .features[FEAT_7_0_EBX] = 2702 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2703 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2704 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2705 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2706 CPUID_7_0_EBX_SMAP, 2707 .features[FEAT_XSAVE] = 2708 CPUID_XSAVE_XSAVEOPT, 2709 .features[FEAT_6_EAX] = 2710 CPUID_6_EAX_ARAT, 2711 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2712 MSR_VMX_BASIC_TRUE_CTLS, 2713 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2714 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2715 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2716 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2717 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2718 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2719 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2720 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2721 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | 
MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2722 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2723 .features[FEAT_VMX_EXIT_CTLS] = 2724 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2725 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2726 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2727 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2728 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2729 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2730 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2731 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2732 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2733 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2734 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2735 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2736 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2737 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2738 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2739 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2740 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2741 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2742 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2743 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2744 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2745 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2746 .features[FEAT_VMX_SECONDARY_CTLS] = 2747 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2748 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2749 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2750 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 2751 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2752 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 2753 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 2754 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2755 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2756 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2757 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2758 .xlevel = 0x80000008, 2759 .model_id = "Intel Core Processor (Broadwell)", 2760 .versions = (X86CPUVersionDefinition[]) { 2761 { .version = 1 }, 2762 { 2763 .version = 2, 2764 .alias = "Broadwell-noTSX", 2765 .props = (PropValue[]) { 2766 { "hle", "off" }, 2767 { "rtm", "off" }, 2768 { "model-id", "Intel Core Processor (Broadwell, no TSX)", }, 2769 { /* end of list */ } 2770 }, 2771 }, 2772 { 2773 .version = 3, 2774 .alias = "Broadwell-IBRS", 2775 .props = (PropValue[]) { 2776 /* Restore TSX features removed by -v2 above */ 2777 { "hle", "on" }, 2778 { "rtm", "on" }, 2779 { "spec-ctrl", "on" }, 2780 { "model-id", 2781 "Intel Core Processor (Broadwell, IBRS)" }, 2782 { /* end of list */ } 2783 } 2784 }, 2785 { 2786 .version = 4, 2787 .alias = "Broadwell-noTSX-IBRS", 2788 .props = (PropValue[]) { 2789 { "hle", "off" }, 2790 { "rtm", "off" }, 2791 /* spec-ctrl was already enabled by -v3 above */ 2792 { "model-id", 2793 "Intel Core Processor (Broadwell, no TSX, IBRS)" }, 2794 { /* end of list */ } 2795 } 2796 }, 2797 { /* end of list */ } 2798 } 2799 }, 2800 { 2801 .name = "Skylake-Client", 2802 .level = 0xd, 2803 .vendor = CPUID_VENDOR_INTEL, 2804 .family = 6, 2805 .model = 94, 2806 .stepping = 3, 2807 .features[FEAT_1_EDX] = 2808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | 
CPUID_FXSR | CPUID_MMX | 2809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2812 CPUID_DE | CPUID_FP87, 2813 .features[FEAT_1_ECX] = 2814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2815 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2816 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2817 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2818 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2819 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2820 .features[FEAT_8000_0001_EDX] = 2821 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 2822 CPUID_EXT2_SYSCALL, 2823 .features[FEAT_8000_0001_ECX] = 2824 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2825 .features[FEAT_7_0_EBX] = 2826 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2827 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2828 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2829 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2830 CPUID_7_0_EBX_SMAP, 2831 /* Missing: XSAVES (not supported by some Linux versions, 2832 * including v4.1 to v4.12). 2833 * KVM doesn't yet expose any XSAVES state save component, 2834 * and the only one defined in Skylake (processor tracing) 2835 * probably will block migration anyway. 2836 */ 2837 .features[FEAT_XSAVE] = 2838 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2839 CPUID_XSAVE_XGETBV1, 2840 .features[FEAT_6_EAX] = 2841 CPUID_6_EAX_ARAT, 2842 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2843 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2844 MSR_VMX_BASIC_TRUE_CTLS, 2845 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2846 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2847 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2848 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2849 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2850 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2851 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2852 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2853 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2854 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2855 .features[FEAT_VMX_EXIT_CTLS] = 2856 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2857 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2858 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2859 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2860 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2861 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2862 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2863 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2864 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2865 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 2866 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2867 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2868 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2869 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2870 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2871 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2872 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2873 
VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2874 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2875 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2876 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2877 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2878 .features[FEAT_VMX_SECONDARY_CTLS] = 2879 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2880 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 2881 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 2882 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 2883 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 2884 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 2885 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 2886 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 2887 .xlevel = 0x80000008, 2888 .model_id = "Intel Core Processor (Skylake)", 2889 .versions = (X86CPUVersionDefinition[]) { 2890 { .version = 1 }, 2891 { 2892 .version = 2, 2893 .alias = "Skylake-Client-IBRS", 2894 .props = (PropValue[]) { 2895 { "spec-ctrl", "on" }, 2896 { "model-id", 2897 "Intel Core Processor (Skylake, IBRS)" }, 2898 { /* end of list */ } 2899 } 2900 }, 2901 { 2902 .version = 3, 2903 .alias = "Skylake-Client-noTSX-IBRS", 2904 .props = (PropValue[]) { 2905 { "hle", "off" }, 2906 { "rtm", "off" }, 2907 { "model-id", 2908 "Intel Core Processor (Skylake, IBRS, no TSX)" }, 2909 { /* end of list */ } 2910 } 2911 }, 2912 { /* end of list */ } 2913 } 2914 }, 2915 { 2916 .name = "Skylake-Server", 2917 .level = 0xd, 2918 .vendor = CPUID_VENDOR_INTEL, 2919 .family = 6, 2920 .model = 85, 2921 .stepping = 4, 2922 .features[FEAT_1_EDX] = 2923 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 2924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 2925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 2926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 2927 CPUID_DE | CPUID_FP87, 2928 .features[FEAT_1_ECX] = 2929 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 2930 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 2931 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 2932 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 2933 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 2934 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 2935 .features[FEAT_8000_0001_EDX] = 2936 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 2937 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 2938 .features[FEAT_8000_0001_ECX] = 2939 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 2940 .features[FEAT_7_0_EBX] = 2941 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 2942 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 2943 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 2944 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 2945 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 2946 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 2947 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 2948 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 2949 .features[FEAT_7_0_ECX] = 2950 CPUID_7_0_ECX_PKU, 2951 /* Missing: XSAVES (not supported by some Linux versions, 2952 * including v4.1 to v4.12). 2953 * KVM doesn't yet expose any XSAVES state save component, 2954 * and the only one defined in Skylake (processor tracing) 2955 * probably will block migration anyway. 
2956 */ 2957 .features[FEAT_XSAVE] = 2958 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 2959 CPUID_XSAVE_XGETBV1, 2960 .features[FEAT_6_EAX] = 2961 CPUID_6_EAX_ARAT, 2962 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 2963 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 2964 MSR_VMX_BASIC_TRUE_CTLS, 2965 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 2966 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 2967 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 2968 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 2969 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 2970 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 2971 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 2972 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 2973 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 2974 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 2975 .features[FEAT_VMX_EXIT_CTLS] = 2976 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 2977 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 2978 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 2979 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 2980 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 2981 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 2982 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 2983 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 2984 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 2985 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 2986 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 2987 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 2988 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 2989 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 2990 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 2991 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 2992 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 2993 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 2994 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 2995 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 2996 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 2997 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 2998 .features[FEAT_VMX_SECONDARY_CTLS] = 2999 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3000 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3001 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3002 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3003 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3004 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3005 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3006 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3007 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3008 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3009 .xlevel = 0x80000008, 3010 .model_id = "Intel Xeon Processor (Skylake)", 3011 .versions = (X86CPUVersionDefinition[]) { 3012 { .version = 1 }, 3013 { 3014 .version = 2, 3015 .alias = "Skylake-Server-IBRS", 3016 .props = (PropValue[]) { 3017 /* clflushopt was not added to Skylake-Server-IBRS */ 3018 /* TODO: add -v3 including clflushopt */ 3019 { "clflushopt", "off" }, 3020 { "spec-ctrl", "on" }, 3021 { "model-id", 3022 "Intel Xeon Processor 
(Skylake, IBRS)" }, 3023 { /* end of list */ } 3024 } 3025 }, 3026 { 3027 .version = 3, 3028 .alias = "Skylake-Server-noTSX-IBRS", 3029 .props = (PropValue[]) { 3030 { "hle", "off" }, 3031 { "rtm", "off" }, 3032 { "model-id", 3033 "Intel Xeon Processor (Skylake, IBRS, no TSX)" }, 3034 { /* end of list */ } 3035 } 3036 }, 3037 { /* end of list */ } 3038 } 3039 }, 3040 { 3041 .name = "Cascadelake-Server", 3042 .level = 0xd, 3043 .vendor = CPUID_VENDOR_INTEL, 3044 .family = 6, 3045 .model = 85, 3046 .stepping = 6, 3047 .features[FEAT_1_EDX] = 3048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3052 CPUID_DE | CPUID_FP87, 3053 .features[FEAT_1_ECX] = 3054 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3055 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3056 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3057 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3059 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3060 .features[FEAT_8000_0001_EDX] = 3061 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3062 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3063 .features[FEAT_8000_0001_ECX] = 3064 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3065 .features[FEAT_7_0_EBX] = 3066 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3067 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3068 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3069 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3070 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3071 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3072 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3073 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3074 .features[FEAT_7_0_ECX] = 3075 CPUID_7_0_ECX_PKU | 3076 CPUID_7_0_ECX_AVX512VNNI, 3077 .features[FEAT_7_0_EDX] = 3078 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3079 /* Missing: XSAVES (not supported by some Linux versions, 3080 * including v4.1 to v4.12). 3081 * KVM doesn't yet expose any XSAVES state save component, 3082 * and the only one defined in Skylake (processor tracing) 3083 * probably will block migration anyway. 
3084 */ 3085 .features[FEAT_XSAVE] = 3086 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3087 CPUID_XSAVE_XGETBV1, 3088 .features[FEAT_6_EAX] = 3089 CPUID_6_EAX_ARAT, 3090 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3091 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3092 MSR_VMX_BASIC_TRUE_CTLS, 3093 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3094 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3095 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3096 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3097 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3098 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3099 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3100 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3101 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3102 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3103 .features[FEAT_VMX_EXIT_CTLS] = 3104 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3105 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3106 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3107 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3108 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3109 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3110 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3111 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3112 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3113 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3114 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3115 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3116 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3117 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3118 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3119 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3120 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3121 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3122 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3123 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3124 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3125 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3126 .features[FEAT_VMX_SECONDARY_CTLS] = 3127 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3128 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3129 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3130 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3131 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3132 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3133 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3134 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3135 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3136 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3137 .xlevel = 0x80000008, 3138 .model_id = "Intel Xeon Processor (Cascadelake)", 3139 .versions = (X86CPUVersionDefinition[]) { 3140 { .version = 1 }, 3141 { .version = 2, 3142 .note = "ARCH_CAPABILITIES", 3143 .props = (PropValue[]) { 3144 { "arch-capabilities", "on" }, 3145 { "rdctl-no", "on" }, 3146 { "ibrs-all", "on" }, 3147 { "skip-l1dfl-vmentry", "on" }, 3148 { "mds-no", "on" }, 3149 { /* end of list */ } 3150 }, 3151 }, 3152 { .version = 
3, 3153 .alias = "Cascadelake-Server-noTSX", 3154 .note = "ARCH_CAPABILITIES, no TSX", 3155 .props = (PropValue[]) { 3156 { "hle", "off" }, 3157 { "rtm", "off" }, 3158 { /* end of list */ } 3159 }, 3160 }, 3161 { /* end of list */ } 3162 } 3163 }, 3164 { 3165 .name = "Cooperlake", 3166 .level = 0xd, 3167 .vendor = CPUID_VENDOR_INTEL, 3168 .family = 6, 3169 .model = 85, 3170 .stepping = 10, 3171 .features[FEAT_1_EDX] = 3172 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3173 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3174 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3175 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3176 CPUID_DE | CPUID_FP87, 3177 .features[FEAT_1_ECX] = 3178 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3179 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3180 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3181 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3182 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3183 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3184 .features[FEAT_8000_0001_EDX] = 3185 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3186 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3187 .features[FEAT_8000_0001_ECX] = 3188 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3189 .features[FEAT_7_0_EBX] = 3190 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3191 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3192 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3193 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3194 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3195 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3196 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3197 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3198 .features[FEAT_7_0_ECX] = 3199 CPUID_7_0_ECX_PKU | 3200 CPUID_7_0_ECX_AVX512VNNI, 3201 .features[FEAT_7_0_EDX] = 3202 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP | 3203 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES, 3204 .features[FEAT_ARCH_CAPABILITIES] = 3205 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | 3206 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | 3207 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, 3208 .features[FEAT_7_1_EAX] = 3209 CPUID_7_1_EAX_AVX512_BF16, 3210 /* 3211 * Missing: XSAVES (not supported by some Linux versions, 3212 * including v4.1 to v4.12). 3213 * KVM doesn't yet expose any XSAVES state save component, 3214 * and the only one defined in Skylake (processor tracing) 3215 * probably will block migration anyway. 
3216 */ 3217 .features[FEAT_XSAVE] = 3218 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3219 CPUID_XSAVE_XGETBV1, 3220 .features[FEAT_6_EAX] = 3221 CPUID_6_EAX_ARAT, 3222 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3223 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3224 MSR_VMX_BASIC_TRUE_CTLS, 3225 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3226 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3227 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3228 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3229 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3230 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3231 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3232 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3233 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3234 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3235 .features[FEAT_VMX_EXIT_CTLS] = 3236 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3237 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3238 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3239 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3240 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3241 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3242 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3243 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3244 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3245 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3246 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3247 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3248 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3249 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3250 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3251 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3252 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3253 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3254 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3255 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3256 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3257 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3258 .features[FEAT_VMX_SECONDARY_CTLS] = 3259 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3260 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3261 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3262 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3263 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3264 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3265 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3266 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3267 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3268 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3269 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3270 .xlevel = 0x80000008, 3271 .model_id = "Intel Xeon Processor (Cooperlake)", 3272 }, 3273 { 3274 .name = "Icelake-Client", 3275 .level = 0xd, 3276 .vendor = CPUID_VENDOR_INTEL, 3277 .family = 6, 3278 .model = 126, 3279 .stepping = 0, 3280 .features[FEAT_1_EDX] = 3281 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3282 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | 
CPUID_CMOV | CPUID_MCA | 3283 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3284 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3285 CPUID_DE | CPUID_FP87, 3286 .features[FEAT_1_ECX] = 3287 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3288 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3289 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3290 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3291 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3292 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3293 .features[FEAT_8000_0001_EDX] = 3294 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX | 3295 CPUID_EXT2_SYSCALL, 3296 .features[FEAT_8000_0001_ECX] = 3297 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3298 .features[FEAT_8000_0008_EBX] = 3299 CPUID_8000_0008_EBX_WBNOINVD, 3300 .features[FEAT_7_0_EBX] = 3301 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3302 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3303 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3304 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3305 CPUID_7_0_EBX_SMAP, 3306 .features[FEAT_7_0_ECX] = 3307 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3308 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3309 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3310 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3311 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3312 .features[FEAT_7_0_EDX] = 3313 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3314 /* Missing: XSAVES (not supported by some Linux versions, 3315 * including v4.1 to v4.12). 3316 * KVM doesn't yet expose any XSAVES state save component, 3317 * and the only one defined in Skylake (processor tracing) 3318 * probably will block migration anyway. 
3319 */ 3320 .features[FEAT_XSAVE] = 3321 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3322 CPUID_XSAVE_XGETBV1, 3323 .features[FEAT_6_EAX] = 3324 CPUID_6_EAX_ARAT, 3325 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3326 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3327 MSR_VMX_BASIC_TRUE_CTLS, 3328 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3329 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3330 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3331 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3332 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3333 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3334 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3335 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3336 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3337 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3338 .features[FEAT_VMX_EXIT_CTLS] = 3339 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3340 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3341 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3342 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3343 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3344 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3345 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3346 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3347 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3348 VMX_PIN_BASED_VMX_PREEMPTION_TIMER, 3349 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3350 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3351 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3352 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3353 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3354 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3355 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3356 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3357 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3358 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3359 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3360 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3361 .features[FEAT_VMX_SECONDARY_CTLS] = 3362 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3363 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3364 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3365 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3366 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3367 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3368 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3369 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3370 .xlevel = 0x80000008, 3371 .model_id = "Intel Core Processor (Icelake)", 3372 .versions = (X86CPUVersionDefinition[]) { 3373 { .version = 1 }, 3374 { 3375 .version = 2, 3376 .note = "no TSX", 3377 .alias = "Icelake-Client-noTSX", 3378 .props = (PropValue[]) { 3379 { "hle", "off" }, 3380 { "rtm", "off" }, 3381 { /* end of list */ } 3382 }, 3383 }, 3384 { /* end of list */ } 3385 } 3386 }, 3387 { 3388 .name = "Icelake-Server", 3389 .level = 0xd, 3390 .vendor = CPUID_VENDOR_INTEL, 3391 .family = 6, 3392 .model = 134, 3393 .stepping = 0, 3394 
.features[FEAT_1_EDX] = 3395 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3396 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3397 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3398 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3399 CPUID_DE | CPUID_FP87, 3400 .features[FEAT_1_ECX] = 3401 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3402 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3403 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3404 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3405 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3406 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3407 .features[FEAT_8000_0001_EDX] = 3408 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3409 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3410 .features[FEAT_8000_0001_ECX] = 3411 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3412 .features[FEAT_8000_0008_EBX] = 3413 CPUID_8000_0008_EBX_WBNOINVD, 3414 .features[FEAT_7_0_EBX] = 3415 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | 3416 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | 3417 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | 3418 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | 3419 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB | 3420 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | 3421 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD | 3422 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT, 3423 .features[FEAT_7_0_ECX] = 3424 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | 3425 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | 3426 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | 3427 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | 3428 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57, 3429 .features[FEAT_7_0_EDX] = 3430 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3431 /* Missing: XSAVES (not supported by some Linux versions, 3432 * including v4.1 to v4.12). 3433 * KVM doesn't yet expose any XSAVES state save component, 3434 * and the only one defined in Skylake (processor tracing) 3435 * probably will block migration anyway. 
3436 */ 3437 .features[FEAT_XSAVE] = 3438 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3439 CPUID_XSAVE_XGETBV1, 3440 .features[FEAT_6_EAX] = 3441 CPUID_6_EAX_ARAT, 3442 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */ 3443 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3444 MSR_VMX_BASIC_TRUE_CTLS, 3445 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3446 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3447 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3448 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3449 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3450 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3451 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3452 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3453 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3454 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3455 .features[FEAT_VMX_EXIT_CTLS] = 3456 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3457 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3458 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3459 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3460 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3461 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3462 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3463 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3464 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3465 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3466 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3467 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3468 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3469 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3470 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3471 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3472 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3473 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3474 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3475 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3476 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3477 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3478 .features[FEAT_VMX_SECONDARY_CTLS] = 3479 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3480 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3481 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3482 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3483 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3484 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3485 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3486 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3487 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS, 3488 .xlevel = 0x80000008, 3489 .model_id = "Intel Xeon Processor (Icelake)", 3490 .versions = (X86CPUVersionDefinition[]) { 3491 { .version = 1 }, 3492 { 3493 .version = 2, 3494 .note = "no TSX", 3495 .alias = "Icelake-Server-noTSX", 3496 .props = (PropValue[]) { 3497 { "hle", "off" }, 3498 { "rtm", "off" }, 3499 { /* end of list */ } 3500 }, 3501 }, 3502 { 3503 .version = 3, 3504 .props = (PropValue[]) { 3505 { "arch-capabilities", "on" }, 3506 { "rdctl-no", "on" }, 3507 { "ibrs-all", "on" }, 3508 { 
"skip-l1dfl-vmentry", "on" }, 3509 { "mds-no", "on" }, 3510 { "pschange-mc-no", "on" }, 3511 { "taa-no", "on" }, 3512 { /* end of list */ } 3513 }, 3514 }, 3515 { /* end of list */ } 3516 } 3517 }, 3518 { 3519 .name = "Denverton", 3520 .level = 21, 3521 .vendor = CPUID_VENDOR_INTEL, 3522 .family = 6, 3523 .model = 95, 3524 .stepping = 1, 3525 .features[FEAT_1_EDX] = 3526 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | 3527 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | 3528 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3529 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | 3530 CPUID_SSE | CPUID_SSE2, 3531 .features[FEAT_1_ECX] = 3532 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3533 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 | 3534 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3535 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | 3536 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND, 3537 .features[FEAT_8000_0001_EDX] = 3538 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | 3539 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, 3540 .features[FEAT_8000_0001_ECX] = 3541 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3542 .features[FEAT_7_0_EBX] = 3543 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS | 3544 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP | 3545 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI, 3546 .features[FEAT_7_0_EDX] = 3547 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | 3548 CPUID_7_0_EDX_SPEC_CTRL_SSBD, 3549 /* 3550 * Missing: XSAVES (not supported by some Linux versions, 3551 * including v4.1 to v4.12). 3552 * KVM doesn't yet expose any XSAVES state save component, 3553 * and the only one defined in Skylake (processor tracing) 3554 * probably will block migration anyway. 
3555 */ 3556 .features[FEAT_XSAVE] = 3557 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1, 3558 .features[FEAT_6_EAX] = 3559 CPUID_6_EAX_ARAT, 3560 .features[FEAT_ARCH_CAPABILITIES] = 3561 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY, 3562 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3563 MSR_VMX_BASIC_TRUE_CTLS, 3564 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3565 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3566 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3567 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3568 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3569 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3570 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3571 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3572 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3573 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3574 .features[FEAT_VMX_EXIT_CTLS] = 3575 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3576 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3577 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3578 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3579 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3580 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3581 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3582 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3583 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3584 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3585 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3586 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3587 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3588 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3589 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3590 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3591 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3592 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3593 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3594 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3595 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3596 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3597 .features[FEAT_VMX_SECONDARY_CTLS] = 3598 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3599 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3600 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3601 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3602 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3603 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3604 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3605 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3606 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3607 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3608 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3609 .xlevel = 0x80000008, 3610 .model_id = "Intel Atom Processor (Denverton)", 3611 .versions = (X86CPUVersionDefinition[]) { 3612 { .version = 1 }, 3613 { 3614 .version = 2, 3615 .note = "no MPX, no MONITOR", 3616 .props = (PropValue[]) { 3617 { "monitor", "off" }, 3618 { "mpx", "off" }, 3619 { /* end of list */ }, 3620 }, 3621 }, 3622 { /* end of list */ }, 3623 }, 3624 
}, 3625 { 3626 .name = "Snowridge", 3627 .level = 27, 3628 .vendor = CPUID_VENDOR_INTEL, 3629 .family = 6, 3630 .model = 134, 3631 .stepping = 1, 3632 .features[FEAT_1_EDX] = 3633 /* missing: CPUID_PN CPUID_IA64 */ 3634 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */ 3635 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | 3636 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE | 3637 CPUID_CX8 | CPUID_APIC | CPUID_SEP | 3638 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | 3639 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | 3640 CPUID_MMX | 3641 CPUID_FXSR | CPUID_SSE | CPUID_SSE2, 3642 .features[FEAT_1_ECX] = 3643 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR | 3644 CPUID_EXT_SSSE3 | 3645 CPUID_EXT_CX16 | 3646 CPUID_EXT_SSE41 | 3647 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | 3648 CPUID_EXT_POPCNT | 3649 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE | 3650 CPUID_EXT_RDRAND, 3651 .features[FEAT_8000_0001_EDX] = 3652 CPUID_EXT2_SYSCALL | 3653 CPUID_EXT2_NX | 3654 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3655 CPUID_EXT2_LM, 3656 .features[FEAT_8000_0001_ECX] = 3657 CPUID_EXT3_LAHF_LM | 3658 CPUID_EXT3_3DNOWPREFETCH, 3659 .features[FEAT_7_0_EBX] = 3660 CPUID_7_0_EBX_FSGSBASE | 3661 CPUID_7_0_EBX_SMEP | 3662 CPUID_7_0_EBX_ERMS | 3663 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */ 3664 CPUID_7_0_EBX_RDSEED | 3665 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3666 CPUID_7_0_EBX_CLWB | 3667 CPUID_7_0_EBX_SHA_NI, 3668 .features[FEAT_7_0_ECX] = 3669 CPUID_7_0_ECX_UMIP | 3670 /* missing bit 5 */ 3671 CPUID_7_0_ECX_GFNI | 3672 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE | 3673 CPUID_7_0_ECX_MOVDIR64B, 3674 .features[FEAT_7_0_EDX] = 3675 CPUID_7_0_EDX_SPEC_CTRL | 3676 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD | 3677 CPUID_7_0_EDX_CORE_CAPABILITY, 3678 .features[FEAT_CORE_CAPABILITY] = 3679 MSR_CORE_CAP_SPLIT_LOCK_DETECT, 3680 /* 3681 * Missing: XSAVES (not supported by some Linux versions, 3682 * including v4.1 to v4.12). 3683 * KVM doesn't yet expose any XSAVES state save component, 3684 * and the only one defined in Skylake (processor tracing) 3685 * probably will block migration anyway. 
3686 */ 3687 .features[FEAT_XSAVE] = 3688 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3689 CPUID_XSAVE_XGETBV1, 3690 .features[FEAT_6_EAX] = 3691 CPUID_6_EAX_ARAT, 3692 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS | 3693 MSR_VMX_BASIC_TRUE_CTLS, 3694 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE | 3695 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT | 3696 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER, 3697 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY | 3698 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | 3699 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT | 3700 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | 3701 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | 3702 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT | 3703 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS, 3704 .features[FEAT_VMX_EXIT_CTLS] = 3705 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | 3706 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | 3707 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER | 3708 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | 3709 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, 3710 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT | 3711 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT, 3712 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK | 3713 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS | 3714 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR, 3715 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING | 3716 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | 3717 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | 3718 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | 3719 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | 3720 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING | 3721 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS | 3722 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING | 3723 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS | 3724 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | 3725 VMX_CPU_BASED_MONITOR_TRAP_FLAG | 3726 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, 3727 .features[FEAT_VMX_SECONDARY_CTLS] = 3728 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 3729 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT | 3730 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP | 3731 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | 3732 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | 3733 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | 3734 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | 3735 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID | 3736 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | 3737 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML, 3738 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING, 3739 .xlevel = 0x80000008, 3740 .model_id = "Intel Atom Processor (SnowRidge)", 3741 .versions = (X86CPUVersionDefinition[]) { 3742 { .version = 1 }, 3743 { 3744 .version = 2, 3745 .props = (PropValue[]) { 3746 { "mpx", "off" }, 3747 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" }, 3748 { /* end of list */ }, 3749 }, 3750 }, 3751 { /* end of list */ }, 3752 }, 3753 }, 3754 { 3755 .name = "KnightsMill", 3756 .level = 0xd, 3757 .vendor = CPUID_VENDOR_INTEL, 
3758 .family = 6, 3759 .model = 133, 3760 .stepping = 0, 3761 .features[FEAT_1_EDX] = 3762 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | 3763 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | 3764 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | 3765 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | 3766 CPUID_PSE | CPUID_DE | CPUID_FP87, 3767 .features[FEAT_1_ECX] = 3768 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3769 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | 3770 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | 3771 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 | 3772 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE | 3773 CPUID_EXT_F16C | CPUID_EXT_RDRAND, 3774 .features[FEAT_8000_0001_EDX] = 3775 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP | 3776 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3777 .features[FEAT_8000_0001_ECX] = 3778 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH, 3779 .features[FEAT_7_0_EBX] = 3780 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3781 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | 3782 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F | 3783 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF | 3784 CPUID_7_0_EBX_AVX512ER, 3785 .features[FEAT_7_0_ECX] = 3786 CPUID_7_0_ECX_AVX512_VPOPCNTDQ, 3787 .features[FEAT_7_0_EDX] = 3788 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS, 3789 .features[FEAT_XSAVE] = 3790 CPUID_XSAVE_XSAVEOPT, 3791 .features[FEAT_6_EAX] = 3792 CPUID_6_EAX_ARAT, 3793 .xlevel = 0x80000008, 3794 .model_id = "Intel Xeon Phi Processor (Knights Mill)", 3795 }, 3796 { 3797 .name = "Opteron_G1", 3798 .level = 5, 3799 .vendor = CPUID_VENDOR_AMD, 3800 .family = 15, 3801 .model = 6, 3802 .stepping = 1, 3803 .features[FEAT_1_EDX] = 3804 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3805 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3806 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3807 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3808 CPUID_DE | CPUID_FP87, 3809 .features[FEAT_1_ECX] = 3810 CPUID_EXT_SSE3, 3811 .features[FEAT_8000_0001_EDX] = 3812 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3813 .xlevel = 0x80000008, 3814 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)", 3815 }, 3816 { 3817 .name = "Opteron_G2", 3818 .level = 5, 3819 .vendor = CPUID_VENDOR_AMD, 3820 .family = 15, 3821 .model = 6, 3822 .stepping = 1, 3823 .features[FEAT_1_EDX] = 3824 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3825 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3826 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3827 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3828 CPUID_DE | CPUID_FP87, 3829 .features[FEAT_1_ECX] = 3830 CPUID_EXT_CX16 | CPUID_EXT_SSE3, 3831 .features[FEAT_8000_0001_EDX] = 3832 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL, 3833 .features[FEAT_8000_0001_ECX] = 3834 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3835 .xlevel = 0x80000008, 3836 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)", 3837 }, 3838 { 3839 .name = "Opteron_G3", 3840 .level = 5, 3841 .vendor = CPUID_VENDOR_AMD, 3842 .family = 16, 3843 .model = 2, 3844 .stepping = 3, 3845 .features[FEAT_1_EDX] = 3846 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3847 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3848 CPUID_PGE | CPUID_MTRR | CPUID_SEP | 
CPUID_APIC | CPUID_CX8 | 3849 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3850 CPUID_DE | CPUID_FP87, 3851 .features[FEAT_1_ECX] = 3852 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR | 3853 CPUID_EXT_SSE3, 3854 .features[FEAT_8000_0001_EDX] = 3855 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL | 3856 CPUID_EXT2_RDTSCP, 3857 .features[FEAT_8000_0001_ECX] = 3858 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | 3859 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM, 3860 .xlevel = 0x80000008, 3861 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)", 3862 }, 3863 { 3864 .name = "Opteron_G4", 3865 .level = 0xd, 3866 .vendor = CPUID_VENDOR_AMD, 3867 .family = 21, 3868 .model = 1, 3869 .stepping = 2, 3870 .features[FEAT_1_EDX] = 3871 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3872 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3873 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3874 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3875 CPUID_DE | CPUID_FP87, 3876 .features[FEAT_1_ECX] = 3877 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES | 3878 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3879 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | 3880 CPUID_EXT_SSE3, 3881 .features[FEAT_8000_0001_EDX] = 3882 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3883 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3884 .features[FEAT_8000_0001_ECX] = 3885 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3886 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3887 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3888 CPUID_EXT3_LAHF_LM, 3889 .features[FEAT_SVM] = 3890 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3891 /* no xsaveopt! */ 3892 .xlevel = 0x8000001A, 3893 .model_id = "AMD Opteron 62xx class CPU", 3894 }, 3895 { 3896 .name = "Opteron_G5", 3897 .level = 0xd, 3898 .vendor = CPUID_VENDOR_AMD, 3899 .family = 21, 3900 .model = 2, 3901 .stepping = 0, 3902 .features[FEAT_1_EDX] = 3903 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | 3904 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | 3905 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | 3906 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | 3907 CPUID_DE | CPUID_FP87, 3908 .features[FEAT_1_ECX] = 3909 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE | 3910 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | 3911 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA | 3912 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3913 .features[FEAT_8000_0001_EDX] = 3914 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX | 3915 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP, 3916 .features[FEAT_8000_0001_ECX] = 3917 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP | 3918 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE | 3919 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM | 3920 CPUID_EXT3_LAHF_LM, 3921 .features[FEAT_SVM] = 3922 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3923 /* no xsaveopt! 
*/ 3924 .xlevel = 0x8000001A, 3925 .model_id = "AMD Opteron 63xx class CPU", 3926 }, 3927 { 3928 .name = "EPYC", 3929 .level = 0xd, 3930 .vendor = CPUID_VENDOR_AMD, 3931 .family = 23, 3932 .model = 1, 3933 .stepping = 2, 3934 .features[FEAT_1_EDX] = 3935 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 3936 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 3937 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 3938 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 3939 CPUID_VME | CPUID_FP87, 3940 .features[FEAT_1_ECX] = 3941 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 3942 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 3943 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 3944 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 3945 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 3946 .features[FEAT_8000_0001_EDX] = 3947 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 3948 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 3949 CPUID_EXT2_SYSCALL, 3950 .features[FEAT_8000_0001_ECX] = 3951 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 3952 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 3953 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 3954 CPUID_EXT3_TOPOEXT, 3955 .features[FEAT_7_0_EBX] = 3956 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 3957 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 3958 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 3959 CPUID_7_0_EBX_SHA_NI, 3960 .features[FEAT_XSAVE] = 3961 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 3962 CPUID_XSAVE_XGETBV1, 3963 .features[FEAT_6_EAX] = 3964 CPUID_6_EAX_ARAT, 3965 .features[FEAT_SVM] = 3966 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 3967 .xlevel = 0x8000001E, 3968 .model_id = "AMD EPYC Processor", 3969 .cache_info = &epyc_cache_info, 3970 .use_epyc_apic_id_encoding = 1, 3971 .versions = (X86CPUVersionDefinition[]) { 3972 { .version = 1 }, 3973 { 3974 .version = 2, 3975 .alias = "EPYC-IBPB", 3976 .props = (PropValue[]) { 3977 { "ibpb", "on" }, 3978 { "model-id", 3979 "AMD EPYC Processor (with IBPB)" }, 3980 { /* end of list */ } 3981 } 3982 }, 3983 { 3984 .version = 3, 3985 .props = (PropValue[]) { 3986 { "ibpb", "on" }, 3987 { "perfctr-core", "on" }, 3988 { "clzero", "on" }, 3989 { "xsaveerptr", "on" }, 3990 { "xsaves", "on" }, 3991 { "model-id", 3992 "AMD EPYC Processor" }, 3993 { /* end of list */ } 3994 } 3995 }, 3996 { /* end of list */ } 3997 } 3998 }, 3999 { 4000 .name = "Dhyana", 4001 .level = 0xd, 4002 .vendor = CPUID_VENDOR_HYGON, 4003 .family = 24, 4004 .model = 0, 4005 .stepping = 1, 4006 .features[FEAT_1_EDX] = 4007 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4008 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4009 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4010 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4011 CPUID_VME | CPUID_FP87, 4012 .features[FEAT_1_ECX] = 4013 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4014 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT | 4015 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4016 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4017 CPUID_EXT_MONITOR | CPUID_EXT_SSE3, 4018 .features[FEAT_8000_0001_EDX] = 4019 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4020 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4021 CPUID_EXT2_SYSCALL, 4022 .features[FEAT_8000_0001_ECX] = 4023 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 
4024 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4025 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4026 CPUID_EXT3_TOPOEXT, 4027 .features[FEAT_8000_0008_EBX] = 4028 CPUID_8000_0008_EBX_IBPB, 4029 .features[FEAT_7_0_EBX] = 4030 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4031 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4032 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT, 4033 /* 4034 * Missing: XSAVES (not supported by some Linux versions, 4035 * including v4.1 to v4.12). 4036 * KVM doesn't yet expose any XSAVES state save component. 4037 */ 4038 .features[FEAT_XSAVE] = 4039 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4040 CPUID_XSAVE_XGETBV1, 4041 .features[FEAT_6_EAX] = 4042 CPUID_6_EAX_ARAT, 4043 .features[FEAT_SVM] = 4044 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4045 .xlevel = 0x8000001E, 4046 .model_id = "Hygon Dhyana Processor", 4047 .cache_info = &epyc_cache_info, 4048 }, 4049 { 4050 .name = "EPYC-Rome", 4051 .level = 0xd, 4052 .vendor = CPUID_VENDOR_AMD, 4053 .family = 23, 4054 .model = 49, 4055 .stepping = 0, 4056 .features[FEAT_1_EDX] = 4057 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | 4058 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | 4059 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | 4060 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | 4061 CPUID_VME | CPUID_FP87, 4062 .features[FEAT_1_ECX] = 4063 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | 4064 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | 4065 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | 4066 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | 4067 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, 4068 .features[FEAT_8000_0001_EDX] = 4069 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | 4070 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | 4071 CPUID_EXT2_SYSCALL, 4072 .features[FEAT_8000_0001_ECX] = 4073 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | 4074 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | 4075 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | 4076 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, 4077 .features[FEAT_8000_0008_EBX] = 4078 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | 4079 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | 4080 CPUID_8000_0008_EBX_STIBP, 4081 .features[FEAT_7_0_EBX] = 4082 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | 4083 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | 4084 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | 4085 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB, 4086 .features[FEAT_7_0_ECX] = 4087 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID, 4088 .features[FEAT_XSAVE] = 4089 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | 4090 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, 4091 .features[FEAT_6_EAX] = 4092 CPUID_6_EAX_ARAT, 4093 .features[FEAT_SVM] = 4094 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, 4095 .xlevel = 0x8000001E, 4096 .model_id = "AMD EPYC-Rome Processor", 4097 .cache_info = &epyc_rome_cache_info, 4098 .use_epyc_apic_id_encoding = 1, 4099 }, 4100 }; 4101 4102 /* KVM-specific features that are automatically added/removed 4103 * from all CPU models when KVM is enabled. 
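 *
 * Individual entries can be overridden later through
 * x86_cpu_change_kvm_default() (defined further below), which only
 * accepts properties that are already listed in this table.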
4104 */ 4105 static PropValue kvm_default_props[] = { 4106 { "kvmclock", "on" }, 4107 { "kvm-nopiodelay", "on" }, 4108 { "kvm-asyncpf", "on" }, 4109 { "kvm-steal-time", "on" }, 4110 { "kvm-pv-eoi", "on" }, 4111 { "kvmclock-stable-bit", "on" }, 4112 { "x2apic", "on" }, 4113 { "acpi", "off" }, 4114 { "monitor", "off" }, 4115 { "svm", "off" }, 4116 { NULL, NULL }, 4117 }; 4118 4119 /* TCG-specific defaults that override all CPU models when using TCG 4120 */ 4121 static PropValue tcg_default_props[] = { 4122 { "vme", "off" }, 4123 { NULL, NULL }, 4124 }; 4125 4126 4127 /* 4128 * We resolve CPU model aliases using -v1 when using "-machine 4129 * none", but this is just for compatibility while libvirt isn't 4130 * adapted to resolve CPU model versions before creating VMs. 4131 * See "Runnability guarantee of CPU models" at * qemu-deprecated.texi. 4132 */ 4133 X86CPUVersion default_cpu_version = 1; 4134 4135 void x86_cpu_set_default_version(X86CPUVersion version) 4136 { 4137 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */ 4138 assert(version != CPU_VERSION_AUTO); 4139 default_cpu_version = version; 4140 } 4141 4142 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model) 4143 { 4144 int v = 0; 4145 const X86CPUVersionDefinition *vdef = 4146 x86_cpu_def_get_versions(model->cpudef); 4147 while (vdef->version) { 4148 v = vdef->version; 4149 vdef++; 4150 } 4151 return v; 4152 } 4153 4154 /* Return the actual version being used for a specific CPU model */ 4155 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model) 4156 { 4157 X86CPUVersion v = model->version; 4158 if (v == CPU_VERSION_AUTO) { 4159 v = default_cpu_version; 4160 } 4161 if (v == CPU_VERSION_LATEST) { 4162 return x86_cpu_model_last_version(model); 4163 } 4164 return v; 4165 } 4166 4167 void x86_cpu_change_kvm_default(const char *prop, const char *value) 4168 { 4169 PropValue *pv; 4170 for (pv = kvm_default_props; pv->prop; pv++) { 4171 if (!strcmp(pv->prop, prop)) { 4172 pv->value = value; 4173 break; 4174 } 4175 } 4176 4177 /* It is valid to call this function only for properties that 4178 * are already present in the kvm_default_props table. 4179 */ 4180 assert(pv->prop); 4181 } 4182 4183 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 4184 bool migratable_only); 4185 4186 static bool lmce_supported(void) 4187 { 4188 uint64_t mce_cap = 0; 4189 4190 #ifdef CONFIG_KVM 4191 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) { 4192 return false; 4193 } 4194 #endif 4195 4196 return !!(mce_cap & MCG_LMCE_P); 4197 } 4198 4199 #define CPUID_MODEL_ID_SZ 48 4200 4201 /** 4202 * cpu_x86_fill_model_id: 4203 * Get CPUID model ID string from host CPU. 4204 * 4205 * @str should have at least CPUID_MODEL_ID_SZ bytes 4206 * 4207 * The function does NOT add a null terminator to the string 4208 * automatically. 
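 *
 * The 48 bytes are read from host CPUID leaves 0x80000002, 0x80000003
 * and 0x80000004, copying EAX, EBX, ECX and EDX (16 bytes per leaf)
 * in order.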
4209 */ 4210 static int cpu_x86_fill_model_id(char *str) 4211 { 4212 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0; 4213 int i; 4214 4215 for (i = 0; i < 3; i++) { 4216 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx); 4217 memcpy(str + i * 16 + 0, &eax, 4); 4218 memcpy(str + i * 16 + 4, &ebx, 4); 4219 memcpy(str + i * 16 + 8, &ecx, 4); 4220 memcpy(str + i * 16 + 12, &edx, 4); 4221 } 4222 return 0; 4223 } 4224 4225 static Property max_x86_cpu_properties[] = { 4226 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true), 4227 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false), 4228 DEFINE_PROP_END_OF_LIST() 4229 }; 4230 4231 static void max_x86_cpu_class_init(ObjectClass *oc, void *data) 4232 { 4233 DeviceClass *dc = DEVICE_CLASS(oc); 4234 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4235 4236 xcc->ordering = 9; 4237 4238 xcc->model_description = 4239 "Enables all features supported by the accelerator in the current host"; 4240 4241 device_class_set_props(dc, max_x86_cpu_properties); 4242 } 4243 4244 static void max_x86_cpu_initfn(Object *obj) 4245 { 4246 X86CPU *cpu = X86_CPU(obj); 4247 CPUX86State *env = &cpu->env; 4248 KVMState *s = kvm_state; 4249 4250 /* We can't fill the features array here because we don't know yet if 4251 * "migratable" is true or false. 4252 */ 4253 cpu->max_features = true; 4254 4255 if (accel_uses_host_cpuid()) { 4256 char vendor[CPUID_VENDOR_SZ + 1] = { 0 }; 4257 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 }; 4258 int family, model, stepping; 4259 4260 host_vendor_fms(vendor, &family, &model, &stepping); 4261 cpu_x86_fill_model_id(model_id); 4262 4263 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort); 4264 object_property_set_int(OBJECT(cpu), family, "family", &error_abort); 4265 object_property_set_int(OBJECT(cpu), model, "model", &error_abort); 4266 object_property_set_int(OBJECT(cpu), stepping, "stepping", 4267 &error_abort); 4268 object_property_set_str(OBJECT(cpu), model_id, "model-id", 4269 &error_abort); 4270 4271 if (kvm_enabled()) { 4272 env->cpuid_min_level = 4273 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX); 4274 env->cpuid_min_xlevel = 4275 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX); 4276 env->cpuid_min_xlevel2 = 4277 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX); 4278 } else { 4279 env->cpuid_min_level = 4280 hvf_get_supported_cpuid(0x0, 0, R_EAX); 4281 env->cpuid_min_xlevel = 4282 hvf_get_supported_cpuid(0x80000000, 0, R_EAX); 4283 env->cpuid_min_xlevel2 = 4284 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX); 4285 } 4286 4287 if (lmce_supported()) { 4288 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort); 4289 } 4290 } else { 4291 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD, 4292 "vendor", &error_abort); 4293 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort); 4294 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort); 4295 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort); 4296 object_property_set_str(OBJECT(cpu), 4297 "QEMU TCG CPU version " QEMU_HW_VERSION, 4298 "model-id", &error_abort); 4299 } 4300 4301 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort); 4302 } 4303 4304 static const TypeInfo max_x86_cpu_type_info = { 4305 .name = X86_CPU_TYPE_NAME("max"), 4306 .parent = TYPE_X86_CPU, 4307 .instance_init = max_x86_cpu_initfn, 4308 .class_init = max_x86_cpu_class_init, 4309 }; 4310 4311 #if defined(CONFIG_KVM) || defined(CONFIG_HVF) 4312 static void host_x86_cpu_class_init(ObjectClass *oc, void *data) 4313 { 
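    /*
     * The "host" CPU type is parented to "max" but only exists in builds
     * with KVM or HVF support (note the #if above); unlike "max", it
     * requires an accelerator that uses host CPUID (host_cpuid_required,
     * checked e.g. in x86_cpu_class_check_missing_features() below).
     */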
4314 X86CPUClass *xcc = X86_CPU_CLASS(oc); 4315 4316 xcc->host_cpuid_required = true; 4317 xcc->ordering = 8; 4318 4319 #if defined(CONFIG_KVM) 4320 xcc->model_description = 4321 "KVM processor with all supported host features "; 4322 #elif defined(CONFIG_HVF) 4323 xcc->model_description = 4324 "HVF processor with all supported host features "; 4325 #endif 4326 } 4327 4328 static const TypeInfo host_x86_cpu_type_info = { 4329 .name = X86_CPU_TYPE_NAME("host"), 4330 .parent = X86_CPU_TYPE_NAME("max"), 4331 .class_init = host_x86_cpu_class_init, 4332 }; 4333 4334 #endif 4335 4336 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit) 4337 { 4338 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD); 4339 4340 switch (f->type) { 4341 case CPUID_FEATURE_WORD: 4342 { 4343 const char *reg = get_register_name_32(f->cpuid.reg); 4344 assert(reg); 4345 return g_strdup_printf("CPUID.%02XH:%s", 4346 f->cpuid.eax, reg); 4347 } 4348 case MSR_FEATURE_WORD: 4349 return g_strdup_printf("MSR(%02XH)", 4350 f->msr.index); 4351 } 4352 4353 return NULL; 4354 } 4355 4356 static bool x86_cpu_have_filtered_features(X86CPU *cpu) 4357 { 4358 FeatureWord w; 4359 4360 for (w = 0; w < FEATURE_WORDS; w++) { 4361 if (cpu->filtered_features[w]) { 4362 return true; 4363 } 4364 } 4365 4366 return false; 4367 } 4368 4369 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask, 4370 const char *verbose_prefix) 4371 { 4372 CPUX86State *env = &cpu->env; 4373 FeatureWordInfo *f = &feature_word_info[w]; 4374 int i; 4375 4376 if (!cpu->force_features) { 4377 env->features[w] &= ~mask; 4378 } 4379 cpu->filtered_features[w] |= mask; 4380 4381 if (!verbose_prefix) { 4382 return; 4383 } 4384 4385 for (i = 0; i < 64; ++i) { 4386 if ((1ULL << i) & mask) { 4387 g_autofree char *feat_word_str = feature_word_description(f, i); 4388 warn_report("%s: %s%s%s [bit %d]", 4389 verbose_prefix, 4390 feat_word_str, 4391 f->feat_names[i] ? "." : "", 4392 f->feat_names[i] ? f->feat_names[i] : "", i); 4393 } 4394 } 4395 } 4396 4397 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, 4398 const char *name, void *opaque, 4399 Error **errp) 4400 { 4401 X86CPU *cpu = X86_CPU(obj); 4402 CPUX86State *env = &cpu->env; 4403 int64_t value; 4404 4405 value = (env->cpuid_version >> 8) & 0xf; 4406 if (value == 0xf) { 4407 value += (env->cpuid_version >> 20) & 0xff; 4408 } 4409 visit_type_int(v, name, &value, errp); 4410 } 4411 4412 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, 4413 const char *name, void *opaque, 4414 Error **errp) 4415 { 4416 X86CPU *cpu = X86_CPU(obj); 4417 CPUX86State *env = &cpu->env; 4418 const int64_t min = 0; 4419 const int64_t max = 0xff + 0xf; 4420 Error *local_err = NULL; 4421 int64_t value; 4422 4423 visit_type_int(v, name, &value, &local_err); 4424 if (local_err) { 4425 error_propagate(errp, local_err); 4426 return; 4427 } 4428 if (value < min || value > max) { 4429 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4430 name ? 
name : "null", value, min, max); 4431 return; 4432 } 4433 4434 env->cpuid_version &= ~0xff00f00; 4435 if (value > 0x0f) { 4436 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20); 4437 } else { 4438 env->cpuid_version |= value << 8; 4439 } 4440 } 4441 4442 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, 4443 const char *name, void *opaque, 4444 Error **errp) 4445 { 4446 X86CPU *cpu = X86_CPU(obj); 4447 CPUX86State *env = &cpu->env; 4448 int64_t value; 4449 4450 value = (env->cpuid_version >> 4) & 0xf; 4451 value |= ((env->cpuid_version >> 16) & 0xf) << 4; 4452 visit_type_int(v, name, &value, errp); 4453 } 4454 4455 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, 4456 const char *name, void *opaque, 4457 Error **errp) 4458 { 4459 X86CPU *cpu = X86_CPU(obj); 4460 CPUX86State *env = &cpu->env; 4461 const int64_t min = 0; 4462 const int64_t max = 0xff; 4463 Error *local_err = NULL; 4464 int64_t value; 4465 4466 visit_type_int(v, name, &value, &local_err); 4467 if (local_err) { 4468 error_propagate(errp, local_err); 4469 return; 4470 } 4471 if (value < min || value > max) { 4472 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4473 name ? name : "null", value, min, max); 4474 return; 4475 } 4476 4477 env->cpuid_version &= ~0xf00f0; 4478 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16); 4479 } 4480 4481 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v, 4482 const char *name, void *opaque, 4483 Error **errp) 4484 { 4485 X86CPU *cpu = X86_CPU(obj); 4486 CPUX86State *env = &cpu->env; 4487 int64_t value; 4488 4489 value = env->cpuid_version & 0xf; 4490 visit_type_int(v, name, &value, errp); 4491 } 4492 4493 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v, 4494 const char *name, void *opaque, 4495 Error **errp) 4496 { 4497 X86CPU *cpu = X86_CPU(obj); 4498 CPUX86State *env = &cpu->env; 4499 const int64_t min = 0; 4500 const int64_t max = 0xf; 4501 Error *local_err = NULL; 4502 int64_t value; 4503 4504 visit_type_int(v, name, &value, &local_err); 4505 if (local_err) { 4506 error_propagate(errp, local_err); 4507 return; 4508 } 4509 if (value < min || value > max) { 4510 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4511 name ? 
name : "null", value, min, max); 4512 return; 4513 } 4514 4515 env->cpuid_version &= ~0xf; 4516 env->cpuid_version |= value & 0xf; 4517 } 4518 4519 static char *x86_cpuid_get_vendor(Object *obj, Error **errp) 4520 { 4521 X86CPU *cpu = X86_CPU(obj); 4522 CPUX86State *env = &cpu->env; 4523 char *value; 4524 4525 value = g_malloc(CPUID_VENDOR_SZ + 1); 4526 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2, 4527 env->cpuid_vendor3); 4528 return value; 4529 } 4530 4531 static void x86_cpuid_set_vendor(Object *obj, const char *value, 4532 Error **errp) 4533 { 4534 X86CPU *cpu = X86_CPU(obj); 4535 CPUX86State *env = &cpu->env; 4536 int i; 4537 4538 if (strlen(value) != CPUID_VENDOR_SZ) { 4539 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); 4540 return; 4541 } 4542 4543 env->cpuid_vendor1 = 0; 4544 env->cpuid_vendor2 = 0; 4545 env->cpuid_vendor3 = 0; 4546 for (i = 0; i < 4; i++) { 4547 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i); 4548 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i); 4549 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i); 4550 } 4551 } 4552 4553 static char *x86_cpuid_get_model_id(Object *obj, Error **errp) 4554 { 4555 X86CPU *cpu = X86_CPU(obj); 4556 CPUX86State *env = &cpu->env; 4557 char *value; 4558 int i; 4559 4560 value = g_malloc(48 + 1); 4561 for (i = 0; i < 48; i++) { 4562 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3)); 4563 } 4564 value[48] = '\0'; 4565 return value; 4566 } 4567 4568 static void x86_cpuid_set_model_id(Object *obj, const char *model_id, 4569 Error **errp) 4570 { 4571 X86CPU *cpu = X86_CPU(obj); 4572 CPUX86State *env = &cpu->env; 4573 int c, len, i; 4574 4575 if (model_id == NULL) { 4576 model_id = ""; 4577 } 4578 len = strlen(model_id); 4579 memset(env->cpuid_model, 0, 48); 4580 for (i = 0; i < 48; i++) { 4581 if (i >= len) { 4582 c = '\0'; 4583 } else { 4584 c = (uint8_t)model_id[i]; 4585 } 4586 env->cpuid_model[i >> 2] |= c << (8 * (i & 3)); 4587 } 4588 } 4589 4590 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name, 4591 void *opaque, Error **errp) 4592 { 4593 X86CPU *cpu = X86_CPU(obj); 4594 int64_t value; 4595 4596 value = cpu->env.tsc_khz * 1000; 4597 visit_type_int(v, name, &value, errp); 4598 } 4599 4600 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name, 4601 void *opaque, Error **errp) 4602 { 4603 X86CPU *cpu = X86_CPU(obj); 4604 const int64_t min = 0; 4605 const int64_t max = INT64_MAX; 4606 Error *local_err = NULL; 4607 int64_t value; 4608 4609 visit_type_int(v, name, &value, &local_err); 4610 if (local_err) { 4611 error_propagate(errp, local_err); 4612 return; 4613 } 4614 if (value < min || value > max) { 4615 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "", 4616 name ? name : "null", value, min, max); 4617 return; 4618 } 4619 4620 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000; 4621 } 4622 4623 /* Generic getter for "feature-words" and "filtered-features" properties */ 4624 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, 4625 const char *name, void *opaque, 4626 Error **errp) 4627 { 4628 uint64_t *array = (uint64_t *)opaque; 4629 FeatureWord w; 4630 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { }; 4631 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { }; 4632 X86CPUFeatureWordInfoList *list = NULL; 4633 4634 for (w = 0; w < FEATURE_WORDS; w++) { 4635 FeatureWordInfo *wi = &feature_word_info[w]; 4636 /* 4637 * We didn't have MSR features when "feature-words" was 4638 * introduced. 
Therefore skipped other type entries. 4639 */ 4640 if (wi->type != CPUID_FEATURE_WORD) { 4641 continue; 4642 } 4643 X86CPUFeatureWordInfo *qwi = &word_infos[w]; 4644 qwi->cpuid_input_eax = wi->cpuid.eax; 4645 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx; 4646 qwi->cpuid_input_ecx = wi->cpuid.ecx; 4647 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum; 4648 qwi->features = array[w]; 4649 4650 /* List will be in reverse order, but order shouldn't matter */ 4651 list_entries[w].next = list; 4652 list_entries[w].value = &word_infos[w]; 4653 list = &list_entries[w]; 4654 } 4655 4656 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp); 4657 } 4658 4659 /* Convert all '_' in a feature string option name to '-', to make feature 4660 * name conform to QOM property naming rule, which uses '-' instead of '_'. 4661 */ 4662 static inline void feat2prop(char *s) 4663 { 4664 while ((s = strchr(s, '_'))) { 4665 *s = '-'; 4666 } 4667 } 4668 4669 /* Return the feature property name for a feature flag bit */ 4670 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) 4671 { 4672 const char *name; 4673 /* XSAVE components are automatically enabled by other features, 4674 * so return the original feature name instead 4675 */ 4676 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { 4677 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; 4678 4679 if (comp < ARRAY_SIZE(x86_ext_save_areas) && 4680 x86_ext_save_areas[comp].bits) { 4681 w = x86_ext_save_areas[comp].feature; 4682 bitnr = ctz32(x86_ext_save_areas[comp].bits); 4683 } 4684 } 4685 4686 assert(bitnr < 64); 4687 assert(w < FEATURE_WORDS); 4688 name = feature_word_info[w].feat_names[bitnr]; 4689 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD)); 4690 return name; 4691 } 4692 4693 /* Compatibily hack to maintain legacy +-feat semantic, 4694 * where +-feat overwrites any feature set by 4695 * feat=on|feat even if the later is parsed after +-feat 4696 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled) 4697 */ 4698 static GList *plus_features, *minus_features; 4699 4700 static gint compare_string(gconstpointer a, gconstpointer b) 4701 { 4702 return g_strcmp0(a, b); 4703 } 4704 4705 /* Parse "+feature,-feature,feature=foo" CPU feature string 4706 */ 4707 static void x86_cpu_parse_featurestr(const char *typename, char *features, 4708 Error **errp) 4709 { 4710 char *featurestr; /* Single 'key=value" string being parsed */ 4711 static bool cpu_globals_initialized; 4712 bool ambiguous = false; 4713 4714 if (cpu_globals_initialized) { 4715 return; 4716 } 4717 cpu_globals_initialized = true; 4718 4719 if (!features) { 4720 return; 4721 } 4722 4723 for (featurestr = strtok(features, ","); 4724 featurestr; 4725 featurestr = strtok(NULL, ",")) { 4726 const char *name; 4727 const char *val = NULL; 4728 char *eq = NULL; 4729 char num[32]; 4730 GlobalProperty *prop; 4731 4732 /* Compatibility syntax: */ 4733 if (featurestr[0] == '+') { 4734 plus_features = g_list_append(plus_features, 4735 g_strdup(featurestr + 1)); 4736 continue; 4737 } else if (featurestr[0] == '-') { 4738 minus_features = g_list_append(minus_features, 4739 g_strdup(featurestr + 1)); 4740 continue; 4741 } 4742 4743 eq = strchr(featurestr, '='); 4744 if (eq) { 4745 *eq++ = 0; 4746 val = eq; 4747 } else { 4748 val = "on"; 4749 } 4750 4751 feat2prop(featurestr); 4752 name = featurestr; 4753 4754 if (g_list_find_custom(plus_features, name, compare_string)) { 4755 warn_report("Ambiguous CPU model string. 
" 4756 "Don't mix both \"+%s\" and \"%s=%s\"", 4757 name, name, val); 4758 ambiguous = true; 4759 } 4760 if (g_list_find_custom(minus_features, name, compare_string)) { 4761 warn_report("Ambiguous CPU model string. " 4762 "Don't mix both \"-%s\" and \"%s=%s\"", 4763 name, name, val); 4764 ambiguous = true; 4765 } 4766 4767 /* Special case: */ 4768 if (!strcmp(name, "tsc-freq")) { 4769 int ret; 4770 uint64_t tsc_freq; 4771 4772 ret = qemu_strtosz_metric(val, NULL, &tsc_freq); 4773 if (ret < 0 || tsc_freq > INT64_MAX) { 4774 error_setg(errp, "bad numerical value %s", val); 4775 return; 4776 } 4777 snprintf(num, sizeof(num), "%" PRId64, tsc_freq); 4778 val = num; 4779 name = "tsc-frequency"; 4780 } 4781 4782 prop = g_new0(typeof(*prop), 1); 4783 prop->driver = typename; 4784 prop->property = g_strdup(name); 4785 prop->value = g_strdup(val); 4786 qdev_prop_register_global(prop); 4787 } 4788 4789 if (ambiguous) { 4790 warn_report("Compatibility of ambiguous CPU model " 4791 "strings won't be kept on future QEMU versions"); 4792 } 4793 } 4794 4795 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp); 4796 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose); 4797 4798 /* Build a list with the name of all features on a feature word array */ 4799 static void x86_cpu_list_feature_names(FeatureWordArray features, 4800 strList **feat_names) 4801 { 4802 FeatureWord w; 4803 strList **next = feat_names; 4804 4805 for (w = 0; w < FEATURE_WORDS; w++) { 4806 uint64_t filtered = features[w]; 4807 int i; 4808 for (i = 0; i < 64; i++) { 4809 if (filtered & (1ULL << i)) { 4810 strList *new = g_new0(strList, 1); 4811 new->value = g_strdup(x86_cpu_feature_name(w, i)); 4812 *next = new; 4813 next = &new->next; 4814 } 4815 } 4816 } 4817 } 4818 4819 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, 4820 const char *name, void *opaque, 4821 Error **errp) 4822 { 4823 X86CPU *xc = X86_CPU(obj); 4824 strList *result = NULL; 4825 4826 x86_cpu_list_feature_names(xc->filtered_features, &result); 4827 visit_type_strList(v, "unavailable-features", &result, errp); 4828 } 4829 4830 /* Check for missing features that may prevent the CPU class from 4831 * running using the current machine and accelerator. 4832 */ 4833 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc, 4834 strList **missing_feats) 4835 { 4836 X86CPU *xc; 4837 Error *err = NULL; 4838 strList **next = missing_feats; 4839 4840 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) { 4841 strList *new = g_new0(strList, 1); 4842 new->value = g_strdup("kvm"); 4843 *missing_feats = new; 4844 return; 4845 } 4846 4847 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 4848 4849 x86_cpu_expand_features(xc, &err); 4850 if (err) { 4851 /* Errors at x86_cpu_expand_features should never happen, 4852 * but in case it does, just report the model as not 4853 * runnable at all using the "type" property. 
4854 */ 4855 strList *new = g_new0(strList, 1); 4856 new->value = g_strdup("type"); 4857 *next = new; 4858 next = &new->next; 4859 } 4860 4861 x86_cpu_filter_features(xc, false); 4862 4863 x86_cpu_list_feature_names(xc->filtered_features, next); 4864 4865 object_unref(OBJECT(xc)); 4866 } 4867 4868 /* Print all cpuid feature names in featureset 4869 */ 4870 static void listflags(GList *features) 4871 { 4872 size_t len = 0; 4873 GList *tmp; 4874 4875 for (tmp = features; tmp; tmp = tmp->next) { 4876 const char *name = tmp->data; 4877 if ((len + strlen(name) + 1) >= 75) { 4878 qemu_printf("\n"); 4879 len = 0; 4880 } 4881 qemu_printf("%s%s", len == 0 ? " " : " ", name); 4882 len += strlen(name) + 1; 4883 } 4884 qemu_printf("\n"); 4885 } 4886 4887 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */ 4888 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b) 4889 { 4890 ObjectClass *class_a = (ObjectClass *)a; 4891 ObjectClass *class_b = (ObjectClass *)b; 4892 X86CPUClass *cc_a = X86_CPU_CLASS(class_a); 4893 X86CPUClass *cc_b = X86_CPU_CLASS(class_b); 4894 int ret; 4895 4896 if (cc_a->ordering != cc_b->ordering) { 4897 ret = cc_a->ordering - cc_b->ordering; 4898 } else { 4899 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a); 4900 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b); 4901 ret = strcmp(name_a, name_b); 4902 } 4903 return ret; 4904 } 4905 4906 static GSList *get_sorted_cpu_model_list(void) 4907 { 4908 GSList *list = object_class_get_list(TYPE_X86_CPU, false); 4909 list = g_slist_sort(list, x86_cpu_list_compare); 4910 return list; 4911 } 4912 4913 static char *x86_cpu_class_get_model_id(X86CPUClass *xc) 4914 { 4915 Object *obj = object_new_with_class(OBJECT_CLASS(xc)); 4916 char *r = object_property_get_str(obj, "model-id", &error_abort); 4917 object_unref(obj); 4918 return r; 4919 } 4920 4921 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc) 4922 { 4923 X86CPUVersion version; 4924 4925 if (!cc->model || !cc->model->is_alias) { 4926 return NULL; 4927 } 4928 version = x86_cpu_model_resolve_version(cc->model); 4929 if (version <= 0) { 4930 return NULL; 4931 } 4932 return x86_cpu_versioned_model_name(cc->model->cpudef, version); 4933 } 4934 4935 static void x86_cpu_list_entry(gpointer data, gpointer user_data) 4936 { 4937 ObjectClass *oc = data; 4938 X86CPUClass *cc = X86_CPU_CLASS(oc); 4939 g_autofree char *name = x86_cpu_class_get_model_name(cc); 4940 g_autofree char *desc = g_strdup(cc->model_description); 4941 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc); 4942 g_autofree char *model_id = x86_cpu_class_get_model_id(cc); 4943 4944 if (!desc && alias_of) { 4945 if (cc->model && cc->model->version == CPU_VERSION_AUTO) { 4946 desc = g_strdup("(alias configured by machine type)"); 4947 } else { 4948 desc = g_strdup_printf("(alias of %s)", alias_of); 4949 } 4950 } 4951 if (!desc && cc->model && cc->model->note) { 4952 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note); 4953 } 4954 if (!desc) { 4955 desc = g_strdup_printf("%s", model_id); 4956 } 4957 4958 qemu_printf("x86 %-20s %-58s\n", name, desc); 4959 } 4960 4961 /* list available CPU models and flags */ 4962 void x86_cpu_list(void) 4963 { 4964 int i, j; 4965 GSList *list; 4966 GList *names = NULL; 4967 4968 qemu_printf("Available CPUs:\n"); 4969 list = get_sorted_cpu_model_list(); 4970 g_slist_foreach(list, x86_cpu_list_entry, NULL); 4971 g_slist_free(list); 4972 4973 names = NULL; 4974 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) { 4975 
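/* Collect the property name of every defined feature bit; the list is sorted and printed below. */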
FeatureWordInfo *fw = &feature_word_info[i]; 4976 for (j = 0; j < 64; j++) { 4977 if (fw->feat_names[j]) { 4978 names = g_list_append(names, (gpointer)fw->feat_names[j]); 4979 } 4980 } 4981 } 4982 4983 names = g_list_sort(names, (GCompareFunc)strcmp); 4984 4985 qemu_printf("\nRecognized CPUID flags:\n"); 4986 listflags(names); 4987 qemu_printf("\n"); 4988 g_list_free(names); 4989 } 4990 4991 static void x86_cpu_definition_entry(gpointer data, gpointer user_data) 4992 { 4993 ObjectClass *oc = data; 4994 X86CPUClass *cc = X86_CPU_CLASS(oc); 4995 CpuDefinitionInfoList **cpu_list = user_data; 4996 CpuDefinitionInfoList *entry; 4997 CpuDefinitionInfo *info; 4998 4999 info = g_malloc0(sizeof(*info)); 5000 info->name = x86_cpu_class_get_model_name(cc); 5001 x86_cpu_class_check_missing_features(cc, &info->unavailable_features); 5002 info->has_unavailable_features = true; 5003 info->q_typename = g_strdup(object_class_get_name(oc)); 5004 info->migration_safe = cc->migration_safe; 5005 info->has_migration_safe = true; 5006 info->q_static = cc->static_model; 5007 /* 5008 * Old machine types won't report aliases, so that alias translation 5009 * doesn't break compatibility with previous QEMU versions. 5010 */ 5011 if (default_cpu_version != CPU_VERSION_LEGACY) { 5012 info->alias_of = x86_cpu_class_get_alias_of(cc); 5013 info->has_alias_of = !!info->alias_of; 5014 } 5015 5016 entry = g_malloc0(sizeof(*entry)); 5017 entry->value = info; 5018 entry->next = *cpu_list; 5019 *cpu_list = entry; 5020 } 5021 5022 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) 5023 { 5024 CpuDefinitionInfoList *cpu_list = NULL; 5025 GSList *list = get_sorted_cpu_model_list(); 5026 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list); 5027 g_slist_free(list); 5028 return cpu_list; 5029 } 5030 5031 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, 5032 bool migratable_only) 5033 { 5034 FeatureWordInfo *wi = &feature_word_info[w]; 5035 uint64_t r = 0; 5036 5037 if (kvm_enabled()) { 5038 switch (wi->type) { 5039 case CPUID_FEATURE_WORD: 5040 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax, 5041 wi->cpuid.ecx, 5042 wi->cpuid.reg); 5043 break; 5044 case MSR_FEATURE_WORD: 5045 r = kvm_arch_get_supported_msr_feature(kvm_state, 5046 wi->msr.index); 5047 break; 5048 } 5049 } else if (hvf_enabled()) { 5050 if (wi->type != CPUID_FEATURE_WORD) { 5051 return 0; 5052 } 5053 r = hvf_get_supported_cpuid(wi->cpuid.eax, 5054 wi->cpuid.ecx, 5055 wi->cpuid.reg); 5056 } else if (tcg_enabled()) { 5057 r = wi->tcg_features; 5058 } else { 5059 return ~0; 5060 } 5061 if (migratable_only) { 5062 r &= x86_cpu_get_migratable_flags(w); 5063 } 5064 return r; 5065 } 5066 5067 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props) 5068 { 5069 PropValue *pv; 5070 for (pv = props; pv->prop; pv++) { 5071 if (!pv->value) { 5072 continue; 5073 } 5074 object_property_parse(OBJECT(cpu), pv->value, pv->prop, 5075 &error_abort); 5076 } 5077 } 5078 5079 /* Apply properties for the CPU model version specified in model */ 5080 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) 5081 { 5082 const X86CPUVersionDefinition *vdef; 5083 X86CPUVersion version = x86_cpu_model_resolve_version(model); 5084 5085 if (version == CPU_VERSION_LEGACY) { 5086 return; 5087 } 5088 5089 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { 5090 PropValue *p; 5091 5092 for (p = vdef->props; p && p->prop; p++) { 5093 object_property_parse(OBJECT(cpu), p->value, p->prop, 5094 &error_abort); 5095 
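/* Version property tables are part of the built-in model definitions, so a parse failure here is a QEMU bug. */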
} 5096 5097 if (vdef->version == version) { 5098 break; 5099 } 5100 } 5101 5102 /* 5103 * If we reached the end of the list, version number was invalid 5104 */ 5105 assert(vdef->version == version); 5106 } 5107 5108 /* Load data from X86CPUDefinition into a X86CPU object 5109 */ 5110 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) 5111 { 5112 X86CPUDefinition *def = model->cpudef; 5113 CPUX86State *env = &cpu->env; 5114 const char *vendor; 5115 char host_vendor[CPUID_VENDOR_SZ + 1]; 5116 FeatureWord w; 5117 5118 /*NOTE: any property set by this function should be returned by 5119 * x86_cpu_static_props(), so static expansion of 5120 * query-cpu-model-expansion is always complete. 5121 */ 5122 5123 /* CPU models only set _minimum_ values for level/xlevel: */ 5124 object_property_set_uint(OBJECT(cpu), def->level, "min-level", 5125 &error_abort); 5126 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", 5127 &error_abort); 5128 5129 object_property_set_int(OBJECT(cpu), def->family, "family", 5130 &error_abort); 5131 object_property_set_int(OBJECT(cpu), def->model, "model", 5132 &error_abort); 5133 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", 5134 &error_abort); 5135 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", 5136 &error_abort); 5137 for (w = 0; w < FEATURE_WORDS; w++) { 5138 env->features[w] = def->features[w]; 5139 } 5140 5141 /* legacy-cache defaults to 'off' if CPU model provides cache info */ 5142 cpu->legacy_cache = !def->cache_info; 5143 5144 /* Special cases not set in the X86CPUDefinition structs: */ 5145 /* TODO: in-kernel irqchip for hvf */ 5146 if (kvm_enabled()) { 5147 if (!kvm_irqchip_in_kernel()) { 5148 x86_cpu_change_kvm_default("x2apic", "off"); 5149 } 5150 5151 x86_cpu_apply_props(cpu, kvm_default_props); 5152 } else if (tcg_enabled()) { 5153 x86_cpu_apply_props(cpu, tcg_default_props); 5154 } 5155 5156 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; 5157 5158 /* sysenter isn't supported in compatibility mode on AMD, 5159 * syscall isn't supported in compatibility mode on Intel. 5160 * Normally we advertise the actual CPU vendor, but you can 5161 * override this using the 'vendor' property if you want to use 5162 * KVM's sysenter/syscall emulation in compatibility mode and 5163 * when doing cross vendor migration 5164 */ 5165 vendor = def->vendor; 5166 if (accel_uses_host_cpuid()) { 5167 uint32_t ebx = 0, ecx = 0, edx = 0; 5168 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx); 5169 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx); 5170 vendor = host_vendor; 5171 } 5172 5173 object_property_set_str(OBJECT(cpu), vendor, "vendor", 5174 &error_abort); 5175 5176 x86_cpu_apply_version_props(cpu, model); 5177 } 5178 5179 #ifndef CONFIG_USER_ONLY 5180 /* Return a QDict containing keys for all properties that can be included 5181 * in static expansion of CPU models. All properties set by x86_cpu_load_model() 5182 * must be included in the dictionary. 
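* Only the keys are meaningful: every value in the returned dict is a null placeholder.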
5183 */ 5184 static QDict *x86_cpu_static_props(void) 5185 { 5186 FeatureWord w; 5187 int i; 5188 static const char *props[] = { 5189 "min-level", 5190 "min-xlevel", 5191 "family", 5192 "model", 5193 "stepping", 5194 "model-id", 5195 "vendor", 5196 "lmce", 5197 NULL, 5198 }; 5199 static QDict *d; 5200 5201 if (d) { 5202 return d; 5203 } 5204 5205 d = qdict_new(); 5206 for (i = 0; props[i]; i++) { 5207 qdict_put_null(d, props[i]); 5208 } 5209 5210 for (w = 0; w < FEATURE_WORDS; w++) { 5211 FeatureWordInfo *fi = &feature_word_info[w]; 5212 int bit; 5213 for (bit = 0; bit < 64; bit++) { 5214 if (!fi->feat_names[bit]) { 5215 continue; 5216 } 5217 qdict_put_null(d, fi->feat_names[bit]); 5218 } 5219 } 5220 5221 return d; 5222 } 5223 5224 /* Add an entry to @props dict, with the value for property. */ 5225 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop) 5226 { 5227 QObject *value = object_property_get_qobject(OBJECT(cpu), prop, 5228 &error_abort); 5229 5230 qdict_put_obj(props, prop, value); 5231 } 5232 5233 /* Convert CPU model data from X86CPU object to a property dictionary 5234 * that can recreate exactly the same CPU model. 5235 */ 5236 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props) 5237 { 5238 QDict *sprops = x86_cpu_static_props(); 5239 const QDictEntry *e; 5240 5241 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) { 5242 const char *prop = qdict_entry_key(e); 5243 x86_cpu_expand_prop(cpu, props, prop); 5244 } 5245 } 5246 5247 /* Convert CPU model data from X86CPU object to a property dictionary 5248 * that can recreate exactly the same CPU model, including every 5249 * writeable QOM property. 5250 */ 5251 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props) 5252 { 5253 ObjectPropertyIterator iter; 5254 ObjectProperty *prop; 5255 5256 object_property_iter_init(&iter, OBJECT(cpu)); 5257 while ((prop = object_property_iter_next(&iter))) { 5258 /* skip read-only or write-only properties */ 5259 if (!prop->get || !prop->set) { 5260 continue; 5261 } 5262 5263 /* "hotplugged" is the only property that is configurable 5264 * on the command-line but will be set differently on CPUs 5265 * created using "-cpu ... -smp ..." and by CPUs created 5266 * on the fly by x86_cpu_from_model() for querying. Skip it. 
5267 */ 5268 if (!strcmp(prop->name, "hotplugged")) { 5269 continue; 5270 } 5271 x86_cpu_expand_prop(cpu, props, prop->name); 5272 } 5273 } 5274 5275 static void object_apply_props(Object *obj, QDict *props, Error **errp) 5276 { 5277 const QDictEntry *prop; 5278 Error *err = NULL; 5279 5280 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) { 5281 object_property_set_qobject(obj, qdict_entry_value(prop), 5282 qdict_entry_key(prop), &err); 5283 if (err) { 5284 break; 5285 } 5286 } 5287 5288 error_propagate(errp, err); 5289 } 5290 5291 /* Create X86CPU object according to model+props specification */ 5292 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp) 5293 { 5294 X86CPU *xc = NULL; 5295 X86CPUClass *xcc; 5296 Error *err = NULL; 5297 5298 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model)); 5299 if (xcc == NULL) { 5300 error_setg(&err, "CPU model '%s' not found", model); 5301 goto out; 5302 } 5303 5304 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc))); 5305 if (props) { 5306 object_apply_props(OBJECT(xc), props, &err); 5307 if (err) { 5308 goto out; 5309 } 5310 } 5311 5312 x86_cpu_expand_features(xc, &err); 5313 if (err) { 5314 goto out; 5315 } 5316 5317 out: 5318 if (err) { 5319 error_propagate(errp, err); 5320 object_unref(OBJECT(xc)); 5321 xc = NULL; 5322 } 5323 return xc; 5324 } 5325 5326 CpuModelExpansionInfo * 5327 qmp_query_cpu_model_expansion(CpuModelExpansionType type, 5328 CpuModelInfo *model, 5329 Error **errp) 5330 { 5331 X86CPU *xc = NULL; 5332 Error *err = NULL; 5333 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1); 5334 QDict *props = NULL; 5335 const char *base_name; 5336 5337 xc = x86_cpu_from_model(model->name, 5338 model->has_props ? 5339 qobject_to(QDict, model->props) : 5340 NULL, &err); 5341 if (err) { 5342 goto out; 5343 } 5344 5345 props = qdict_new(); 5346 ret->model = g_new0(CpuModelInfo, 1); 5347 ret->model->props = QOBJECT(props); 5348 ret->model->has_props = true; 5349 5350 switch (type) { 5351 case CPU_MODEL_EXPANSION_TYPE_STATIC: 5352 /* Static expansion will be based on "base" only */ 5353 base_name = "base"; 5354 x86_cpu_to_dict(xc, props); 5355 break; 5356 case CPU_MODEL_EXPANSION_TYPE_FULL: 5357 /* As we don't return every single property, full expansion needs 5358 * to keep the original model name+props, and add extra 5359 * properties on top of that. 
5360 */ 5361 base_name = model->name; 5362 x86_cpu_to_dict_full(xc, props); 5363 break; 5364 default: 5365 error_setg(&err, "Unsupported expansion type"); 5366 goto out; 5367 } 5368 5369 x86_cpu_to_dict(xc, props); 5370 5371 ret->model->name = g_strdup(base_name); 5372 5373 out: 5374 object_unref(OBJECT(xc)); 5375 if (err) { 5376 error_propagate(errp, err); 5377 qapi_free_CpuModelExpansionInfo(ret); 5378 ret = NULL; 5379 } 5380 return ret; 5381 } 5382 #endif /* !CONFIG_USER_ONLY */ 5383 5384 static gchar *x86_gdb_arch_name(CPUState *cs) 5385 { 5386 #ifdef TARGET_X86_64 5387 return g_strdup("i386:x86-64"); 5388 #else 5389 return g_strdup("i386"); 5390 #endif 5391 } 5392 5393 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data) 5394 { 5395 X86CPUModel *model = data; 5396 X86CPUClass *xcc = X86_CPU_CLASS(oc); 5397 5398 xcc->model = model; 5399 xcc->migration_safe = true; 5400 } 5401 5402 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model) 5403 { 5404 g_autofree char *typename = x86_cpu_type_name(name); 5405 TypeInfo ti = { 5406 .name = typename, 5407 .parent = TYPE_X86_CPU, 5408 .class_init = x86_cpu_cpudef_class_init, 5409 .class_data = model, 5410 }; 5411 5412 type_register(&ti); 5413 } 5414 5415 static void x86_register_cpudef_types(X86CPUDefinition *def) 5416 { 5417 X86CPUModel *m; 5418 const X86CPUVersionDefinition *vdef; 5419 5420 /* AMD aliases are handled at runtime based on CPUID vendor, so 5421 * they shouldn't be set on the CPU model table. 5422 */ 5423 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES)); 5424 /* catch mistakes instead of silently truncating model_id when too long */ 5425 assert(def->model_id && strlen(def->model_id) <= 48); 5426 5427 /* Unversioned model: */ 5428 m = g_new0(X86CPUModel, 1); 5429 m->cpudef = def; 5430 m->version = CPU_VERSION_AUTO; 5431 m->is_alias = true; 5432 x86_register_cpu_model_type(def->name, m); 5433 5434 /* Versioned models: */ 5435 5436 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) { 5437 X86CPUModel *m = g_new0(X86CPUModel, 1); 5438 g_autofree char *name = 5439 x86_cpu_versioned_model_name(def, vdef->version); 5440 m->cpudef = def; 5441 m->version = vdef->version; 5442 m->note = vdef->note; 5443 x86_register_cpu_model_type(name, m); 5444 5445 if (vdef->alias) { 5446 X86CPUModel *am = g_new0(X86CPUModel, 1); 5447 am->cpudef = def; 5448 am->version = vdef->version; 5449 am->is_alias = true; 5450 x86_register_cpu_model_type(vdef->alias, am); 5451 } 5452 } 5453 5454 } 5455 5456 #if !defined(CONFIG_USER_ONLY) 5457 5458 void cpu_clear_apic_feature(CPUX86State *env) 5459 { 5460 env->features[FEAT_1_EDX] &= ~CPUID_APIC; 5461 } 5462 5463 #endif /* !CONFIG_USER_ONLY */ 5464 5465 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, 5466 uint32_t *eax, uint32_t *ebx, 5467 uint32_t *ecx, uint32_t *edx) 5468 { 5469 X86CPU *cpu = env_archcpu(env); 5470 CPUState *cs = env_cpu(env); 5471 uint32_t die_offset; 5472 uint32_t limit; 5473 uint32_t signature[3]; 5474 X86CPUTopoInfo topo_info; 5475 5476 topo_info.nodes_per_pkg = env->nr_nodes; 5477 topo_info.dies_per_pkg = env->nr_dies; 5478 topo_info.cores_per_die = cs->nr_cores; 5479 topo_info.threads_per_core = cs->nr_threads; 5480 5481 /* Calculate & apply limits for different index ranges */ 5482 if (index >= 0xC0000000) { 5483 limit = env->cpuid_xlevel2; 5484 } else if (index >= 0x80000000) { 5485 limit = env->cpuid_xlevel; 5486 } else if (index >= 0x40000000) { 5487 limit = 0x40000001; 5488 } else { 5489 limit = 
env->cpuid_level; 5490 } 5491 5492 if (index > limit) { 5493 /* Intel documentation states that invalid EAX input will 5494 * return the same information as EAX=cpuid_level 5495 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID) 5496 */ 5497 index = env->cpuid_level; 5498 } 5499 5500 switch(index) { 5501 case 0: 5502 *eax = env->cpuid_level; 5503 *ebx = env->cpuid_vendor1; 5504 *edx = env->cpuid_vendor2; 5505 *ecx = env->cpuid_vendor3; 5506 break; 5507 case 1: 5508 *eax = env->cpuid_version; 5509 *ebx = (cpu->apic_id << 24) | 5510 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */ 5511 *ecx = env->features[FEAT_1_ECX]; 5512 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) { 5513 *ecx |= CPUID_EXT_OSXSAVE; 5514 } 5515 *edx = env->features[FEAT_1_EDX]; 5516 if (cs->nr_cores * cs->nr_threads > 1) { 5517 *ebx |= (cs->nr_cores * cs->nr_threads) << 16; 5518 *edx |= CPUID_HT; 5519 } 5520 if (!cpu->enable_pmu) { 5521 *ecx &= ~CPUID_EXT_PDCM; 5522 } 5523 break; 5524 case 2: 5525 /* cache info: needed for Pentium Pro compatibility */ 5526 if (cpu->cache_info_passthrough) { 5527 host_cpuid(index, 0, eax, ebx, ecx, edx); 5528 break; 5529 } 5530 *eax = 1; /* Number of CPUID[EAX=2] calls required */ 5531 *ebx = 0; 5532 if (!cpu->enable_l3_cache) { 5533 *ecx = 0; 5534 } else { 5535 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache); 5536 } 5537 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) | 5538 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) | 5539 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache)); 5540 break; 5541 case 4: 5542 /* cache info: needed for Core compatibility */ 5543 if (cpu->cache_info_passthrough) { 5544 host_cpuid(index, count, eax, ebx, ecx, edx); 5545 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. 
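* Bits 31..26 hold "maximum addressable core IDs per package" minus one; they are rebuilt from nr_cores just below (e.g. nr_cores == 4 yields (4 - 1) << 26).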
*/ 5546 *eax &= ~0xFC000000; 5547 if ((*eax & 31) && cs->nr_cores > 1) { 5548 *eax |= (cs->nr_cores - 1) << 26; 5549 } 5550 } else { 5551 *eax = 0; 5552 switch (count) { 5553 case 0: /* L1 dcache info */ 5554 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache, 5555 1, cs->nr_cores, 5556 eax, ebx, ecx, edx); 5557 break; 5558 case 1: /* L1 icache info */ 5559 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache, 5560 1, cs->nr_cores, 5561 eax, ebx, ecx, edx); 5562 break; 5563 case 2: /* L2 cache info */ 5564 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache, 5565 cs->nr_threads, cs->nr_cores, 5566 eax, ebx, ecx, edx); 5567 break; 5568 case 3: /* L3 cache info */ 5569 die_offset = apicid_die_offset(&topo_info); 5570 if (cpu->enable_l3_cache) { 5571 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, 5572 (1 << die_offset), cs->nr_cores, 5573 eax, ebx, ecx, edx); 5574 break; 5575 } 5576 /* fall through */ 5577 default: /* end of info */ 5578 *eax = *ebx = *ecx = *edx = 0; 5579 break; 5580 } 5581 } 5582 break; 5583 case 5: 5584 /* MONITOR/MWAIT Leaf */ 5585 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */ 5586 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */ 5587 *ecx = cpu->mwait.ecx; /* flags */ 5588 *edx = cpu->mwait.edx; /* mwait substates */ 5589 break; 5590 case 6: 5591 /* Thermal and Power Leaf */ 5592 *eax = env->features[FEAT_6_EAX]; 5593 *ebx = 0; 5594 *ecx = 0; 5595 *edx = 0; 5596 break; 5597 case 7: 5598 /* Structured Extended Feature Flags Enumeration Leaf */ 5599 if (count == 0) { 5600 /* Maximum ECX value for sub-leaves */ 5601 *eax = env->cpuid_level_func7; 5602 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */ 5603 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */ 5604 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) { 5605 *ecx |= CPUID_7_0_ECX_OSPKE; 5606 } 5607 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ 5608 } else if (count == 1) { 5609 *eax = env->features[FEAT_7_1_EAX]; 5610 *ebx = 0; 5611 *ecx = 0; 5612 *edx = 0; 5613 } else { 5614 *eax = 0; 5615 *ebx = 0; 5616 *ecx = 0; 5617 *edx = 0; 5618 } 5619 break; 5620 case 9: 5621 /* Direct Cache Access Information Leaf */ 5622 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */ 5623 *ebx = 0; 5624 *ecx = 0; 5625 *edx = 0; 5626 break; 5627 case 0xA: 5628 /* Architectural Performance Monitoring Leaf */ 5629 if (kvm_enabled() && cpu->enable_pmu) { 5630 KVMState *s = cs->kvm_state; 5631 5632 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX); 5633 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX); 5634 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX); 5635 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX); 5636 } else if (hvf_enabled() && cpu->enable_pmu) { 5637 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX); 5638 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX); 5639 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX); 5640 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX); 5641 } else { 5642 *eax = 0; 5643 *ebx = 0; 5644 *ecx = 0; 5645 *edx = 0; 5646 } 5647 break; 5648 case 0xB: 5649 /* Extended Topology Enumeration Leaf */ 5650 if (!cpu->enable_cpuid_0xb) { 5651 *eax = *ebx = *ecx = *edx = 0; 5652 break; 5653 } 5654 5655 *ecx = count & 0xff; 5656 *edx = cpu->apic_id; 5657 5658 switch (count) { 5659 case 0: 5660 *eax = apicid_core_offset(&topo_info); 5661 *ebx = cs->nr_threads; 5662 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5663 break; 5664 case 1: 5665 *eax = env->pkg_offset; 5666 *ebx = cs->nr_cores * cs->nr_threads; 5667 *ecx |= 
CPUID_TOPOLOGY_LEVEL_CORE; 5668 break; 5669 default: 5670 *eax = 0; 5671 *ebx = 0; 5672 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5673 } 5674 5675 assert(!(*eax & ~0x1f)); 5676 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5677 break; 5678 case 0x1F: 5679 /* V2 Extended Topology Enumeration Leaf */ 5680 if (env->nr_dies < 2) { 5681 *eax = *ebx = *ecx = *edx = 0; 5682 break; 5683 } 5684 5685 *ecx = count & 0xff; 5686 *edx = cpu->apic_id; 5687 switch (count) { 5688 case 0: 5689 *eax = apicid_core_offset(&topo_info); 5690 *ebx = cs->nr_threads; 5691 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT; 5692 break; 5693 case 1: 5694 *eax = apicid_die_offset(&topo_info); 5695 *ebx = cs->nr_cores * cs->nr_threads; 5696 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE; 5697 break; 5698 case 2: 5699 *eax = env->pkg_offset; 5700 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads; 5701 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE; 5702 break; 5703 default: 5704 *eax = 0; 5705 *ebx = 0; 5706 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID; 5707 } 5708 assert(!(*eax & ~0x1f)); 5709 *ebx &= 0xffff; /* The count doesn't need to be reliable. */ 5710 break; 5711 case 0xD: { 5712 /* Processor Extended State */ 5713 *eax = 0; 5714 *ebx = 0; 5715 *ecx = 0; 5716 *edx = 0; 5717 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 5718 break; 5719 } 5720 5721 if (count == 0) { 5722 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); 5723 *eax = env->features[FEAT_XSAVE_COMP_LO]; 5724 *edx = env->features[FEAT_XSAVE_COMP_HI]; 5725 /* 5726 * The initial value of xcr0 and ebx == 0, On host without kvm 5727 * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0 5728 * even through guest update xcr0, this will crash some legacy guest 5729 * (e.g., CentOS 6), So set ebx == ecx to workaroud it. 5730 */ 5731 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); 5732 } else if (count == 1) { 5733 *eax = env->features[FEAT_XSAVE]; 5734 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { 5735 if ((x86_cpu_xsave_components(cpu) >> count) & 1) { 5736 const ExtSaveArea *esa = &x86_ext_save_areas[count]; 5737 *eax = esa->size; 5738 *ebx = esa->offset; 5739 } 5740 } 5741 break; 5742 } 5743 case 0x14: { 5744 /* Intel Processor Trace Enumeration */ 5745 *eax = 0; 5746 *ebx = 0; 5747 *ecx = 0; 5748 *edx = 0; 5749 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) || 5750 !kvm_enabled()) { 5751 break; 5752 } 5753 5754 if (count == 0) { 5755 *eax = INTEL_PT_MAX_SUBLEAF; 5756 *ebx = INTEL_PT_MINIMAL_EBX; 5757 *ecx = INTEL_PT_MINIMAL_ECX; 5758 } else if (count == 1) { 5759 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM; 5760 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP; 5761 } 5762 break; 5763 } 5764 case 0x40000000: 5765 /* 5766 * CPUID code in kvm_arch_init_vcpu() ignores stuff 5767 * set here, but we restrict to TCG none the less. 
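* (When KVM is in use, kvm_arch_init_vcpu() installs its own "KVMKVMKVM" signature leaf at 0x40000000 instead.)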
5768 */ 5769 if (tcg_enabled() && cpu->expose_tcg) { 5770 memcpy(signature, "TCGTCGTCGTCG", 12); 5771 *eax = 0x40000001; 5772 *ebx = signature[0]; 5773 *ecx = signature[1]; 5774 *edx = signature[2]; 5775 } else { 5776 *eax = 0; 5777 *ebx = 0; 5778 *ecx = 0; 5779 *edx = 0; 5780 } 5781 break; 5782 case 0x40000001: 5783 *eax = 0; 5784 *ebx = 0; 5785 *ecx = 0; 5786 *edx = 0; 5787 break; 5788 case 0x80000000: 5789 *eax = env->cpuid_xlevel; 5790 *ebx = env->cpuid_vendor1; 5791 *edx = env->cpuid_vendor2; 5792 *ecx = env->cpuid_vendor3; 5793 break; 5794 case 0x80000001: 5795 *eax = env->cpuid_version; 5796 *ebx = 0; 5797 *ecx = env->features[FEAT_8000_0001_ECX]; 5798 *edx = env->features[FEAT_8000_0001_EDX]; 5799 5800 /* The Linux kernel checks for the CMPLegacy bit and 5801 * discards multiple thread information if it is set. 5802 * So don't set it here for Intel to make Linux guests happy. 5803 */ 5804 if (cs->nr_cores * cs->nr_threads > 1) { 5805 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 || 5806 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 || 5807 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) { 5808 *ecx |= 1 << 1; /* CmpLegacy bit */ 5809 } 5810 } 5811 break; 5812 case 0x80000002: 5813 case 0x80000003: 5814 case 0x80000004: 5815 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0]; 5816 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1]; 5817 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2]; 5818 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3]; 5819 break; 5820 case 0x80000005: 5821 /* cache info (L1 cache) */ 5822 if (cpu->cache_info_passthrough) { 5823 host_cpuid(index, 0, eax, ebx, ecx, edx); 5824 break; 5825 } 5826 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | 5827 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES); 5828 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | 5829 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES); 5830 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache); 5831 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache); 5832 break; 5833 case 0x80000006: 5834 /* cache info (L2 cache) */ 5835 if (cpu->cache_info_passthrough) { 5836 host_cpuid(index, 0, eax, ebx, ecx, edx); 5837 break; 5838 } 5839 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | 5840 (L2_DTLB_2M_ENTRIES << 16) | 5841 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | 5842 (L2_ITLB_2M_ENTRIES); 5843 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | 5844 (L2_DTLB_4K_ENTRIES << 16) | 5845 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | 5846 (L2_ITLB_4K_ENTRIES); 5847 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache, 5848 cpu->enable_l3_cache ? 5849 env->cache_info_amd.l3_cache : NULL, 5850 ecx, edx); 5851 break; 5852 case 0x80000007: 5853 *eax = 0; 5854 *ebx = 0; 5855 *ecx = 0; 5856 *edx = env->features[FEAT_8000_0007_EDX]; 5857 break; 5858 case 0x80000008: 5859 /* virtual & phys address size in low 2 bytes. */ 5860 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 5861 /* 64 bit processor */ 5862 *eax = cpu->phys_bits; /* configurable physical bits */ 5863 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) { 5864 *eax |= 0x00003900; /* 57 bits virtual */ 5865 } else { 5866 *eax |= 0x00003000; /* 48 bits virtual */ 5867 } 5868 } else { 5869 *eax = cpu->phys_bits; 5870 } 5871 *ebx = env->features[FEAT_8000_0008_EBX]; 5872 if (cs->nr_cores * cs->nr_threads > 1) { 5873 /* 5874 * Bits 15:12 is "The number of bits in the initial 5875 * Core::X86::Apic::ApicId[ApicId] value that indicate 5876 * thread ID within a package". 
This is already stored at 5877 * CPUX86State::pkg_offset. 5878 * Bits 7:0 is "The number of threads in the package is NC+1" 5879 */ 5880 *ecx = (env->pkg_offset << 12) | 5881 ((cs->nr_cores * cs->nr_threads) - 1); 5882 } else { 5883 *ecx = 0; 5884 } 5885 *edx = 0; 5886 break; 5887 case 0x8000000A: 5888 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 5889 *eax = 0x00000001; /* SVM Revision */ 5890 *ebx = 0x00000010; /* nr of ASIDs */ 5891 *ecx = 0; 5892 *edx = env->features[FEAT_SVM]; /* optional features */ 5893 } else { 5894 *eax = 0; 5895 *ebx = 0; 5896 *ecx = 0; 5897 *edx = 0; 5898 } 5899 break; 5900 case 0x8000001D: 5901 *eax = 0; 5902 if (cpu->cache_info_passthrough) { 5903 host_cpuid(index, count, eax, ebx, ecx, edx); 5904 break; 5905 } 5906 switch (count) { 5907 case 0: /* L1 dcache info */ 5908 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, 5909 &topo_info, eax, ebx, ecx, edx); 5910 break; 5911 case 1: /* L1 icache info */ 5912 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, 5913 &topo_info, eax, ebx, ecx, edx); 5914 break; 5915 case 2: /* L2 cache info */ 5916 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, 5917 &topo_info, eax, ebx, ecx, edx); 5918 break; 5919 case 3: /* L3 cache info */ 5920 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, 5921 &topo_info, eax, ebx, ecx, edx); 5922 break; 5923 default: /* end of info */ 5924 *eax = *ebx = *ecx = *edx = 0; 5925 break; 5926 } 5927 break; 5928 case 0x8000001E: 5929 assert(cpu->core_id <= 255); 5930 encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx); 5931 break; 5932 case 0xC0000000: 5933 *eax = env->cpuid_xlevel2; 5934 *ebx = 0; 5935 *ecx = 0; 5936 *edx = 0; 5937 break; 5938 case 0xC0000001: 5939 /* Support for VIA CPU's CPUID instruction */ 5940 *eax = env->cpuid_version; 5941 *ebx = 0; 5942 *ecx = 0; 5943 *edx = env->features[FEAT_C000_0001_EDX]; 5944 break; 5945 case 0xC0000002: 5946 case 0xC0000003: 5947 case 0xC0000004: 5948 /* Reserved for the future, and now filled with zero */ 5949 *eax = 0; 5950 *ebx = 0; 5951 *ecx = 0; 5952 *edx = 0; 5953 break; 5954 case 0x8000001F: 5955 *eax = sev_enabled() ? 
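/* EAX bit 1 advertises SEV support */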
0x2 : 0; 5956 *ebx = sev_get_cbit_position(); 5957 *ebx |= sev_get_reduced_phys_bits() << 6; 5958 *ecx = 0; 5959 *edx = 0; 5960 break; 5961 default: 5962 /* reserved values: zero */ 5963 *eax = 0; 5964 *ebx = 0; 5965 *ecx = 0; 5966 *edx = 0; 5967 break; 5968 } 5969 } 5970 5971 static void x86_cpu_reset(DeviceState *dev) 5972 { 5973 CPUState *s = CPU(dev); 5974 X86CPU *cpu = X86_CPU(s); 5975 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu); 5976 CPUX86State *env = &cpu->env; 5977 target_ulong cr4; 5978 uint64_t xcr0; 5979 int i; 5980 5981 xcc->parent_reset(dev); 5982 5983 memset(env, 0, offsetof(CPUX86State, end_reset_fields)); 5984 5985 env->old_exception = -1; 5986 5987 /* init to reset state */ 5988 5989 env->hflags2 |= HF2_GIF_MASK; 5990 5991 cpu_x86_update_cr0(env, 0x60000010); 5992 env->a20_mask = ~0x0; 5993 env->smbase = 0x30000; 5994 env->msr_smi_count = 0; 5995 5996 env->idt.limit = 0xffff; 5997 env->gdt.limit = 0xffff; 5998 env->ldt.limit = 0xffff; 5999 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); 6000 env->tr.limit = 0xffff; 6001 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); 6002 6003 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 6004 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | 6005 DESC_R_MASK | DESC_A_MASK); 6006 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 6007 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6008 DESC_A_MASK); 6009 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 6010 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6011 DESC_A_MASK); 6012 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 6013 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6014 DESC_A_MASK); 6015 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 6016 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6017 DESC_A_MASK); 6018 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 6019 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK | 6020 DESC_A_MASK); 6021 6022 env->eip = 0xfff0; 6023 env->regs[R_EDX] = env->cpuid_version; 6024 6025 env->eflags = 0x2; 6026 6027 /* FPU init */ 6028 for (i = 0; i < 8; i++) { 6029 env->fptags[i] = 1; 6030 } 6031 cpu_set_fpuc(env, 0x37f); 6032 6033 env->mxcsr = 0x1f80; 6034 /* All units are in INIT state. */ 6035 env->xstate_bv = 0; 6036 6037 env->pat = 0x0007040600070406ULL; 6038 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT; 6039 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) { 6040 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT; 6041 } 6042 6043 memset(env->dr, 0, sizeof(env->dr)); 6044 env->dr[6] = DR6_FIXED_1; 6045 env->dr[7] = DR7_FIXED_1; 6046 cpu_breakpoint_remove_all(s, BP_CPU); 6047 cpu_watchpoint_remove_all(s, BP_CPU); 6048 6049 cr4 = 0; 6050 xcr0 = XSTATE_FP_MASK; 6051 6052 #ifdef CONFIG_USER_ONLY 6053 /* Enable all the features for user-mode. */ 6054 if (env->features[FEAT_1_EDX] & CPUID_SSE) { 6055 xcr0 |= XSTATE_SSE_MASK; 6056 } 6057 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6058 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6059 if (env->features[esa->feature] & esa->bits) { 6060 xcr0 |= 1ull << i; 6061 } 6062 } 6063 6064 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) { 6065 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK; 6066 } 6067 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) { 6068 cr4 |= CR4_FSGSBASE_MASK; 6069 } 6070 #endif 6071 6072 env->xcr0 = xcr0; 6073 cpu_x86_update_cr4(env, cr4); 6074 6075 /* 6076 * SDM 11.11.5 requires: 6077 * - IA32_MTRR_DEF_TYPE MSR.E = 0 6078 * - IA32_MTRR_PHYSMASKn.V = 0 6079 * All other bits are undefined. For simplification, zero it all. 
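* With the E flag clear the MTRRs are disabled and all of physical memory is treated as UC until firmware reprograms them.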
6080 */ 6081 env->mtrr_deftype = 0; 6082 memset(env->mtrr_var, 0, sizeof(env->mtrr_var)); 6083 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed)); 6084 6085 env->interrupt_injected = -1; 6086 env->exception_nr = -1; 6087 env->exception_pending = 0; 6088 env->exception_injected = 0; 6089 env->exception_has_payload = false; 6090 env->exception_payload = 0; 6091 env->nmi_injected = false; 6092 #if !defined(CONFIG_USER_ONLY) 6093 /* We hard-wire the BSP to the first CPU. */ 6094 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); 6095 6096 s->halted = !cpu_is_bsp(cpu); 6097 6098 if (kvm_enabled()) { 6099 kvm_arch_reset_vcpu(cpu); 6100 } 6101 else if (hvf_enabled()) { 6102 hvf_reset_vcpu(s); 6103 } 6104 #endif 6105 } 6106 6107 #ifndef CONFIG_USER_ONLY 6108 bool cpu_is_bsp(X86CPU *cpu) 6109 { 6110 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP; 6111 } 6112 6113 /* TODO: remove me, when reset over QOM tree is implemented */ 6114 static void x86_cpu_machine_reset_cb(void *opaque) 6115 { 6116 X86CPU *cpu = opaque; 6117 cpu_reset(CPU(cpu)); 6118 } 6119 #endif 6120 6121 static void mce_init(X86CPU *cpu) 6122 { 6123 CPUX86State *cenv = &cpu->env; 6124 unsigned int bank; 6125 6126 if (((cenv->cpuid_version >> 8) & 0xf) >= 6 6127 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == 6128 (CPUID_MCE | CPUID_MCA)) { 6129 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF | 6130 (cpu->enable_lmce ? MCG_LMCE_P : 0); 6131 cenv->mcg_ctl = ~(uint64_t)0; 6132 for (bank = 0; bank < MCE_BANKS_DEF; bank++) { 6133 cenv->mce_banks[bank * 4] = ~(uint64_t)0; 6134 } 6135 } 6136 } 6137 6138 #ifndef CONFIG_USER_ONLY 6139 APICCommonClass *apic_get_class(void) 6140 { 6141 const char *apic_type = "apic"; 6142 6143 /* TODO: in-kernel irqchip for hvf */ 6144 if (kvm_apic_in_kernel()) { 6145 apic_type = "kvm-apic"; 6146 } else if (xen_enabled()) { 6147 apic_type = "xen-apic"; 6148 } 6149 6150 return APIC_COMMON_CLASS(object_class_by_name(apic_type)); 6151 } 6152 6153 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp) 6154 { 6155 APICCommonState *apic; 6156 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class()); 6157 6158 cpu->apic_state = DEVICE(object_new_with_class(apic_class)); 6159 6160 object_property_add_child(OBJECT(cpu), "lapic", 6161 OBJECT(cpu->apic_state)); 6162 object_unref(OBJECT(cpu->apic_state)); 6163 6164 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); 6165 /* TODO: convert to link<> */ 6166 apic = APIC_COMMON(cpu->apic_state); 6167 apic->cpu = cpu; 6168 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; 6169 } 6170 6171 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6172 { 6173 APICCommonState *apic; 6174 static bool apic_mmio_map_once; 6175 6176 if (cpu->apic_state == NULL) { 6177 return; 6178 } 6179 qdev_realize(DEVICE(cpu->apic_state), NULL, errp); 6180 6181 /* Map APIC MMIO area */ 6182 apic = APIC_COMMON(cpu->apic_state); 6183 if (!apic_mmio_map_once) { 6184 memory_region_add_subregion_overlap(get_system_memory(), 6185 apic->apicbase & 6186 MSR_IA32_APICBASE_BASE, 6187 &apic->io_memory, 6188 0x1000); 6189 apic_mmio_map_once = true; 6190 } 6191 } 6192 6193 static void x86_cpu_machine_done(Notifier *n, void *unused) 6194 { 6195 X86CPU *cpu = container_of(n, X86CPU, machine_done); 6196 MemoryRegion *smram = 6197 (MemoryRegion *) object_resolve_path("/machine/smram", NULL); 6198 6199 if (smram) { 6200 cpu->smram = g_new(MemoryRegion, 1); 6201 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram", 6202 smram, 0, 4 * GiB); 6203 
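/* Alias the machine's SMRAM region over the low 4 GiB and map it into the SMM address space below with higher priority than normal memory. */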
memory_region_set_enabled(cpu->smram, true); 6204 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1); 6205 } 6206 } 6207 #else 6208 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) 6209 { 6210 } 6211 #endif 6212 6213 /* Note: Only safe for use on x86(-64) hosts */ 6214 static uint32_t x86_host_phys_bits(void) 6215 { 6216 uint32_t eax; 6217 uint32_t host_phys_bits; 6218 6219 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL); 6220 if (eax >= 0x80000008) { 6221 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL); 6222 /* Note: According to AMD doc 25481 rev 2.34 they have a field 6223 * at 23:16 that can specify a maximum physical address bits for 6224 * the guest that can override this value; but I've not seen 6225 * anything with that set. 6226 */ 6227 host_phys_bits = eax & 0xff; 6228 } else { 6229 /* It's an odd 64 bit machine that doesn't have the leaf for 6230 * physical address bits; fall back to 36 that's most older 6231 * Intel. 6232 */ 6233 host_phys_bits = 36; 6234 } 6235 6236 return host_phys_bits; 6237 } 6238 6239 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value) 6240 { 6241 if (*min < value) { 6242 *min = value; 6243 } 6244 } 6245 6246 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */ 6247 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w) 6248 { 6249 CPUX86State *env = &cpu->env; 6250 FeatureWordInfo *fi = &feature_word_info[w]; 6251 uint32_t eax = fi->cpuid.eax; 6252 uint32_t region = eax & 0xF0000000; 6253 6254 assert(feature_word_info[w].type == CPUID_FEATURE_WORD); 6255 if (!env->features[w]) { 6256 return; 6257 } 6258 6259 switch (region) { 6260 case 0x00000000: 6261 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax); 6262 break; 6263 case 0x80000000: 6264 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax); 6265 break; 6266 case 0xC0000000: 6267 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax); 6268 break; 6269 } 6270 6271 if (eax == 7) { 6272 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7, 6273 fi->cpuid.ecx); 6274 } 6275 } 6276 6277 /* Calculate XSAVE components based on the configured CPU feature flags */ 6278 static void x86_cpu_enable_xsave_components(X86CPU *cpu) 6279 { 6280 CPUX86State *env = &cpu->env; 6281 int i; 6282 uint64_t mask; 6283 6284 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { 6285 return; 6286 } 6287 6288 mask = 0; 6289 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { 6290 const ExtSaveArea *esa = &x86_ext_save_areas[i]; 6291 if (env->features[esa->feature] & esa->bits) { 6292 mask |= (1ULL << i); 6293 } 6294 } 6295 6296 env->features[FEAT_XSAVE_COMP_LO] = mask; 6297 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; 6298 } 6299 6300 /***** Steps involved on loading and filtering CPUID data 6301 * 6302 * When initializing and realizing a CPU object, the steps 6303 * involved in setting up CPUID data are: 6304 * 6305 * 1) Loading CPU model definition (X86CPUDefinition). This is 6306 * implemented by x86_cpu_load_model() and should be completely 6307 * transparent, as it is done automatically by instance_init. 6308 * No code should need to look at X86CPUDefinition structs 6309 * outside instance_init. 6310 * 6311 * 2) CPU expansion. This is done by realize before CPUID 6312 * filtering, and will make sure host/accelerator data is 6313 * loaded for CPU models that depend on host capabilities 6314 * (e.g. "host"). Done by x86_cpu_expand_features(). 6315 * 6316 * 3) CPUID filtering. 
This initializes extra data related to 6317 * CPUID, and checks if the host supports all capabilities 6318 * required by the CPU. Runnability of a CPU model is 6319 * determined at this step. Done by x86_cpu_filter_features(). 6320 * 6321 * Some operations don't require all steps to be performed. 6322 * More precisely: 6323 * 6324 * - CPU instance creation (instance_init) will run only CPU 6325 * model loading. CPU expansion can't run at instance_init-time 6326 * because host/accelerator data may be not available yet. 6327 * - CPU realization will perform both CPU model expansion and CPUID 6328 * filtering, and return an error in case one of them fails. 6329 * - query-cpu-definitions needs to run all 3 steps. It needs 6330 * to run CPUID filtering, as the 'unavailable-features' 6331 * field is set based on the filtering results. 6332 * - The query-cpu-model-expansion QMP command only needs to run 6333 * CPU model loading and CPU expansion. It should not filter 6334 * any CPUID data based on host capabilities. 6335 */ 6336 6337 /* Expand CPU configuration data, based on configured features 6338 * and host/accelerator capabilities when appropriate. 6339 */ 6340 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp) 6341 { 6342 CPUX86State *env = &cpu->env; 6343 FeatureWord w; 6344 int i; 6345 GList *l; 6346 Error *local_err = NULL; 6347 6348 for (l = plus_features; l; l = l->next) { 6349 const char *prop = l->data; 6350 object_property_set_bool(OBJECT(cpu), true, prop, &local_err); 6351 if (local_err) { 6352 goto out; 6353 } 6354 } 6355 6356 for (l = minus_features; l; l = l->next) { 6357 const char *prop = l->data; 6358 object_property_set_bool(OBJECT(cpu), false, prop, &local_err); 6359 if (local_err) { 6360 goto out; 6361 } 6362 } 6363 6364 /*TODO: Now cpu->max_features doesn't overwrite features 6365 * set using QOM properties, and we can convert 6366 * plus_features & minus_features to global properties 6367 * inside x86_cpu_parse_featurestr() too. 6368 */ 6369 if (cpu->max_features) { 6370 for (w = 0; w < FEATURE_WORDS; w++) { 6371 /* Override only features that weren't set explicitly 6372 * by the user. 6373 */ 6374 env->features[w] |= 6375 x86_cpu_get_supported_feature_word(w, cpu->migratable) & 6376 ~env->user_features[w] & 6377 ~feature_word_info[w].no_autoenable_flags; 6378 } 6379 } 6380 6381 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { 6382 FeatureDep *d = &feature_dependencies[i]; 6383 if (!(env->features[d->from.index] & d->from.mask)) { 6384 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask; 6385 6386 /* Not an error unless the dependent feature was added explicitly. 
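* Bits that were only inherited from the CPU model are cleared silently; only bits the user set explicitly are passed to mark_unavailable_features() and warned about.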
*/ 6387 mark_unavailable_features(cpu, d->to.index, 6388 unavailable_features & env->user_features[d->to.index], 6389 "This feature depends on other features that were not requested"); 6390 6391 env->user_features[d->to.index] |= unavailable_features; 6392 env->features[d->to.index] &= ~unavailable_features; 6393 } 6394 } 6395 6396 if (!kvm_enabled() || !cpu->expose_kvm) { 6397 env->features[FEAT_KVM] = 0; 6398 } 6399 6400 x86_cpu_enable_xsave_components(cpu); 6401 6402 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */ 6403 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX); 6404 if (cpu->full_cpuid_auto_level) { 6405 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX); 6406 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX); 6407 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); 6408 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); 6409 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); 6410 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); 6411 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); 6412 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); 6413 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX); 6414 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX); 6415 x86_cpu_adjust_feat_level(cpu, FEAT_SVM); 6416 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE); 6417 6418 /* Intel Processor Trace requires CPUID[0x14] */ 6419 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) { 6420 if (cpu->intel_pt_auto_level) { 6421 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14); 6422 } else if (cpu->env.cpuid_min_level < 0x14) { 6423 mark_unavailable_features(cpu, FEAT_7_0_EBX, 6424 CPUID_7_0_EBX_INTEL_PT, 6425 "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\""); 6426 } 6427 } 6428 6429 /* CPU topology with multi-dies support requires CPUID[0x1F] */ 6430 if (env->nr_dies > 1) { 6431 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F); 6432 } 6433 6434 /* SVM requires CPUID[0x8000000A] */ 6435 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) { 6436 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A); 6437 } 6438 6439 /* SEV requires CPUID[0x8000001F] */ 6440 if (sev_enabled()) { 6441 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); 6442 } 6443 } 6444 6445 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ 6446 if (env->cpuid_level_func7 == UINT32_MAX) { 6447 env->cpuid_level_func7 = env->cpuid_min_level_func7; 6448 } 6449 if (env->cpuid_level == UINT32_MAX) { 6450 env->cpuid_level = env->cpuid_min_level; 6451 } 6452 if (env->cpuid_xlevel == UINT32_MAX) { 6453 env->cpuid_xlevel = env->cpuid_min_xlevel; 6454 } 6455 if (env->cpuid_xlevel2 == UINT32_MAX) { 6456 env->cpuid_xlevel2 = env->cpuid_min_xlevel2; 6457 } 6458 6459 out: 6460 if (local_err != NULL) { 6461 error_propagate(errp, local_err); 6462 } 6463 } 6464 6465 /* 6466 * Finishes initialization of CPUID data, filters CPU feature 6467 * words based on host availability of each feature. 6468 * 6469 * Returns: 0 if all flags are supported by the host, non-zero otherwise. 6470 */ 6471 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose) 6472 { 6473 CPUX86State *env = &cpu->env; 6474 FeatureWord w; 6475 const char *prefix = NULL; 6476 6477 if (verbose) { 6478 prefix = accel_uses_host_cpuid() 6479 ? 
"host doesn't support requested feature" 6480 : "TCG doesn't support requested feature"; 6481 } 6482 6483 for (w = 0; w < FEATURE_WORDS; w++) { 6484 uint64_t host_feat = 6485 x86_cpu_get_supported_feature_word(w, false); 6486 uint64_t requested_features = env->features[w]; 6487 uint64_t unavailable_features = requested_features & ~host_feat; 6488 mark_unavailable_features(cpu, w, unavailable_features, prefix); 6489 } 6490 6491 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) && 6492 kvm_enabled()) { 6493 KVMState *s = CPU(cpu)->kvm_state; 6494 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX); 6495 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX); 6496 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX); 6497 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX); 6498 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX); 6499 6500 if (!eax_0 || 6501 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) || 6502 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) || 6503 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) || 6504 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) < 6505 INTEL_PT_ADDR_RANGES_NUM) || 6506 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) != 6507 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) || 6508 (ecx_0 & INTEL_PT_IP_LIP)) { 6509 /* 6510 * Processor Trace capabilities aren't configurable, so if the 6511 * host can't emulate the capabilities we report on 6512 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host. 6513 */ 6514 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix); 6515 } 6516 } 6517 } 6518 6519 static void x86_cpu_realizefn(DeviceState *dev, Error **errp) 6520 { 6521 CPUState *cs = CPU(dev); 6522 X86CPU *cpu = X86_CPU(dev); 6523 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev); 6524 CPUX86State *env = &cpu->env; 6525 Error *local_err = NULL; 6526 static bool ht_warned; 6527 6528 if (xcc->host_cpuid_required) { 6529 if (!accel_uses_host_cpuid()) { 6530 g_autofree char *name = x86_cpu_class_get_model_name(xcc); 6531 error_setg(&local_err, "CPU model '%s' requires KVM", name); 6532 goto out; 6533 } 6534 } 6535 6536 if (cpu->max_features && accel_uses_host_cpuid()) { 6537 if (enable_cpu_pm) { 6538 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx, 6539 &cpu->mwait.ecx, &cpu->mwait.edx); 6540 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR; 6541 } 6542 if (kvm_enabled() && cpu->ucode_rev == 0) { 6543 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state, 6544 MSR_IA32_UCODE_REV); 6545 } 6546 } 6547 6548 if (cpu->ucode_rev == 0) { 6549 /* The default is the same as KVM's. */ 6550 if (IS_AMD_CPU(env)) { 6551 cpu->ucode_rev = 0x01000065; 6552 } else { 6553 cpu->ucode_rev = 0x100000000ULL; 6554 } 6555 } 6556 6557 /* mwait extended info: needed for Core compatibility */ 6558 /* We always wake on interrupt even if host does not have the capability */ 6559 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE; 6560 6561 if (cpu->apic_id == UNASSIGNED_APIC_ID) { 6562 error_setg(errp, "apic-id property was not initialized properly"); 6563 return; 6564 } 6565 6566 x86_cpu_expand_features(cpu, &local_err); 6567 if (local_err) { 6568 goto out; 6569 } 6570 6571 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid); 6572 6573 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) { 6574 error_setg(&local_err, 6575 accel_uses_host_cpuid() ? 
6576 "Host doesn't support requested features" : 6577 "TCG doesn't support requested features"); 6578 goto out; 6579 } 6580 6581 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on 6582 * CPUID[1].EDX. 6583 */ 6584 if (IS_AMD_CPU(env)) { 6585 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES; 6586 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX] 6587 & CPUID_EXT2_AMD_ALIASES); 6588 } 6589 6590 /* For 64bit systems think about the number of physical bits to present. 6591 * ideally this should be the same as the host; anything other than matching 6592 * the host can cause incorrect guest behaviour. 6593 * QEMU used to pick the magic value of 40 bits that corresponds to 6594 * consumer AMD devices but nothing else. 6595 */ 6596 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) { 6597 if (accel_uses_host_cpuid()) { 6598 uint32_t host_phys_bits = x86_host_phys_bits(); 6599 static bool warned; 6600 6601 /* Print a warning if the user set it to a value that's not the 6602 * host value. 6603 */ 6604 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 && 6605 !warned) { 6606 warn_report("Host physical bits (%u)" 6607 " does not match phys-bits property (%u)", 6608 host_phys_bits, cpu->phys_bits); 6609 warned = true; 6610 } 6611 6612 if (cpu->host_phys_bits) { 6613 /* The user asked for us to use the host physical bits */ 6614 cpu->phys_bits = host_phys_bits; 6615 if (cpu->host_phys_bits_limit && 6616 cpu->phys_bits > cpu->host_phys_bits_limit) { 6617 cpu->phys_bits = cpu->host_phys_bits_limit; 6618 } 6619 } 6620 6621 if (cpu->phys_bits && 6622 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS || 6623 cpu->phys_bits < 32)) { 6624 error_setg(errp, "phys-bits should be between 32 and %u " 6625 " (but is %u)", 6626 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits); 6627 return; 6628 } 6629 } else { 6630 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) { 6631 error_setg(errp, "TCG only supports phys-bits=%u", 6632 TCG_PHYS_ADDR_BITS); 6633 return; 6634 } 6635 } 6636 /* 0 means it was not explicitly set by the user (or by machine 6637 * compat_props or by the host code above). In this case, the default 6638 * is the value used by TCG (40). 6639 */ 6640 if (cpu->phys_bits == 0) { 6641 cpu->phys_bits = TCG_PHYS_ADDR_BITS; 6642 } 6643 } else { 6644 /* For 32 bit systems don't use the user set value, but keep 6645 * phys_bits consistent with what we tell the guest. 

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on the inputs (sockets, cores, threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading (%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling the topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}

static void x86_cpu_unrealizefn(DeviceState *dev)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev);
}

typedef struct BitProperty {
    FeatureWord w;
    uint64_t mask;
} BitProperty;

static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    uint64_t f = cpu->env.features[fp->w];
    bool value = (f & fp->mask) == fp->mask;
    visit_type_bool(v, name, &value, errp);
}

static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    cpu->env.user_features[fp->w] |= fp->mask;
}

static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}
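
/*
 * The getter/setter pair above backs every named CPU feature flag:
 * "+foo", "-foo" and "foo=on|off" on the -cpu command line resolve to
 * these boolean properties.  The setter also records the bit in
 * env->user_features so that later expansion and filtering know the
 * value was an explicit user request.
 */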

/* Register a boolean property to get/set a single bit in a uint64_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint64_t mask = (1ULL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp);
    }
}

static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, w, bitnr);
}

#if !defined(CONFIG_USER_ONLY)
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}

static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
                                       const char *name, void *opaque,
                                       Error **errp)
{
    CPUState *cs = CPU(obj);
    GuestPanicInformation *panic_info;

    if (!cs->crash_occurred) {
        error_setg(errp, "No crash occurred");
        return;
    }

    panic_info = x86_cpu_get_crash_info(cs);
    if (panic_info == NULL) {
        error_setg(errp, "No crash information");
        return;
    }

    visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
                                     errp);
    qapi_free_GuestPanicInformation(panic_info);
}
#endif /* !CONFIG_USER_ONLY */

static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    env->nr_dies = 1;
    env->nr_nodes = 1;
    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor);
object_property_add_str(obj, "model-id", 6963 x86_cpuid_get_model_id, 6964 x86_cpuid_set_model_id); 6965 object_property_add(obj, "tsc-frequency", "int", 6966 x86_cpuid_get_tsc_freq, 6967 x86_cpuid_set_tsc_freq, NULL, NULL); 6968 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo", 6969 x86_cpu_get_feature_words, 6970 NULL, NULL, (void *)env->features); 6971 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo", 6972 x86_cpu_get_feature_words, 6973 NULL, NULL, (void *)cpu->filtered_features); 6974 /* 6975 * The "unavailable-features" property has the same semantics as 6976 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions" 6977 * QMP command: they list the features that would have prevented the 6978 * CPU from running if the "enforce" flag was set. 6979 */ 6980 object_property_add(obj, "unavailable-features", "strList", 6981 x86_cpu_get_unavailable_features, 6982 NULL, NULL, NULL); 6983 6984 #if !defined(CONFIG_USER_ONLY) 6985 object_property_add(obj, "crash-information", "GuestPanicInformation", 6986 x86_cpu_get_crash_info_qom, NULL, NULL, NULL); 6987 #endif 6988 6989 for (w = 0; w < FEATURE_WORDS; w++) { 6990 int bitnr; 6991 6992 for (bitnr = 0; bitnr < 64; bitnr++) { 6993 x86_cpu_register_feature_bit_props(cpu, w, bitnr); 6994 } 6995 } 6996 6997 object_property_add_alias(obj, "sse3", obj, "pni"); 6998 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq"); 6999 object_property_add_alias(obj, "sse4-1", obj, "sse4.1"); 7000 object_property_add_alias(obj, "sse4-2", obj, "sse4.2"); 7001 object_property_add_alias(obj, "xd", obj, "nx"); 7002 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt"); 7003 object_property_add_alias(obj, "i64", obj, "lm"); 7004 7005 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl"); 7006 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust"); 7007 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt"); 7008 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm"); 7009 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy"); 7010 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr"); 7011 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core"); 7012 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb"); 7013 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay"); 7014 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu"); 7015 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf"); 7016 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time"); 7017 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi"); 7018 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt"); 7019 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control"); 7020 object_property_add_alias(obj, "svm_lock", obj, "svm-lock"); 7021 object_property_add_alias(obj, "nrip_save", obj, "nrip-save"); 7022 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale"); 7023 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean"); 7024 object_property_add_alias(obj, "pause_filter", obj, "pause-filter"); 7025 object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); 7026 object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); 7027 7028 if (xcc->model) { 7029 x86_cpu_load_model(cpu, xcc->model); 7030 } 7031 } 7032 7033 static int64_t x86_cpu_get_arch_id(CPUState *cs) 7034 { 7035 X86CPU *cpu = X86_CPU(cs); 7036 7037 return cpu->apic_id; 7038 } 7039 7040 static bool x86_cpu_get_paging_enabled(const 
static bool x86_cpu_get_paging_enabled(const CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);

    return cpu->env.cr[0] & CR0_PG_MASK;
}

static void x86_cpu_set_pc(CPUState *cs, vaddr value)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = value;
}

static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    X86CPU *cpu = X86_CPU(cs);

    cpu->env.eip = tb->pc - tb->cs_base;
}

int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}

static bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}

static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
                  : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
                  : bfd_mach_i386_i8086);
    info->print_insn = print_insn_i386;

    info->cap_arch = CS_ARCH_X86;
    info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
                      : env->hflags & HF_CS32_MASK ? CS_MODE_32
                      : CS_MODE_16);
    info->cap_insn_unit = 1;
    info->cap_insn_split = 8;
}
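
/*
 * Recompute the cached env->hflags bits from the authoritative register
 * state (CR0/CR4, EFER, EFLAGS and the segment descriptor flags).  Intended
 * for callers that load CPU state wholesale, e.g. after fetching registers
 * from an accelerator.
 */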
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}

static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_RETRY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features, 7209 HYPERV_FEAT_REENLIGHTENMENT, 0), 7210 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features, 7211 HYPERV_FEAT_TLBFLUSH, 0), 7212 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features, 7213 HYPERV_FEAT_EVMCS, 0), 7214 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features, 7215 HYPERV_FEAT_IPI, 0), 7216 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, 7217 HYPERV_FEAT_STIMER_DIRECT, 0), 7218 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, 7219 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), 7220 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), 7221 7222 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), 7223 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), 7224 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false), 7225 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true), 7226 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), 7227 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), 7228 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), 7229 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), 7230 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, 7231 UINT32_MAX), 7232 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), 7233 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX), 7234 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX), 7235 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0), 7236 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0), 7237 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0), 7238 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0), 7239 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true), 7240 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id), 7241 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true), 7242 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false), 7243 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), 7244 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, 7245 false), 7246 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), 7247 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), 7248 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, 7249 true), 7250 /* 7251 * lecacy_cache defaults to true unless the CPU model provides its 7252 * own cache information (see x86_cpu_load_def()). 7253 */ 7254 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true), 7255 7256 /* 7257 * From "Requirements for Implementing the Microsoft 7258 * Hypervisor Interface": 7259 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs 7260 * 7261 * "Starting with Windows Server 2012 and Windows 8, if 7262 * CPUID.40000005.EAX contains a value of -1, Windows assumes that 7263 * the hypervisor imposes no specific limit to the number of VPs. 7264 * In this case, Windows Server 2012 guest VMs may use more than 7265 * 64 VPs, up to the maximum supported number of processors applicable 7266 * to the specific Windows version being used." 
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};

static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    device_class_set_props(dc, x86_cpu_properties);

    device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}

static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};

/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    xcc->ordering = 8;
}

static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
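
/*
 * Register the abstract TYPE_X86_CPU class, one concrete type per entry in
 * builtin_x86_defs[], the "max" and "base" models, and, when KVM or HVF
 * support is built in, the "host" model.
 */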
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_types(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)