// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own cache. However, there
 * are CPU models in which having conflicting memory types still leads to
 * unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent it from being used on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_model) {
	case INTEL_FAM6_CORE_YONAH:
	case INTEL_FAM6_CORE2_MEROM:
	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 0;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * The ring 3 MONITOR/MWAIT feature cannot be detected without a
	 * CPU family and model comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};
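
/*
 * How the table is used (illustration only, the revision is taken from the
 * entries above): a KABYLAKE part at stepping 0x0B still running microcode
 * revision 0x80 or older matches its table entry, so bad_spectre_microcode()
 * below returns true and early_init_intel() clears the IBRS/IBPB/STIBP/
 * SPEC_CTRL feature bits; any newer revision on that stepping is accepted.
 */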

static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that they are running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	if (c->x86 != 6)
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we need
	 * the microcode to have already been loaded... so if it is not,
	 * recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case INTEL_FAM6_ATOM_SALTWELL_MID:
		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		case INTEL_FAM6_ATOM_SILVERMONT_MID:
		case INTEL_FAM6_ATOM_AIRMONT_NP:
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 * the TLB when any changes are made to any of the page table entries.
	 * The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number of
		 * APIC IDs which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}
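
	/*
	 * Worked example with hypothetical CPUID.1 output: if HTT is set
	 * and EBX[23:16] = 16, then sixteen APIC IDs are reserved per
	 * package and get_count_order(16) = 4, so the topology code will
	 * shift APIC IDs right by four bits to derive the package ID.
	 */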

	check_memory_type_self_snoop_errata(c);

	/*
	 * Get the number of SMT siblings early from the extended topology
	 * leaf, if available. Otherwise try the legacy SMT detection.
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do CPU identification work.
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Bail out on the boot CPU; we only care about secondary CPUs here */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128	0

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
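
/*
 * Worked example (hypothetical MSR value, for illustration only):
 * IA32_TME_ACTIVATE = 0x0001000100000003 decodes as locked and enabled,
 * policy 0 (AES-XTS-128), one KeyID bit (i.e. one MKTME KeyID besides
 * plain TME) and AES-XTS-128 set in the supported crypto algorithm mask.
 * With one KeyID bit, detect_tme() below reduces x86_phys_bits by one,
 * e.g. from 46 to 45 usable physical address bits.
 */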

/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED			0
#define MKTME_DISABLED			1
#define MKTME_UNINITIALIZED		2
static int mktme_status = MKTME_UNINITIALIZED;

static void detect_tme(struct cpuinfo_x86 *c)
{
	u64 tme_activate, tme_policy, tme_crypto_algs;
	int keyid_bits = 0, nr_keyids = 0;
	static u64 tme_activate_cpu0 = 0;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (mktme_status != MKTME_UNINITIALIZED) {
		if (tme_activate != tme_activate_cpu0) {
			/* Broken BIOS? */
			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
			pr_err_once("x86/tme: MKTME is not usable\n");
			mktme_status = MKTME_DISABLED;

			/* Proceed. We may need to exclude bits from x86_phys_bits. */
		}
	} else {
		tme_activate_cpu0 = tme_activate;
	}

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		mktme_status = MKTME_DISABLED;
		return;
	}

	if (mktme_status != MKTME_UNINITIALIZED)
		goto detect_keyid_bits;

	pr_info("x86/tme: enabled by BIOS\n");

	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);

	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
		       tme_crypto_algs);
		mktme_status = MKTME_DISABLED;
	}
detect_keyid_bits:
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	nr_keyids = (1UL << keyid_bits) - 1;
	if (nr_keyids) {
		pr_info_once("x86/mktme: enabled by BIOS\n");
		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
	} else {
		pr_info_once("x86/mktme: disabled by BIOS\n");
	}

	if (mktme_status == MKTME_UNINITIALIZED) {
		/* MKTME is usable */
		mktme_status = MKTME_ENABLED;
	}

	/*
	 * KeyID bits effectively lower the number of physical address
	 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
}

static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}
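
/*
 * Note on the flow above: msr_misc_features_shadow is a per-CPU shadow of
 * MSR_MISC_FEATURES_ENABLES. init_intel_misc_features() clears the shadow,
 * lets init_cpuid_fault() and probe_xeon_phi_r3mwait() set the matching
 * capability and shadow bits (e.g. the RING3MWAIT enable bit), and then
 * writes the accumulated shadow value back to the MSR in one shot.
 */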

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme(c);

	init_intel_misc_features(c);

	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
		tsx_enable();
	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
		tsx_disable();
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K		0x01
#define TLB_INST_4M		0x02
#define TLB_INST_2M_4M		0x03

#define TLB_INST_ALL		0x05
#define TLB_INST_1G		0x06

#define TLB_DATA_4K		0x11
#define TLB_DATA_4M		0x12
#define TLB_DATA_2M_4M		0x13
#define TLB_DATA_4K_4M		0x14

#define TLB_DATA_1G		0x16

#define TLB_DATA0_4K		0x21
#define TLB_DATA0_4M		0x22
#define TLB_DATA0_2M_4M		0x23

#define STLB_4K			0x41
#define STLB_4K_2M		0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, full associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, full associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}
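
/*
 * Illustrative (hypothetical) CPUID leaf 0x2 decode for the loop below:
 * EAX = 0x76035a01 means byte 0 is 0x01, so the leaf only needs to be
 * read once, and the remaining bytes 0x5a, 0x03 and 0x76 are descriptors
 * handed to intel_tlb_lookup() (0x63, for instance, would report a
 * 4-entry 1 GByte data TLB). A register whose bit 31 is set is cleared
 * and contributes no descriptors.
 */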

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);