1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/kernel.h> 3 4 #include <linux/string.h> 5 #include <linux/bitops.h> 6 #include <linux/smp.h> 7 #include <linux/sched.h> 8 #include <linux/sched/clock.h> 9 #include <linux/thread_info.h> 10 #include <linux/init.h> 11 #include <linux/uaccess.h> 12 13 #include <asm/cpufeature.h> 14 #include <asm/pgtable.h> 15 #include <asm/msr.h> 16 #include <asm/bugs.h> 17 #include <asm/cpu.h> 18 #include <asm/intel-family.h> 19 #include <asm/microcode_intel.h> 20 #include <asm/hwcap2.h> 21 #include <asm/elf.h> 22 23 #ifdef CONFIG_X86_64 24 #include <linux/topology.h> 25 #endif 26 27 #include "cpu.h" 28 29 #ifdef CONFIG_X86_LOCAL_APIC 30 #include <asm/mpspec.h> 31 #include <asm/apic.h> 32 #endif 33 34 /* 35 * Just in case our CPU detection goes bad, or you have a weird system, 36 * allow a way to override the automatic disabling of MPX. 37 */ 38 static int forcempx; 39 40 static int __init forcempx_setup(char *__unused) 41 { 42 forcempx = 1; 43 44 return 1; 45 } 46 __setup("intel-skd-046-workaround=disable", forcempx_setup); 47 48 void check_mpx_erratum(struct cpuinfo_x86 *c) 49 { 50 if (forcempx) 51 return; 52 /* 53 * Turn off the MPX feature on CPUs where SMEP is not 54 * available or disabled. 55 * 56 * Works around Intel Erratum SKD046: "Branch Instructions 57 * May Initialize MPX Bound Registers Incorrectly". 58 * 59 * This might falsely disable MPX on systems without 60 * SMEP, like Atom processors without SMEP. But there 61 * is no such hardware known at the moment. 62 */ 63 if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) { 64 setup_clear_cpu_cap(X86_FEATURE_MPX); 65 pr_warn("x86/mpx: Disabling MPX since SMEP not present\n"); 66 } 67 } 68 69 static bool ring3mwait_disabled __read_mostly; 70 71 static int __init ring3mwait_disable(char *__unused) 72 { 73 ring3mwait_disabled = true; 74 return 0; 75 } 76 __setup("ring3mwait=disable", ring3mwait_disable); 77 78 static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c) 79 { 80 /* 81 * Ring 3 MONITOR/MWAIT feature cannot be detected without 82 * cpu model and family comparison. 83 */ 84 if (c->x86 != 6) 85 return; 86 switch (c->x86_model) { 87 case INTEL_FAM6_XEON_PHI_KNL: 88 case INTEL_FAM6_XEON_PHI_KNM: 89 break; 90 default: 91 return; 92 } 93 94 if (ring3mwait_disabled) 95 return; 96 97 set_cpu_cap(c, X86_FEATURE_RING3MWAIT); 98 this_cpu_or(msr_misc_features_shadow, 99 1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT); 100 101 if (c == &boot_cpu_data) 102 ELF_HWCAP2 |= HWCAP2_RING3MWAIT; 103 } 104 105 /* 106 * Early microcode releases for the Spectre v2 mitigation were broken. 107 * Information taken from; 108 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf 109 * - https://kb.vmware.com/s/article/52345 110 * - Microcode revisions observed in the wild 111 * - Release note from 20180108 microcode release 112 */ 113 struct sku_microcode { 114 u8 model; 115 u8 stepping; 116 u32 microcode; 117 }; 118 static const struct sku_microcode spectre_bad_microcodes[] = { 119 { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 }, 120 { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 }, 121 { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 }, 122 { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 }, 123 { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 }, 124 { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e }, 125 { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c }, 126 { INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 }, 127 { INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 }, 128 { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 }, 129 { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b }, 130 { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 }, 131 { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 }, 132 { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 }, 133 { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 }, 134 { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 }, 135 { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 }, 136 { INTEL_FAM6_HASWELL_X, 0x02, 0x3b }, 137 { INTEL_FAM6_HASWELL_X, 0x04, 0x10 }, 138 { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a }, 139 /* Updated in the 20180108 release; blacklist until we know otherwise */ 140 { INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 }, 141 /* Observed in the wild */ 142 { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b }, 143 { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 }, 144 }; 145 146 static bool bad_spectre_microcode(struct cpuinfo_x86 *c) 147 { 148 int i; 149 150 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { 151 if (c->x86_model == spectre_bad_microcodes[i].model && 152 c->x86_mask == spectre_bad_microcodes[i].stepping) 153 return (c->microcode <= spectre_bad_microcodes[i].microcode); 154 } 155 return false; 156 } 157 158 static void early_init_intel(struct cpuinfo_x86 *c) 159 { 160 u64 misc_enable; 161 162 /* Unmask CPUID levels if masked: */ 163 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { 164 if (msr_clear_bit(MSR_IA32_MISC_ENABLE, 165 MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) { 166 c->cpuid_level = cpuid_eax(0); 167 get_cpu_cap(c); 168 } 169 } 170 171 if ((c->x86 == 0xf && c->x86_model >= 0x03) || 172 (c->x86 == 0x6 && c->x86_model >= 0x0e)) 173 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 174 175 if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) 176 c->microcode = intel_get_microcode_revision(); 177 178 /* Now if any of them are set, check the blacklist and clear the lot */ 179 if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) || 180 cpu_has(c, X86_FEATURE_INTEL_STIBP) || 181 cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) || 182 cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) { 183 pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n"); 184 setup_clear_cpu_cap(X86_FEATURE_IBRS); 185 setup_clear_cpu_cap(X86_FEATURE_IBPB); 186 setup_clear_cpu_cap(X86_FEATURE_STIBP); 187 setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL); 188 setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP); 189 } 190 191 /* 192 * Atom erratum AAE44/AAF40/AAG38/AAH41: 193 * 194 * A race condition between speculative fetches and invalidating 195 * a large page. This is worked around in microcode, but we 196 * need the microcode to have already been loaded... so if it is 197 * not, recommend a BIOS update and disable large pages. 198 */ 199 if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 && 200 c->microcode < 0x20e) { 201 pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n"); 202 clear_cpu_cap(c, X86_FEATURE_PSE); 203 } 204 205 #ifdef CONFIG_X86_64 206 set_cpu_cap(c, X86_FEATURE_SYSENTER32); 207 #else 208 /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */ 209 if (c->x86 == 15 && c->x86_cache_alignment == 64) 210 c->x86_cache_alignment = 128; 211 #endif 212 213 /* CPUID workaround for 0F33/0F34 CPU */ 214 if (c->x86 == 0xF && c->x86_model == 0x3 215 && (c->x86_mask == 0x3 || c->x86_mask == 0x4)) 216 c->x86_phys_bits = 36; 217 218 /* 219 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate 220 * with P/T states and does not stop in deep C-states. 221 * 222 * It is also reliable across cores and sockets. (but not across 223 * cabinets - we turn it off in that case explicitly.) 224 */ 225 if (c->x86_power & (1 << 8)) { 226 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 227 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); 228 } 229 230 /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ 231 if (c->x86 == 6) { 232 switch (c->x86_model) { 233 case 0x27: /* Penwell */ 234 case 0x35: /* Cloverview */ 235 case 0x4a: /* Merrifield */ 236 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3); 237 break; 238 default: 239 break; 240 } 241 } 242 243 /* 244 * There is a known erratum on Pentium III and Core Solo 245 * and Core Duo CPUs. 246 * " Page with PAT set to WC while associated MTRR is UC 247 * may consolidate to UC " 248 * Because of this erratum, it is better to stick with 249 * setting WC in MTRR rather than using PAT on these CPUs. 250 * 251 * Enable PAT WC only on P4, Core 2 or later CPUs. 252 */ 253 if (c->x86 == 6 && c->x86_model < 15) 254 clear_cpu_cap(c, X86_FEATURE_PAT); 255 256 /* 257 * If fast string is not enabled in IA32_MISC_ENABLE for any reason, 258 * clear the fast string and enhanced fast string CPU capabilities. 259 */ 260 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { 261 rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable); 262 if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) { 263 pr_info("Disabled fast string operations\n"); 264 setup_clear_cpu_cap(X86_FEATURE_REP_GOOD); 265 setup_clear_cpu_cap(X86_FEATURE_ERMS); 266 } 267 } 268 269 /* 270 * Intel Quark Core DevMan_001.pdf section 6.4.11 271 * "The operating system also is required to invalidate (i.e., flush) 272 * the TLB when any changes are made to any of the page table entries. 273 * The operating system must reload CR3 to cause the TLB to be flushed" 274 * 275 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h 276 * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE 277 * to be modified. 278 */ 279 if (c->x86 == 5 && c->x86_model == 9) { 280 pr_info("Disabling PGE capability bit\n"); 281 setup_clear_cpu_cap(X86_FEATURE_PGE); 282 } 283 284 if (c->cpuid_level >= 0x00000001) { 285 u32 eax, ebx, ecx, edx; 286 287 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); 288 /* 289 * If HTT (EDX[28]) is set EBX[16:23] contain the number of 290 * apicids which are reserved per package. Store the resulting 291 * shift value for the package management code. 292 */ 293 if (edx & (1U << 28)) 294 c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); 295 } 296 297 check_mpx_erratum(c); 298 } 299 300 #ifdef CONFIG_X86_32 301 /* 302 * Early probe support logic for ppro memory erratum #50 303 * 304 * This is called before we do cpu ident work 305 */ 306 307 int ppro_with_ram_bug(void) 308 { 309 /* Uses data from early_cpu_detect now */ 310 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && 311 boot_cpu_data.x86 == 6 && 312 boot_cpu_data.x86_model == 1 && 313 boot_cpu_data.x86_mask < 8) { 314 pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n"); 315 return 1; 316 } 317 return 0; 318 } 319 320 static void intel_smp_check(struct cpuinfo_x86 *c) 321 { 322 /* calling is from identify_secondary_cpu() ? */ 323 if (!c->cpu_index) 324 return; 325 326 /* 327 * Mask B, Pentium, but not Pentium MMX 328 */ 329 if (c->x86 == 5 && 330 c->x86_mask >= 1 && c->x86_mask <= 4 && 331 c->x86_model <= 3) { 332 /* 333 * Remember we have B step Pentia with bugs 334 */ 335 WARN_ONCE(1, "WARNING: SMP operation may be unreliable" 336 "with B stepping processors.\n"); 337 } 338 } 339 340 static int forcepae; 341 static int __init forcepae_setup(char *__unused) 342 { 343 forcepae = 1; 344 return 1; 345 } 346 __setup("forcepae", forcepae_setup); 347 348 static void intel_workarounds(struct cpuinfo_x86 *c) 349 { 350 #ifdef CONFIG_X86_F00F_BUG 351 /* 352 * All models of Pentium and Pentium with MMX technology CPUs 353 * have the F0 0F bug, which lets nonprivileged users lock up the 354 * system. Announce that the fault handler will be checking for it. 355 * The Quark is also family 5, but does not have the same bug. 356 */ 357 clear_cpu_bug(c, X86_BUG_F00F); 358 if (c->x86 == 5 && c->x86_model < 9) { 359 static int f00f_workaround_enabled; 360 361 set_cpu_bug(c, X86_BUG_F00F); 362 if (!f00f_workaround_enabled) { 363 pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n"); 364 f00f_workaround_enabled = 1; 365 } 366 } 367 #endif 368 369 /* 370 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until 371 * model 3 mask 3 372 */ 373 if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633) 374 clear_cpu_cap(c, X86_FEATURE_SEP); 375 376 /* 377 * PAE CPUID issue: many Pentium M report no PAE but may have a 378 * functionally usable PAE implementation. 379 * Forcefully enable PAE if kernel parameter "forcepae" is present. 380 */ 381 if (forcepae) { 382 pr_warn("PAE forced!\n"); 383 set_cpu_cap(c, X86_FEATURE_PAE); 384 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); 385 } 386 387 /* 388 * P4 Xeon erratum 037 workaround. 389 * Hardware prefetcher may cause stale data to be loaded into the cache. 390 */ 391 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) { 392 if (msr_set_bit(MSR_IA32_MISC_ENABLE, 393 MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) { 394 pr_info("CPU: C0 stepping P4 Xeon detected.\n"); 395 pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n"); 396 } 397 } 398 399 /* 400 * See if we have a good local APIC by checking for buggy Pentia, 401 * i.e. all B steppings and the C2 stepping of P54C when using their 402 * integrated APIC (see 11AP erratum in "Pentium Processor 403 * Specification Update"). 404 */ 405 if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && 406 (c->x86_mask < 0x6 || c->x86_mask == 0xb)) 407 set_cpu_bug(c, X86_BUG_11AP); 408 409 410 #ifdef CONFIG_X86_INTEL_USERCOPY 411 /* 412 * Set up the preferred alignment for movsl bulk memory moves 413 */ 414 switch (c->x86) { 415 case 4: /* 486: untested */ 416 break; 417 case 5: /* Old Pentia: untested */ 418 break; 419 case 6: /* PII/PIII only like movsl with 8-byte alignment */ 420 movsl_mask.mask = 7; 421 break; 422 case 15: /* P4 is OK down to 8-byte alignment */ 423 movsl_mask.mask = 7; 424 break; 425 } 426 #endif 427 428 intel_smp_check(c); 429 } 430 #else 431 static void intel_workarounds(struct cpuinfo_x86 *c) 432 { 433 } 434 #endif 435 436 static void srat_detect_node(struct cpuinfo_x86 *c) 437 { 438 #ifdef CONFIG_NUMA 439 unsigned node; 440 int cpu = smp_processor_id(); 441 442 /* Don't do the funky fallback heuristics the AMD version employs 443 for now. */ 444 node = numa_cpu_node(cpu); 445 if (node == NUMA_NO_NODE || !node_online(node)) { 446 /* reuse the value from init_cpu_to_node() */ 447 node = cpu_to_node(cpu); 448 } 449 numa_set_node(cpu, node); 450 #endif 451 } 452 453 /* 454 * find out the number of processor cores on the die 455 */ 456 static int intel_num_cpu_cores(struct cpuinfo_x86 *c) 457 { 458 unsigned int eax, ebx, ecx, edx; 459 460 if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4) 461 return 1; 462 463 /* Intel has a non-standard dependency on %ecx for this CPUID level. */ 464 cpuid_count(4, 0, &eax, &ebx, &ecx, &edx); 465 if (eax & 0x1f) 466 return (eax >> 26) + 1; 467 else 468 return 1; 469 } 470 471 static void detect_vmx_virtcap(struct cpuinfo_x86 *c) 472 { 473 /* Intel VMX MSR indicated features */ 474 #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 475 #define X86_VMX_FEATURE_PROC_CTLS_VNMI 0x00400000 476 #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS 0x80000000 477 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC 0x00000001 478 #define X86_VMX_FEATURE_PROC_CTLS2_EPT 0x00000002 479 #define X86_VMX_FEATURE_PROC_CTLS2_VPID 0x00000020 480 481 u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2; 482 483 clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW); 484 clear_cpu_cap(c, X86_FEATURE_VNMI); 485 clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); 486 clear_cpu_cap(c, X86_FEATURE_EPT); 487 clear_cpu_cap(c, X86_FEATURE_VPID); 488 489 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high); 490 msr_ctl = vmx_msr_high | vmx_msr_low; 491 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW) 492 set_cpu_cap(c, X86_FEATURE_TPR_SHADOW); 493 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI) 494 set_cpu_cap(c, X86_FEATURE_VNMI); 495 if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) { 496 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, 497 vmx_msr_low, vmx_msr_high); 498 msr_ctl2 = vmx_msr_high | vmx_msr_low; 499 if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) && 500 (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)) 501 set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY); 502 if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT) 503 set_cpu_cap(c, X86_FEATURE_EPT); 504 if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID) 505 set_cpu_cap(c, X86_FEATURE_VPID); 506 } 507 } 508 509 static void init_intel_energy_perf(struct cpuinfo_x86 *c) 510 { 511 u64 epb; 512 513 /* 514 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized. 515 * (x86_energy_perf_policy(8) is available to change it at run-time.) 516 */ 517 if (!cpu_has(c, X86_FEATURE_EPB)) 518 return; 519 520 rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); 521 if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE) 522 return; 523 524 pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n"); 525 pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n"); 526 epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL; 527 wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb); 528 } 529 530 static void intel_bsp_resume(struct cpuinfo_x86 *c) 531 { 532 /* 533 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume, 534 * so reinitialize it properly like during bootup: 535 */ 536 init_intel_energy_perf(c); 537 } 538 539 static void init_cpuid_fault(struct cpuinfo_x86 *c) 540 { 541 u64 msr; 542 543 if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) { 544 if (msr & MSR_PLATFORM_INFO_CPUID_FAULT) 545 set_cpu_cap(c, X86_FEATURE_CPUID_FAULT); 546 } 547 } 548 549 static void init_intel_misc_features(struct cpuinfo_x86 *c) 550 { 551 u64 msr; 552 553 if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr)) 554 return; 555 556 /* Clear all MISC features */ 557 this_cpu_write(msr_misc_features_shadow, 0); 558 559 /* Check features and update capabilities and shadow control bits */ 560 init_cpuid_fault(c); 561 probe_xeon_phi_r3mwait(c); 562 563 msr = this_cpu_read(msr_misc_features_shadow); 564 wrmsrl(MSR_MISC_FEATURES_ENABLES, msr); 565 } 566 567 static void init_intel(struct cpuinfo_x86 *c) 568 { 569 unsigned int l2 = 0; 570 571 early_init_intel(c); 572 573 intel_workarounds(c); 574 575 /* 576 * Detect the extended topology information if available. This 577 * will reinitialise the initial_apicid which will be used 578 * in init_intel_cacheinfo() 579 */ 580 detect_extended_topology(c); 581 582 if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { 583 /* 584 * let's use the legacy cpuid vector 0x1 and 0x4 for topology 585 * detection. 586 */ 587 c->x86_max_cores = intel_num_cpu_cores(c); 588 #ifdef CONFIG_X86_32 589 detect_ht(c); 590 #endif 591 } 592 593 l2 = init_intel_cacheinfo(c); 594 595 /* Detect legacy cache sizes if init_intel_cacheinfo did not */ 596 if (l2 == 0) { 597 cpu_detect_cache_sizes(c); 598 l2 = c->x86_cache_size; 599 } 600 601 if (c->cpuid_level > 9) { 602 unsigned eax = cpuid_eax(10); 603 /* Check for version and the number of counters */ 604 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) 605 set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); 606 } 607 608 if (cpu_has(c, X86_FEATURE_XMM2)) 609 set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); 610 611 if (boot_cpu_has(X86_FEATURE_DS)) { 612 unsigned int l1; 613 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); 614 if (!(l1 & (1<<11))) 615 set_cpu_cap(c, X86_FEATURE_BTS); 616 if (!(l1 & (1<<12))) 617 set_cpu_cap(c, X86_FEATURE_PEBS); 618 } 619 620 if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) && 621 (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) 622 set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR); 623 624 if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) && 625 ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT))) 626 set_cpu_bug(c, X86_BUG_MONITOR); 627 628 #ifdef CONFIG_X86_64 629 if (c->x86 == 15) 630 c->x86_cache_alignment = c->x86_clflush_size * 2; 631 if (c->x86 == 6) 632 set_cpu_cap(c, X86_FEATURE_REP_GOOD); 633 #else 634 /* 635 * Names for the Pentium II/Celeron processors 636 * detectable only by also checking the cache size. 637 * Dixon is NOT a Celeron. 638 */ 639 if (c->x86 == 6) { 640 char *p = NULL; 641 642 switch (c->x86_model) { 643 case 5: 644 if (l2 == 0) 645 p = "Celeron (Covington)"; 646 else if (l2 == 256) 647 p = "Mobile Pentium II (Dixon)"; 648 break; 649 650 case 6: 651 if (l2 == 128) 652 p = "Celeron (Mendocino)"; 653 else if (c->x86_mask == 0 || c->x86_mask == 5) 654 p = "Celeron-A"; 655 break; 656 657 case 8: 658 if (l2 == 128) 659 p = "Celeron (Coppermine)"; 660 break; 661 } 662 663 if (p) 664 strcpy(c->x86_model_id, p); 665 } 666 667 if (c->x86 == 15) 668 set_cpu_cap(c, X86_FEATURE_P4); 669 if (c->x86 == 6) 670 set_cpu_cap(c, X86_FEATURE_P3); 671 #endif 672 673 /* Work around errata */ 674 srat_detect_node(c); 675 676 if (cpu_has(c, X86_FEATURE_VMX)) 677 detect_vmx_virtcap(c); 678 679 init_intel_energy_perf(c); 680 681 init_intel_misc_features(c); 682 } 683 684 #ifdef CONFIG_X86_32 685 static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) 686 { 687 /* 688 * Intel PIII Tualatin. This comes in two flavours. 689 * One has 256kb of cache, the other 512. We have no way 690 * to determine which, so we use a boottime override 691 * for the 512kb model, and assume 256 otherwise. 692 */ 693 if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) 694 size = 256; 695 696 /* 697 * Intel Quark SoC X1000 contains a 4-way set associative 698 * 16K cache with a 16 byte cache line and 256 lines per tag 699 */ 700 if ((c->x86 == 5) && (c->x86_model == 9)) 701 size = 16; 702 return size; 703 } 704 #endif 705 706 #define TLB_INST_4K 0x01 707 #define TLB_INST_4M 0x02 708 #define TLB_INST_2M_4M 0x03 709 710 #define TLB_INST_ALL 0x05 711 #define TLB_INST_1G 0x06 712 713 #define TLB_DATA_4K 0x11 714 #define TLB_DATA_4M 0x12 715 #define TLB_DATA_2M_4M 0x13 716 #define TLB_DATA_4K_4M 0x14 717 718 #define TLB_DATA_1G 0x16 719 720 #define TLB_DATA0_4K 0x21 721 #define TLB_DATA0_4M 0x22 722 #define TLB_DATA0_2M_4M 0x23 723 724 #define STLB_4K 0x41 725 #define STLB_4K_2M 0x42 726 727 static const struct _tlb_table intel_tlb_table[] = { 728 { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, 729 { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, 730 { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, 731 { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" }, 732 { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" }, 733 { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" }, 734 { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages */" }, 735 { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, 736 { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, 737 { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" }, 738 { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, 739 { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" }, 740 { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" }, 741 { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" }, 742 { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" }, 743 { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" }, 744 { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, 745 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, 746 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, 747 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, 748 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" }, 749 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" }, 750 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" }, 751 { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, 752 { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, 753 { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, 754 { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" }, 755 { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" }, 756 { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, 757 { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, 758 { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, 759 { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" }, 760 { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" }, 761 { 0x00, 0, 0 } 762 }; 763 764 static void intel_tlb_lookup(const unsigned char desc) 765 { 766 unsigned char k; 767 if (desc == 0) 768 return; 769 770 /* look up this descriptor in the table */ 771 for (k = 0; intel_tlb_table[k].descriptor != desc && \ 772 intel_tlb_table[k].descriptor != 0; k++) 773 ; 774 775 if (intel_tlb_table[k].tlb_type == 0) 776 return; 777 778 switch (intel_tlb_table[k].tlb_type) { 779 case STLB_4K: 780 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) 781 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; 782 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) 783 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; 784 break; 785 case STLB_4K_2M: 786 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) 787 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; 788 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) 789 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; 790 if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) 791 tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; 792 if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) 793 tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; 794 if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) 795 tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; 796 if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) 797 tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; 798 break; 799 case TLB_INST_ALL: 800 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) 801 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; 802 if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) 803 tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; 804 if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) 805 tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; 806 break; 807 case TLB_INST_4K: 808 if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries) 809 tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries; 810 break; 811 case TLB_INST_4M: 812 if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) 813 tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; 814 break; 815 case TLB_INST_2M_4M: 816 if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries) 817 tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries; 818 if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries) 819 tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries; 820 break; 821 case TLB_DATA_4K: 822 case TLB_DATA0_4K: 823 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) 824 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; 825 break; 826 case TLB_DATA_4M: 827 case TLB_DATA0_4M: 828 if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) 829 tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; 830 break; 831 case TLB_DATA_2M_4M: 832 case TLB_DATA0_2M_4M: 833 if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries) 834 tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries; 835 if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) 836 tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; 837 break; 838 case TLB_DATA_4K_4M: 839 if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries) 840 tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries; 841 if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) 842 tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; 843 break; 844 case TLB_DATA_1G: 845 if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) 846 tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; 847 break; 848 } 849 } 850 851 static void intel_detect_tlb(struct cpuinfo_x86 *c) 852 { 853 int i, j, n; 854 unsigned int regs[4]; 855 unsigned char *desc = (unsigned char *)regs; 856 857 if (c->cpuid_level < 2) 858 return; 859 860 /* Number of times to iterate */ 861 n = cpuid_eax(2) & 0xFF; 862 863 for (i = 0 ; i < n ; i++) { 864 cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]); 865 866 /* If bit 31 is set, this is an unknown format */ 867 for (j = 0 ; j < 3 ; j++) 868 if (regs[j] & (1 << 31)) 869 regs[j] = 0; 870 871 /* Byte 0 is level count, not a descriptor */ 872 for (j = 1 ; j < 16 ; j++) 873 intel_tlb_lookup(desc[j]); 874 } 875 } 876 877 static const struct cpu_dev intel_cpu_dev = { 878 .c_vendor = "Intel", 879 .c_ident = { "GenuineIntel" }, 880 #ifdef CONFIG_X86_32 881 .legacy_models = { 882 { .family = 4, .model_names = 883 { 884 [0] = "486 DX-25/33", 885 [1] = "486 DX-50", 886 [2] = "486 SX", 887 [3] = "486 DX/2", 888 [4] = "486 SL", 889 [5] = "486 SX/2", 890 [7] = "486 DX/2-WB", 891 [8] = "486 DX/4", 892 [9] = "486 DX/4-WB" 893 } 894 }, 895 { .family = 5, .model_names = 896 { 897 [0] = "Pentium 60/66 A-step", 898 [1] = "Pentium 60/66", 899 [2] = "Pentium 75 - 200", 900 [3] = "OverDrive PODP5V83", 901 [4] = "Pentium MMX", 902 [7] = "Mobile Pentium 75 - 200", 903 [8] = "Mobile Pentium MMX", 904 [9] = "Quark SoC X1000", 905 } 906 }, 907 { .family = 6, .model_names = 908 { 909 [0] = "Pentium Pro A-step", 910 [1] = "Pentium Pro", 911 [3] = "Pentium II (Klamath)", 912 [4] = "Pentium II (Deschutes)", 913 [5] = "Pentium II (Deschutes)", 914 [6] = "Mobile Pentium II", 915 [7] = "Pentium III (Katmai)", 916 [8] = "Pentium III (Coppermine)", 917 [10] = "Pentium III (Cascades)", 918 [11] = "Pentium III (Tualatin)", 919 } 920 }, 921 { .family = 15, .model_names = 922 { 923 [0] = "Pentium 4 (Unknown)", 924 [1] = "Pentium 4 (Willamette)", 925 [2] = "Pentium 4 (Northwood)", 926 [4] = "Pentium 4 (Foster)", 927 [5] = "Pentium 4 (Foster)", 928 } 929 }, 930 }, 931 .legacy_cache_size = intel_size_cache, 932 #endif 933 .c_detect_tlb = intel_detect_tlb, 934 .c_early_init = early_init_intel, 935 .c_init = init_intel, 936 .c_bsp_resume = intel_bsp_resume, 937 .c_x86_vendor = X86_VENDOR_INTEL, 938 }; 939 940 cpu_dev_register(intel_cpu_dev); 941 942