#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
		case 0x4a:	/* Merrifield */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);
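
	/*
	 * A note on the signature checks above and below: c->x86 is the
	 * CPUID family, c->x86_model the model and c->x86_mask the stepping,
	 * as decoded from CPUID leaf 1 by the common identification code.
	 */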

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15)
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
			pr_info("kmemcheck: Disabling fast string operations\n");
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 * the TLB when any changes are made to any of the page table entries.
	 * The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, cpu_has_pge() in arch/x86/include/asm/tlbflush.h should
	 * be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Are we being called from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif
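
	/*
	 * For background on the check above, roughly: the F0 0F bug is
	 * triggered by the invalid opcode bytes f0 0f c7 c8 (a register
	 * form of "lock cmpxchg8b"), which hangs affected parts during
	 * exception delivery. The workaround elsewhere in the kernel maps
	 * the IDT read-only, so the hang becomes a page fault that the
	 * fault handler can convert back into the expected trap.
	 */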

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * Find out the number of processor cores on the die.
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
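
/*
 * On leaf 4 as used above: EAX bits 31:26 hold the maximum number of
 * addressable core IDs per physical package minus one, hence the
 * "(eax >> 26) + 1", while EAX bits 4:0 are the cache type of the first
 * cache level; a type of zero means the leaf reports nothing, so we
 * conservatively assume a single core.
 */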

static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void init_intel_energy_perf(struct cpuinfo_x86 *c)
{
	u64 epb;

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
	 */
	if (!cpu_has(c, X86_FEATURE_EPB))
		return;

	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
		return;

	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}

static void intel_bsp_resume(struct cpuinfo_x86 *c)
{
	/*
	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
	 * so reinitialize it properly like during bootup:
	 */
	init_intel_energy_perf(c);
}

static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo().
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * Let's use the legacy cpuid leaves 0x1 and 0x4 for
		 * topology detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}
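
	/*
	 * In other words: without leaf 0xb, the core count comes from
	 * leaf 4 and (on 32-bit) the SMT sibling count from the HT fields
	 * of leaf 1; leaf 0xb would have provided both directly.
	 */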

	l2 = init_intel_cacheinfo(c);

	/* Detect legacy cache sizes if init_intel_cacheinfo() did not */
	if (l2 == 0) {
		cpu_detect_cache_sizes(c);
		l2 = c->x86_cache_size;
	}

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax >> 8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && cpu_has_clflush &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	init_intel_energy_perf(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag.
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif
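
/*
 * Note that intel_size_cache() is wired up below as .legacy_cache_size,
 * so it should only be consulted for CPUs whose L2 size had to be guessed
 * by the legacy probe in cpu_detect_cache_sizes(); its return value
 * overrides that guess.
 */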

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};
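
/*
 * Each entry above pairs one CPUID leaf 2 descriptor byte with a TLB type
 * and an entry count. A CPU reports several descriptors, typically one per
 * TLB, so intel_tlb_lookup() below keeps the maximum entry count seen for
 * each page size instead of overwriting blindly.
 */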

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}
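
/*
 * Leaf 2 conventions relied upon below, per the SDM: the low byte of EAX
 * gives the number of times the leaf must be executed, a set bit 31 marks
 * a register whose bytes are not valid descriptors, and byte 0 of EAX is
 * that repeat count rather than a descriptor, which is why the inner byte
 * loop starts at 1.
 */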

static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, the register holds no valid descriptors */
		for (j = 0 ; j < 4 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is the level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_bsp_resume	= intel_bsp_resume,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);
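
/*
 * cpu_dev_register() places a pointer to intel_cpu_dev in the special
 * .x86_cpu_dev.init section; as far as the boot path goes, early CPU
 * setup scans that section, matches the "GenuineIntel" vendor string and
 * installs the callbacks above.
 */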