#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Early Intel CPU setup: unmask CPUID levels hidden by the BIOS, read the
 * running microcode revision, and apply errata workarounds that must be in
 * place before the generic identification code runs.
 */
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			/* Re-read the now-unmasked CPUID level and caps: */
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/*
	 * Read the microcode revision: write 0 to MSR_IA32_UCODE_REV first,
	 * then serialize, then read it back; the revision lands in the high
	 * word (third rdmsr argument), stored into c->microcode.
	 */
	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

#ifdef CONFIG_X86_F00F_BUG
/*
 * F00F workaround: remap the IDT read-only at a fixmap address so the
 * lockup triggered by the buggy instruction faults instead of hanging.
 */
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 *
		 * NOTE(review): the two concatenated string literals below
		 * lack a separating space, so the message prints
		 * "...unreliablewith B stepping..." - fixing it would change
		 * runtime output, so it is only flagged here.
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
		"with B stepping processors.\n");
	}
}

/*
 * 32-bit-only Intel workarounds: F00F bug, bogus SEP reports, P4 Xeon
 * erratum 037 (hardware prefetch), the 11AP APIC erratum, and movsl
 * alignment tuning.
 */
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround only should be initialized once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);


#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
/* 64-bit build: none of the above 32-bit errata apply. */
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

/* Bind this CPU to its NUMA node as reported by SRAT (NUMA builds only). */
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 4 is required; without it, report a single core. */
	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}

/*
 * Probe the VMX capability MSRs and set the corresponding virtualization
 * feature caps (TPR shadow, virtual NMI, FlexPriority, EPT, VPID).
 */
static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		/* FlexPriority needs both virtual-APIC and TPR shadow: */
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

/*
 * Main Intel identification: topology, caches, perfmon/BTS/PEBS caps,
 * model naming (32-bit), VMX capabilities and energy-perf-bias defaults.
 */
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;
		/*
		 * NOTE(review): this rdmsr writes the MSR high word into l2,
		 * clobbering the L2 cache size read above; the 32-bit Celeron
		 * naming code below then reads l2. Confirm against upstream
		 * before relying on the l2-based names when cpu_has_ds is set.
		 */
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
	 * x86_energy_perf_policy(8) is available to change it at run-time
	 */
	if (cpu_has(c, X86_FEATURE_EPB)) {
		u64 epb;

		rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
			printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
				" Set to 'normal', was 'performance'\n"
				"ENERGY_PERF_BIAS: View and update with"
				" x86_energy_perf_policy(8)\n");
			epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
			wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		}
	}
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
#endif

/* TLB type codes used by intel_tlb_table[] / intel_tlb_lookup(). */
#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41

/* CPUID leaf 2 descriptor byte -> TLB type, entry count and description. */
static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, full associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages */" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

/*
 * Record the TLB sizes described by one CPUID leaf 2 descriptor byte,
 * keeping the maximum entry count seen for each page-size bucket.
 */
static void __cpuinit intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;
	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc && \
			intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		/* Shared TLB: counts for both instruction and data sides. */
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

/*
 * Pick the per-model tlb_flushall_shift tuning value by (family << 8) + model;
 * -1 disables the flush-all heuristic for that model.
 */
static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
{
	switch ((c->x86 << 8) + c->x86_model) {
	case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
	case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
	case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
	case 0x61d: /* six-core 45 nm xeon "Dunnington" */
		tlb_flushall_shift = -1;
		break;
	case 0x61a: /* 45 nm nehalem, "Bloomfield" */
	case 0x61e: /* 45 nm nehalem, "Lynnfield" */
	case 0x625: /* 32 nm nehalem, "Clarkdale" */
	case 0x62c: /* 32 nm nehalem, "Gulftown" */
	case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
	case 0x62f: /* 32 nm Xeon E7 */
		tlb_flushall_shift = 6;
		break;
	case 0x62a: /* SandyBridge */
	case 0x62d: /* SandyBridge, "Romely-EP" */
		tlb_flushall_shift = 5;
		break;
	case 0x63a: /* Ivybridge */
		tlb_flushall_shift = 1;
		break;
	default:
		tlb_flushall_shift = 6;
	}
}

/*
 * Walk CPUID leaf 2 the required number of times, feeding every valid
 * descriptor byte to intel_tlb_lookup(), then set the flushall tuning.
 */
static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
	intel_tlb_flushall_shift_set(c);
}

/* Vendor callbacks and legacy model-name tables for GenuineIntel CPUs. */
static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);