xref: /openbmc/linux/arch/x86/kernel/cpu/intel.c (revision b35565bb)
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Just in case our CPU detection goes bad, or you have a weird system,
 * allow a way to override the automatic disabling of MPX.
 */
static int forcempx;

static int __init forcempx_setup(char *__unused)
{
	forcempx = 1;

	return 1;
}
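/*
 * Booting with "intel-skd-046-workaround=disable" sets forcempx, which
 * makes check_mpx_erratum() below leave MPX enabled.
 */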
__setup("intel-skd-046-workaround=disable", forcempx_setup);

void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	if (forcempx)
		return;
	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046: "Branch Instructions
	 * May Initialize MPX Bound Registers Incorrectly".
	 *
	 * This might falsely disable MPX on systems without
	 * SMEP, like Atom processors without SMEP.  But there
	 * is no such hardware known at the moment.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 0;
}
__setup("ring3mwait=disable", ring3mwait_disable);

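/*
 * Ring 3 MONITOR/MWAIT is specific to the Xeon Phi family; booting
 * with "ring3mwait=disable" leaves the feature off.
 */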
static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT cannot be enumerated via CPUID; it has
	 * to be inferred from the CPU family and model.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

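/*
 * Early Intel fixups: run via c_early_init for the boot CPU and run
 * again from init_intel() for every CPU during full identification.
 */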
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
		(c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is CPUID 8000_0007 edx. Bit 8 means the TSC runs
	 * at a constant rate with P/T states and does not stop in deep
	 * C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have a TSC that doesn't stop in S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
		case 0x4a:	/* Merrifield */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15)
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
			pr_info("kmemcheck: Disabling fast string operations\n");
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number
		 * of APIC IDs reserved per package.  Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}

	check_mpx_erratum(c);
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Only check secondary CPUs, i.e. calls from identify_secondary_cpu() */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
				    "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

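/*
 * Bind the CPU to the NUMA node reported by ACPI/SRAT; fall back to the
 * value from init_cpu_to_node() when that node is unknown or offline.
 */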
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}

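/*
 * Derive the synthetic VMX feature flags (TPR shadow, virtual NMIs,
 * FlexPriority, EPT, VPID) from the VMX capability MSRs.
 */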
static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void init_intel_energy_perf(struct cpuinfo_x86 *c)
{
	u64 epb;

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
	 */
	if (!cpu_has(c, X86_FEATURE_EPB))
		return;

	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
		return;

	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}

static void intel_bsp_resume(struct cpuinfo_x86 *c)
{
	/*
	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
	 * so reinitialize it properly like during bootup:
	 */
	init_intel_energy_perf(c);
}

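/*
 * CPUID faulting: MSR_PLATFORM_INFO advertises whether CPUID executed
 * at CPL > 0 can be made to fault.  When available, the kernel can hide
 * CPUID from userspace; a task opts in via arch_prctl(2), roughly:
 *
 *	arch_prctl(ARCH_SET_CPUID, 0);	(subsequent CPUID raises SIGSEGV)
 */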
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

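/*
 * Set up MSR_MISC_FEATURES_ENABLES and its per-CPU shadow.  The shadow
 * tracks the CPUID-faulting and ring 3 MWAIT enable bits so that later
 * updates to the MSR do not need to re-read it.
 */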
static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

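/*
 * Main Intel setup, run for every CPU during identification: re-applies
 * the early fixups and workarounds, then probes topology, cache sizes
 * and the model-specific features handled above.
 */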
static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	l2 = init_intel_cacheinfo(c);

	/* Detect legacy cache sizes if init_intel_cacheinfo did not */
	if (l2 == 0) {
		cpu_detect_cache_sizes(c);
		l2 = c->x86_cache_size;
	}

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
		((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	init_intel_energy_perf(c);

	init_intel_misc_features(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

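/*
 * TLB descriptor types, grouped by the high nibble: 0x0x instruction,
 * 0x1x data, 0x2x data (array 0), 0x4x shared second-level TLB.
 */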
#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

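/*
 * Fold one CPUID leaf 2 descriptor into the global tlb_lli_* and
 * tlb_lld_* counts, keeping the largest entry count seen per page size.
 */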
static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;
	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc && \
			intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

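/*
 * CPUID leaf 2 packs one-byte cache/TLB descriptors into EAX..EDX.  The
 * low byte of EAX gives the number of times the leaf must be queried;
 * a register with bit 31 set holds no valid descriptors.
 */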
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init   = early_init_intel,
	.c_init		= init_intel,
	.c_bsp_resume	= intel_bsp_resume,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);