xref: /openbmc/linux/arch/x86/kernel/cpu/intel.c (revision d623f60d)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 
4 #include <linux/string.h>
5 #include <linux/bitops.h>
6 #include <linux/smp.h>
7 #include <linux/sched.h>
8 #include <linux/sched/clock.h>
9 #include <linux/thread_info.h>
10 #include <linux/init.h>
11 #include <linux/uaccess.h>
12 
13 #include <asm/cpufeature.h>
14 #include <asm/pgtable.h>
15 #include <asm/msr.h>
16 #include <asm/bugs.h>
17 #include <asm/cpu.h>
18 #include <asm/intel-family.h>
19 #include <asm/microcode_intel.h>
20 #include <asm/hwcap2.h>
21 #include <asm/elf.h>
22 
23 #ifdef CONFIG_X86_64
24 #include <linux/topology.h>
25 #endif
26 
27 #include "cpu.h"
28 
29 #ifdef CONFIG_X86_LOCAL_APIC
30 #include <asm/mpspec.h>
31 #include <asm/apic.h>
32 #endif
33 
34 /*
35  * Just in case our CPU detection goes bad, or you have a weird system,
36  * allow a way to override the automatic disabling of MPX.
37  */
38 static int forcempx;
39 
40 static int __init forcempx_setup(char *__unused)
41 {
42 	forcempx = 1;
43 
44 	return 1;
45 }
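/*
 * Booting with "intel-skd-046-workaround=disable" on the kernel
 * command line sets forcempx and keeps MPX enabled despite the
 * erratum check in check_mpx_erratum() below.
 */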
46 __setup("intel-skd-046-workaround=disable", forcempx_setup);
47 
48 void check_mpx_erratum(struct cpuinfo_x86 *c)
49 {
50 	if (forcempx)
51 		return;
52 	/*
53 	 * Turn off the MPX feature on CPUs where SMEP is not
54 	 * available or disabled.
55 	 *
56 	 * Works around Intel Erratum SKD046: "Branch Instructions
57 	 * May Initialize MPX Bound Registers Incorrectly".
58 	 *
59 	 * This might falsely disable MPX on SMEP-less systems, such as
60 	 * Atom processors that lack SMEP.  But no such hardware is
61 	 * known at the moment.
62 	 */
63 	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
64 		setup_clear_cpu_cap(X86_FEATURE_MPX);
65 		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
66 	}
67 }
68 
69 static bool ring3mwait_disabled __read_mostly;
70 
71 static int __init ring3mwait_disable(char *__unused)
72 {
73 	ring3mwait_disabled = true;
74 	return 0;
75 }
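/*
 * "ring3mwait=disable" on the kernel command line keeps the ring 3
 * MONITOR/MWAIT feature off even on Xeon Phi parts that support it.
 */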
76 __setup("ring3mwait=disable", ring3mwait_disable);
77 
78 static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
79 {
80 	/*
81 	 * The ring 3 MONITOR/MWAIT feature is not enumerated in CPUID,
82 	 * so it can only be detected by CPU family/model comparison.
83 	 */
84 	if (c->x86 != 6)
85 		return;
86 	switch (c->x86_model) {
87 	case INTEL_FAM6_XEON_PHI_KNL:
88 	case INTEL_FAM6_XEON_PHI_KNM:
89 		break;
90 	default:
91 		return;
92 	}
93 
94 	if (ring3mwait_disabled)
95 		return;
96 
97 	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
98 	this_cpu_or(msr_misc_features_shadow,
99 		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);
100 
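	/*
	 * Advertise the feature to user space through the AT_HWCAP2
	 * auxiliary vector, once, for the boot CPU.
	 */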
101 	if (c == &boot_cpu_data)
102 		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
103 }
104 
105 /*
106  * Early microcode releases for the Spectre v2 mitigation were broken.
107  * Information taken from:
108  * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
109  * - https://kb.vmware.com/s/article/52345
110  * - Microcode revisions observed in the wild
111  * - Release note from 20180108 microcode release
112  */
113 struct sku_microcode {
114 	u8 model;
115 	u8 stepping;
116 	u32 microcode;
117 };
118 static const struct sku_microcode spectre_bad_microcodes[] = {
119 	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0B,	0x80 },
120 	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x0A,	0x80 },
121 	{ INTEL_FAM6_KABYLAKE_DESKTOP,	0x09,	0x80 },
122 	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x0A,	0x80 },
123 	{ INTEL_FAM6_KABYLAKE_MOBILE,	0x09,	0x80 },
124 	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
125 	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
126 	{ INTEL_FAM6_BROADWELL_CORE,	0x04,	0x28 },
127 	{ INTEL_FAM6_BROADWELL_GT3E,	0x01,	0x1b },
128 	{ INTEL_FAM6_BROADWELL_XEON_D,	0x02,	0x14 },
129 	{ INTEL_FAM6_BROADWELL_XEON_D,	0x03,	0x07000011 },
130 	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
131 	{ INTEL_FAM6_HASWELL_ULT,	0x01,	0x21 },
132 	{ INTEL_FAM6_HASWELL_GT3E,	0x01,	0x18 },
133 	{ INTEL_FAM6_HASWELL_CORE,	0x03,	0x23 },
134 	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
135 	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
136 	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
137 	/* Observed in the wild */
138 	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
139 	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
140 };
141 
142 static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
143 {
144 	int i;
145 
146 	/*
147 	 * We know that hypervisors lie to us about the microcode version, so
148 	 * we may as well hope that the host runs the correct version.
149 	 */
150 	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
151 		return false;
152 
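	/*
	 * A CPU whose current microcode revision is at or below the
	 * blacklisted revision for its model/stepping is affected.
	 */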
153 	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
154 		if (c->x86_model == spectre_bad_microcodes[i].model &&
155 		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
156 			return (c->microcode <= spectre_bad_microcodes[i].microcode);
157 	}
158 	return false;
159 }
160 
161 static void early_init_intel(struct cpuinfo_x86 *c)
162 {
163 	u64 misc_enable;
164 
165 	/* Unmask CPUID levels if masked: */
166 	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
167 		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
168 				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
169 			c->cpuid_level = cpuid_eax(0);
170 			get_cpu_cap(c);
171 		}
172 	}
173 
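	/*
	 * Family 0xf model >= 3 (Prescott and later) and family 6
	 * model >= 0x0e (Core and later) have a TSC that ticks at a
	 * constant rate regardless of frequency scaling.
	 */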
174 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
175 		(c->x86 == 0x6 && c->x86_model >= 0x0e))
176 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
177 
178 	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
179 		c->microcode = intel_get_microcode_revision();
180 
181 	/* If any speculation control feature is enumerated, check the blacklist and clear the lot */
182 	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
183 	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
184 	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
185 	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
186 		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
187 		setup_clear_cpu_cap(X86_FEATURE_IBRS);
188 		setup_clear_cpu_cap(X86_FEATURE_IBPB);
189 		setup_clear_cpu_cap(X86_FEATURE_STIBP);
190 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
191 		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
192 		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
193 		setup_clear_cpu_cap(X86_FEATURE_SSBD);
194 		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
195 	}
196 
197 	/*
198 	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
199 	 *
200 	 * A race condition between speculative fetches and invalidating
201 	 * a large page.  This is worked around in microcode, but we
202 	 * need the microcode to have already been loaded... so if it is
203 	 * not, recommend a BIOS update and disable large pages.
204 	 */
205 	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
206 	    c->microcode < 0x20e) {
207 		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
208 		clear_cpu_cap(c, X86_FEATURE_PSE);
209 	}
210 
211 #ifdef CONFIG_X86_64
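	/*
	 * All Intel 64-bit CPUs support SYSENTER from 32-bit compat
	 * mode, so the feature can be set unconditionally here.
	 */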
212 	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
213 #else
214 	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
215 	if (c->x86 == 15 && c->x86_cache_alignment == 64)
216 		c->x86_cache_alignment = 128;
217 #endif
218 
219 	/* CPUID workaround for 0F33/0F34 CPU */
220 	if (c->x86 == 0xF && c->x86_model == 0x3
221 	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
222 		c->x86_phys_bits = 36;
223 
224 	/*
225 	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
226 	 * with P/T states and does not stop in deep C-states.
227 	 *
228 	 * It is also reliable across cores and sockets (but not across
229 	 * cabinets; we explicitly turn it off in that case).
230 	 */
231 	if (c->x86_power & (1 << 8)) {
232 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
233 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
234 	}
235 
236 	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
237 	if (c->x86 == 6) {
238 		switch (c->x86_model) {
239 		case 0x27:	/* Penwell */
240 		case 0x35:	/* Cloverview */
241 		case 0x4a:	/* Merrifield */
242 			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
243 			break;
244 		default:
245 			break;
246 		}
247 	}
248 
249 	/*
250 	 * There is a known erratum on Pentium III and Core Solo
251 	 * and Core Duo CPUs.
252 	 * " Page with PAT set to WC while associated MTRR is UC
253 	 *   may consolidate to UC "
254 	 * Because of this erratum, it is better to stick with
255 	 * setting WC in MTRR rather than using PAT on these CPUs.
256 	 *
257 	 * Enable PAT WC only on P4, Core 2 or later CPUs.
258 	 */
259 	if (c->x86 == 6 && c->x86_model < 15)
260 		clear_cpu_cap(c, X86_FEATURE_PAT);
261 
262 	/*
263 	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
264 	 * clear the fast string and enhanced fast string CPU capabilities.
265 	 */
266 	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
267 		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
268 		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
269 			pr_info("Disabled fast string operations\n");
270 			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
271 			setup_clear_cpu_cap(X86_FEATURE_ERMS);
272 		}
273 	}
274 
275 	/*
276 	 * Intel Quark Core DevMan_001.pdf section 6.4.11
277 	 * "The operating system also is required to invalidate (i.e., flush)
278 	 *  the TLB when any changes are made to any of the page table entries.
279 	 *  The operating system must reload CR3 to cause the TLB to be flushed"
280 	 *
281 	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
282 	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
283 	 * to be modified.
284 	 */
285 	if (c->x86 == 5 && c->x86_model == 9) {
286 		pr_info("Disabling PGE capability bit\n");
287 		setup_clear_cpu_cap(X86_FEATURE_PGE);
288 	}
289 
290 	if (c->cpuid_level >= 0x00000001) {
291 		u32 eax, ebx, ecx, edx;
292 
293 		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
294 		/*
295 		 * If HTT (EDX[28]) is set, EBX[23:16] contains the number of
296 		 * APIC IDs reserved per package. Store the resulting
297 		 * shift value for the package management code.
298 		 */
299 		if (edx & (1U << 28))
300 			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
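		/* Example: 16 reserved APIC IDs yield get_count_order(16) == 4. */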
301 	}
302 
303 	check_mpx_erratum(c);
304 }
305 
306 #ifdef CONFIG_X86_32
307 /*
308  *	Early probe support logic for ppro memory erratum #50
309  *
310  *	This is called before we do cpu ident work
311  */
312 
313 int ppro_with_ram_bug(void)
314 {
315 	/* Uses data from early_cpu_detect now */
316 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
317 	    boot_cpu_data.x86 == 6 &&
318 	    boot_cpu_data.x86_model == 1 &&
319 	    boot_cpu_data.x86_stepping < 8) {
320 		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
321 		return 1;
322 	}
323 	return 0;
324 }
325 
326 static void intel_smp_check(struct cpuinfo_x86 *c)
327 {
328 	/* Only check secondary CPUs; the boot CPU has cpu_index 0. */
329 	if (!c->cpu_index)
330 		return;
331 
332 	/*
333 	 * Mask B, Pentium, but not Pentium MMX
334 	 */
335 	if (c->x86 == 5 &&
336 	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
337 	    c->x86_model <= 3) {
338 		/*
339 		 * Remember we have B step Pentia with bugs
340 		 */
341 		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
342 				    "with B stepping processors.\n");
343 	}
344 }
345 
346 static int forcepae;
347 static int __init forcepae_setup(char *__unused)
348 {
349 	forcepae = 1;
350 	return 1;
351 }
352 __setup("forcepae", forcepae_setup);
353 
354 static void intel_workarounds(struct cpuinfo_x86 *c)
355 {
356 #ifdef CONFIG_X86_F00F_BUG
357 	/*
358 	 * All models of Pentium and Pentium with MMX technology CPUs
359 	 * have the F0 0F bug, which lets nonprivileged users lock up the
360 	 * system. Announce that the fault handler will be checking for it.
361 	 * The Quark is also family 5, but does not have the same bug.
362 	 */
363 	clear_cpu_bug(c, X86_BUG_F00F);
364 	if (c->x86 == 5 && c->x86_model < 9) {
365 		static int f00f_workaround_enabled;
366 
367 		set_cpu_bug(c, X86_BUG_F00F);
368 		if (!f00f_workaround_enabled) {
369 			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
370 			f00f_workaround_enabled = 1;
371 		}
372 	}
373 #endif
374 
375 	/*
376 	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
377 	 * model 3 stepping 3
378 	 */
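	/* 0x633 packs family 6, model 3, stepping 3 into one comparable value. */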
379 	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
380 		clear_cpu_cap(c, X86_FEATURE_SEP);
381 
382 	/*
383 	 * PAE CPUID issue: many Pentium M report no PAE but may have a
384 	 * functionally usable PAE implementation.
385 	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
386 	 */
387 	if (forcepae) {
388 		pr_warn("PAE forced!\n");
389 		set_cpu_cap(c, X86_FEATURE_PAE);
390 		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
391 	}
392 
393 	/*
394 	 * P4 Xeon erratum 037 workaround.
395 	 * Hardware prefetcher may cause stale data to be loaded into the cache.
396 	 */
397 	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
398 		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
399 				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
400 			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
401 			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
402 		}
403 	}
404 
405 	/*
406 	 * See if we have a good local APIC by checking for buggy Pentia,
407 	 * i.e. all B steppings and the C2 stepping of P54C when using their
408 	 * integrated APIC (see 11AP erratum in "Pentium Processor
409 	 * Specification Update").
410 	 */
411 	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
412 	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
413 		set_cpu_bug(c, X86_BUG_11AP);
414 
415 
416 #ifdef CONFIG_X86_INTEL_USERCOPY
417 	/*
418 	 * Set up the preferred alignment for movsl bulk memory moves
419 	 */
420 	switch (c->x86) {
421 	case 4:		/* 486: untested */
422 		break;
423 	case 5:		/* Old Pentia: untested */
424 		break;
425 	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
426 		movsl_mask.mask = 7;
427 		break;
428 	case 15:	/* P4 is OK down to 8-byte alignment */
429 		movsl_mask.mask = 7;
430 		break;
431 	}
432 #endif
433 
434 	intel_smp_check(c);
435 }
436 #else
437 static void intel_workarounds(struct cpuinfo_x86 *c)
438 {
439 }
440 #endif
441 
442 static void srat_detect_node(struct cpuinfo_x86 *c)
443 {
444 #ifdef CONFIG_NUMA
445 	unsigned node;
446 	int cpu = smp_processor_id();
447 
448 	/* Don't do the funky fallback heuristics the AMD version employs
449 	   for now. */
450 	node = numa_cpu_node(cpu);
451 	if (node == NUMA_NO_NODE || !node_online(node)) {
452 		/* reuse the value from init_cpu_to_node() */
453 		node = cpu_to_node(cpu);
454 	}
455 	numa_set_node(cpu, node);
456 #endif
457 }
458 
459 static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
460 {
461 	/* Intel VMX MSR indicated features */
462 #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
463 #define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
464 #define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
465 #define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
466 #define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
467 #define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020
468 
469 	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;
470 
471 	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
472 	clear_cpu_cap(c, X86_FEATURE_VNMI);
473 	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
474 	clear_cpu_cap(c, X86_FEATURE_EPT);
475 	clear_cpu_cap(c, X86_FEATURE_VPID);
476 
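	/*
	 * Bits 63:32 of the VMX capability MSRs advertise the controls
	 * that may be set to 1; OR-ing both halves flags any control
	 * that is either settable or forced on.
	 */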
477 	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
478 	msr_ctl = vmx_msr_high | vmx_msr_low;
479 	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
480 		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
481 	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
482 		set_cpu_cap(c, X86_FEATURE_VNMI);
483 	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
484 		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
485 		      vmx_msr_low, vmx_msr_high);
486 		msr_ctl2 = vmx_msr_high | vmx_msr_low;
487 		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
488 		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
489 			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
490 		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
491 			set_cpu_cap(c, X86_FEATURE_EPT);
492 		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
493 			set_cpu_cap(c, X86_FEATURE_VPID);
494 	}
495 }
496 
497 #define MSR_IA32_TME_ACTIVATE		0x982
498 
499 /* Helpers to access TME_ACTIVATE MSR */
500 #define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
501 #define TME_ACTIVATE_ENABLED(x)		(x & 0x2)
502 
503 #define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
504 #define TME_ACTIVATE_POLICY_AES_XTS_128	0
505 
506 #define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */
507 
508 #define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
509 #define TME_ACTIVATE_CRYPTO_AES_XTS_128	1
510 
511 /* Values for mktme_status (SW only construct) */
512 #define MKTME_ENABLED			0
513 #define MKTME_DISABLED			1
514 #define MKTME_UNINITIALIZED		2
515 static int mktme_status = MKTME_UNINITIALIZED;
516 
517 static void detect_tme(struct cpuinfo_x86 *c)
518 {
519 	u64 tme_activate, tme_policy, tme_crypto_algs;
520 	int keyid_bits = 0, nr_keyids = 0;
521 	static u64 tme_activate_cpu0;
522 
523 	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
524 
525 	if (mktme_status != MKTME_UNINITIALIZED) {
526 		if (tme_activate != tme_activate_cpu0) {
527 			/* Broken BIOS? */
528 			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
529 			pr_err_once("x86/tme: MKTME is not usable\n");
530 			mktme_status = MKTME_DISABLED;
531 
532 			/* Proceed. We may need to exclude bits from x86_phys_bits. */
533 		}
534 	} else {
535 		tme_activate_cpu0 = tme_activate;
536 	}
537 
538 	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
539 		pr_info_once("x86/tme: not enabled by BIOS\n");
540 		mktme_status = MKTME_DISABLED;
541 		return;
542 	}
543 
544 	if (mktme_status != MKTME_UNINITIALIZED)
545 		goto detect_keyid_bits;
546 
547 	pr_info("x86/tme: enabled by BIOS\n");
548 
549 	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
550 	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
551 		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
552 
553 	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
554 	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
555 		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
556 				tme_crypto_algs);
557 		mktme_status = MKTME_DISABLED;
558 	}
559 detect_keyid_bits:
560 	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
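	/* KeyID 0 is reserved for TME itself, leaving 2^bits - 1 MKTME KeyIDs. */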
561 	nr_keyids = (1UL << keyid_bits) - 1;
562 	if (nr_keyids) {
563 		pr_info_once("x86/mktme: enabled by BIOS\n");
564 		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
565 	} else {
566 		pr_info_once("x86/mktme: disabled by BIOS\n");
567 	}
568 
569 	if (mktme_status == MKTME_UNINITIALIZED) {
570 		/* MKTME is usable */
571 		mktme_status = MKTME_ENABLED;
572 	}
573 
574 	/*
575 	 * KeyID bits effectively lower the number of physical address
576 	 * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
577 	 */
578 	c->x86_phys_bits -= keyid_bits;
579 }
580 
581 static void init_intel_energy_perf(struct cpuinfo_x86 *c)
582 {
583 	u64 epb;
584 
585 	/*
586 	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
587 	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
588 	 */
589 	if (!cpu_has(c, X86_FEATURE_EPB))
590 		return;
591 
592 	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
593 	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
594 		return;
595 
596 	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
597 	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
598 	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
599 	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
600 }
601 
602 static void intel_bsp_resume(struct cpuinfo_x86 *c)
603 {
604 	/*
605 	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
606 	 * so reinitialize it properly like during bootup:
607 	 */
608 	init_intel_energy_perf(c);
609 }
610 
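/*
 * CPUID faulting, advertised via MSR_PLATFORM_INFO, lets the kernel
 * trap CPUID instructions executed in user space.
 */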
611 static void init_cpuid_fault(struct cpuinfo_x86 *c)
612 {
613 	u64 msr;
614 
615 	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
616 		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
617 			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
618 	}
619 }
620 
621 static void init_intel_misc_features(struct cpuinfo_x86 *c)
622 {
623 	u64 msr;
624 
625 	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
626 		return;
627 
628 	/* Clear all MISC features */
629 	this_cpu_write(msr_misc_features_shadow, 0);
630 
631 	/* Check features and update capabilities and shadow control bits */
632 	init_cpuid_fault(c);
633 	probe_xeon_phi_r3mwait(c);
634 
635 	msr = this_cpu_read(msr_misc_features_shadow);
636 	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
637 }
638 
639 static void init_intel(struct cpuinfo_x86 *c)
640 {
641 	early_init_intel(c);
642 
643 	intel_workarounds(c);
644 
645 	/*
646 	 * Detect the extended topology information if available. This
647 	 * will reinitialise the initial_apicid which will be used
648 	 * in init_intel_cacheinfo()
649 	 */
650 	detect_extended_topology(c);
651 
652 	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
653 		/*
654 		 * Use the legacy CPUID leaves 0x1 and 0x4 for topology
655 		 * detection.
656 		 */
657 		detect_num_cpu_cores(c);
658 #ifdef CONFIG_X86_32
659 		detect_ht(c);
660 #endif
661 	}
662 
663 	init_intel_cacheinfo(c);
664 
665 	if (c->cpuid_level > 9) {
666 		unsigned eax = cpuid_eax(10);
667 		/* Check for version and the number of counters */
668 		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
669 			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
670 	}
671 
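	/*
	 * On Intel, LFENCE is guaranteed to wait for all prior
	 * instructions, so it can order RDTSC when SSE2 is present.
	 */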
672 	if (cpu_has(c, X86_FEATURE_XMM2))
673 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
674 
675 	if (boot_cpu_has(X86_FEATURE_DS)) {
676 		unsigned int l1, l2;
677 
678 		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
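		/*
		 * MISC_ENABLE bit 11 reports "BTS unavailable" and bit 12
		 * "PEBS unavailable"; a clear bit means the feature exists.
		 */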
679 		if (!(l1 & (1<<11)))
680 			set_cpu_cap(c, X86_FEATURE_BTS);
681 		if (!(l1 & (1<<12)))
682 			set_cpu_cap(c, X86_FEATURE_PEBS);
683 	}
684 
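	/*
	 * On these models MONITOR is unreliable unless the monitored
	 * address is flushed with CLFLUSH first; flag the bug so the
	 * idle code can work around it.
	 */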
685 	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
686 	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
687 		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);
688 
689 	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
690 		((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
691 		set_cpu_bug(c, X86_BUG_MONITOR);
692 
693 #ifdef CONFIG_X86_64
694 	if (c->x86 == 15)
695 		c->x86_cache_alignment = c->x86_clflush_size * 2;
696 	if (c->x86 == 6)
697 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
698 #else
699 	/*
700 	 * Names for the Pentium II/Celeron processors
701 	 * detectable only by also checking the cache size.
702 	 * Dixon is NOT a Celeron.
703 	 */
704 	if (c->x86 == 6) {
705 		unsigned int l2 = c->x86_cache_size;
706 		char *p = NULL;
707 
708 		switch (c->x86_model) {
709 		case 5:
710 			if (l2 == 0)
711 				p = "Celeron (Covington)";
712 			else if (l2 == 256)
713 				p = "Mobile Pentium II (Dixon)";
714 			break;
715 
716 		case 6:
717 			if (l2 == 128)
718 				p = "Celeron (Mendocino)";
719 			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
720 				p = "Celeron-A";
721 			break;
722 
723 		case 8:
724 			if (l2 == 128)
725 				p = "Celeron (Coppermine)";
726 			break;
727 		}
728 
729 		if (p)
730 			strcpy(c->x86_model_id, p);
731 	}
732 
733 	if (c->x86 == 15)
734 		set_cpu_cap(c, X86_FEATURE_P4);
735 	if (c->x86 == 6)
736 		set_cpu_cap(c, X86_FEATURE_P3);
737 #endif
738 
739 	/* Work around errata */
740 	srat_detect_node(c);
741 
742 	if (cpu_has(c, X86_FEATURE_VMX))
743 		detect_vmx_virtcap(c);
744 
745 	if (cpu_has(c, X86_FEATURE_TME))
746 		detect_tme(c);
747 
748 	init_intel_energy_perf(c);
749 
750 	init_intel_misc_features(c);
751 }
752 
753 #ifdef CONFIG_X86_32
754 static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
755 {
756 	/*
757 	 * Intel PIII Tualatin. This comes in two flavours.
758 	 * One has 256kb of cache, the other 512. We have no way
759 	 * to determine which, so we use a boottime override
760 	 * for the 512kb model, and assume 256 otherwise.
761 	 */
762 	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
763 		size = 256;
764 
765 	/*
766 	 * Intel Quark SoC X1000 contains a 4-way set associative
767 	 * 16K cache with a 16 byte cache line and 256 lines per tag
768 	 */
769 	if ((c->x86 == 5) && (c->x86_model == 9))
770 		size = 16;
771 	return size;
772 }
773 #endif
774 
775 #define TLB_INST_4K	0x01
776 #define TLB_INST_4M	0x02
777 #define TLB_INST_2M_4M	0x03
778 
779 #define TLB_INST_ALL	0x05
780 #define TLB_INST_1G	0x06
781 
782 #define TLB_DATA_4K	0x11
783 #define TLB_DATA_4M	0x12
784 #define TLB_DATA_2M_4M	0x13
785 #define TLB_DATA_4K_4M	0x14
786 
787 #define TLB_DATA_1G	0x16
788 
789 #define TLB_DATA0_4K	0x21
790 #define TLB_DATA0_4M	0x22
791 #define TLB_DATA0_2M_4M	0x23
792 
793 #define STLB_4K		0x41
794 #define STLB_4K_2M	0x42
795 
796 static const struct _tlb_table intel_tlb_table[] = {
797 	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
798 	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, fully associative" },
799 	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
800 	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
801 	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
802 	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
803 	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
804 	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
805 	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
806 	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
807 	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
808 	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
809 	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
810 	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
811 	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
812 	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
813 	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
814 	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
815 	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, fully associative" },
816 	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
817 	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
818 	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
819 	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
820 	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
821 	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
822 	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way, 4 entries" },
823 	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
824 	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
825 	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
826 	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
827 	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
828 	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
829 	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
830 	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
831 	{ 0xc2, TLB_DATA_2M_4M,		16,	" DTLB 2 MByte/4MByte pages, 4-way associative" },
832 	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
833 	{ 0x00, 0, 0 }
834 };
835 
836 static void intel_tlb_lookup(const unsigned char desc)
837 {
838 	unsigned char k;
839 	if (desc == 0)
840 		return;
841 
842 	/* look up this descriptor in the table */
843 	for (k = 0; intel_tlb_table[k].descriptor != desc &&
844 			intel_tlb_table[k].descriptor != 0; k++)
845 		;
846 
847 	if (intel_tlb_table[k].tlb_type == 0)
848 		return;
849 
850 	switch (intel_tlb_table[k].tlb_type) {
851 	case STLB_4K:
852 		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
853 			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
854 		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
855 			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
856 		break;
857 	case STLB_4K_2M:
858 		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
859 			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
860 		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
861 			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
862 		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
863 			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
864 		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
865 			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
866 		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
867 			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
868 		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
869 			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
870 		break;
871 	case TLB_INST_ALL:
872 		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
873 			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
874 		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
875 			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
876 		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
877 			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
878 		break;
879 	case TLB_INST_4K:
880 		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
881 			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
882 		break;
883 	case TLB_INST_4M:
884 		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
885 			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
886 		break;
887 	case TLB_INST_2M_4M:
888 		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
889 			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
890 		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
891 			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
892 		break;
893 	case TLB_DATA_4K:
894 	case TLB_DATA0_4K:
895 		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
896 			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
897 		break;
898 	case TLB_DATA_4M:
899 	case TLB_DATA0_4M:
900 		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
901 			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
902 		break;
903 	case TLB_DATA_2M_4M:
904 	case TLB_DATA0_2M_4M:
905 		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
906 			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
907 		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
908 			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
909 		break;
910 	case TLB_DATA_4K_4M:
911 		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
912 			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
913 		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
914 			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
915 		break;
916 	case TLB_DATA_1G:
917 		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
918 			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
919 		break;
920 	}
921 }
922 
923 static void intel_detect_tlb(struct cpuinfo_x86 *c)
924 {
925 	int i, j, n;
926 	unsigned int regs[4];
927 	unsigned char *desc = (unsigned char *)regs;
928 
929 	if (c->cpuid_level < 2)
930 		return;
931 
932 	/* The low byte of CPUID(2) EAX gives the number of times to iterate */
933 	n = cpuid_eax(2) & 0xFF;
934 
935 	for (i = 0 ; i < n ; i++) {
936 		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);
937 
938 		/* If bit 31 is set, this is an unknown format */
939 		for (j = 0 ; j < 3 ; j++)
940 			if (regs[j] & (1 << 31))
941 				regs[j] = 0;
942 
943 		/* Byte 0 of EAX is the repeat count, not a descriptor */
944 		for (j = 1 ; j < 16 ; j++)
945 			intel_tlb_lookup(desc[j]);
946 	}
947 }
948 
949 static const struct cpu_dev intel_cpu_dev = {
950 	.c_vendor	= "Intel",
951 	.c_ident	= { "GenuineIntel" },
952 #ifdef CONFIG_X86_32
953 	.legacy_models = {
954 		{ .family = 4, .model_names =
955 		  {
956 			  [0] = "486 DX-25/33",
957 			  [1] = "486 DX-50",
958 			  [2] = "486 SX",
959 			  [3] = "486 DX/2",
960 			  [4] = "486 SL",
961 			  [5] = "486 SX/2",
962 			  [7] = "486 DX/2-WB",
963 			  [8] = "486 DX/4",
964 			  [9] = "486 DX/4-WB"
965 		  }
966 		},
967 		{ .family = 5, .model_names =
968 		  {
969 			  [0] = "Pentium 60/66 A-step",
970 			  [1] = "Pentium 60/66",
971 			  [2] = "Pentium 75 - 200",
972 			  [3] = "OverDrive PODP5V83",
973 			  [4] = "Pentium MMX",
974 			  [7] = "Mobile Pentium 75 - 200",
975 			  [8] = "Mobile Pentium MMX",
976 			  [9] = "Quark SoC X1000",
977 		  }
978 		},
979 		{ .family = 6, .model_names =
980 		  {
981 			  [0] = "Pentium Pro A-step",
982 			  [1] = "Pentium Pro",
983 			  [3] = "Pentium II (Klamath)",
984 			  [4] = "Pentium II (Deschutes)",
985 			  [5] = "Pentium II (Deschutes)",
986 			  [6] = "Mobile Pentium II",
987 			  [7] = "Pentium III (Katmai)",
988 			  [8] = "Pentium III (Coppermine)",
989 			  [10] = "Pentium III (Cascades)",
990 			  [11] = "Pentium III (Tualatin)",
991 		  }
992 		},
993 		{ .family = 15, .model_names =
994 		  {
995 			  [0] = "Pentium 4 (Unknown)",
996 			  [1] = "Pentium 4 (Willamette)",
997 			  [2] = "Pentium 4 (Northwood)",
998 			  [4] = "Pentium 4 (Foster)",
999 			  [5] = "Pentium 4 (Foster)",
1000 		  }
1001 		},
1002 	},
1003 	.legacy_cache_size = intel_size_cache,
1004 #endif
1005 	.c_detect_tlb	= intel_detect_tlb,
1006 	.c_early_init   = early_init_intel,
1007 	.c_init		= init_intel,
1008 	.c_bsp_resume	= intel_bsp_resume,
1009 	.c_x86_vendor	= X86_VENDOR_INTEL,
1010 };
1011 
1012 cpu_dev_register(intel_cpu_dev);
1013 
1014