xref: /openbmc/linux/arch/x86/kernel/cpu/amd.c (revision 2cf1c348)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/export.h>
3 #include <linux/bitops.h>
4 #include <linux/elf.h>
5 #include <linux/mm.h>
6 
7 #include <linux/io.h>
8 #include <linux/sched.h>
9 #include <linux/sched/clock.h>
10 #include <linux/random.h>
11 #include <linux/topology.h>
12 #include <asm/processor.h>
13 #include <asm/apic.h>
14 #include <asm/cacheinfo.h>
15 #include <asm/cpu.h>
16 #include <asm/spec-ctrl.h>
17 #include <asm/smp.h>
18 #include <asm/numa.h>
19 #include <asm/pci-direct.h>
20 #include <asm/delay.h>
21 #include <asm/debugreg.h>
22 #include <asm/resctrl.h>
23 
24 #ifdef CONFIG_X86_64
25 # include <asm/mmconfig.h>
26 #endif
27 
28 #include "cpu.h"
29 
30 static const int amd_erratum_383[];
31 static const int amd_erratum_400[];
32 static const int amd_erratum_1054[];
33 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
34 
35 /*
36  * nodes_per_socket: Stores the number of nodes per socket.
37  * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
38  * Node Identifiers[10:8]
39  */
40 static u32 nodes_per_socket = 1;
41 
42 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
43 {
44 	u32 gprs[8] = { 0 };
45 	int err;
46 
47 	WARN_ONCE((boot_cpu_data.x86 != 0xf),
48 		  "%s should only be used on K8!\n", __func__);
49 
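	/*
	 * gprs[] follows the rdmsr_safe_regs() register layout
	 * {eax, ecx, edx, ebx, -, ebp, esi, edi}: ECX carries the MSR number
	 * and EDI the 0x9c5a203a key required to access certain K8 MSRs.
	 */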
50 	gprs[1] = msr;
51 	gprs[7] = 0x9c5a203a;
52 
53 	err = rdmsr_safe_regs(gprs);
54 
55 	*p = gprs[0] | ((u64)gprs[2] << 32);
56 
57 	return err;
58 }
59 
60 static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
61 {
62 	u32 gprs[8] = { 0 };
63 
64 	WARN_ONCE((boot_cpu_data.x86 != 0xf),
65 		  "%s should only be used on K8!\n", __func__);
66 
67 	gprs[0] = (u32)val;
68 	gprs[1] = msr;
69 	gprs[2] = val >> 32;
70 	gprs[7] = 0x9c5a203a;
71 
72 	return wrmsr_safe_regs(gprs);
73 }
74 
75 /*
76  *	B stepping AMD K6 parts before B 9730xxxx have hardware bugs that can
77  *	cause misexecution of code under Linux. Owners of such processors
78  *	should contact AMD for precise details and a CPU swap.
79  *
80  *	See	http://www.multimania.com/poulot/k6bug.html
81  *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
82  *		(Publication # 21266  Issue Date: August 1998)
83  *
84  *	The following test is admittedly crude: AMD neglected to bump the
85  *	stepping when fixing the bug, but also improved indirect-call
86  *	performance at the same time, so the timing loop below tells them apart.
87  */
88 
89 #ifdef CONFIG_X86_32
90 extern __visible void vide(void);
91 __asm__(".text\n"
92 	".globl vide\n"
93 	".type vide, @function\n"
94 	".align 4\n"
95 	"vide: ret\n");
96 #endif
97 
98 static void init_amd_k5(struct cpuinfo_x86 *c)
99 {
100 #ifdef CONFIG_X86_32
101 /*
102  * General Systems BIOSes alias the CPU frequency registers
103  * of the Elan at 0x000df000. Unfortunately, one of the Linux
104  * drivers subsequently pokes it, and changes the CPU speed.
105  * Workaround: remove the unneeded alias.
106  */
107 #define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
108 #define CBAR_ENB	(0x80000000)
109 #define CBAR_KEY	(0X000000CB)
110 	if (c->x86_model == 9 || c->x86_model == 10) {
111 		if (inl(CBAR) & CBAR_ENB)
112 			outl(0 | CBAR_KEY, CBAR);
113 	}
114 #endif
115 }
116 
117 static void init_amd_k6(struct cpuinfo_x86 *c)
118 {
119 #ifdef CONFIG_X86_32
120 	u32 l, h;
121 	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
122 
123 	if (c->x86_model < 6) {
124 		/* Based on AMD doc 20734R - June 2000 */
125 		if (c->x86_model == 0) {
126 			clear_cpu_cap(c, X86_FEATURE_APIC);
127 			set_cpu_cap(c, X86_FEATURE_PGE);
128 		}
129 		return;
130 	}
131 
132 	if (c->x86_model == 6 && c->x86_stepping == 1) {
133 		const int K6_BUG_LOOP = 1000000;
134 		int n;
135 		void (*f_vide)(void);
136 		u64 d, d2;
137 
138 		pr_info("AMD K6 stepping B detected - ");
139 
140 		/*
141 		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
142 		 * calls at the same time.
143 		 */
144 
145 		n = K6_BUG_LOOP;
146 		f_vide = vide;
147 		OPTIMIZER_HIDE_VAR(f_vide);
148 		d = rdtsc();
149 		while (n--)
150 			f_vide();
151 		d2 = rdtsc();
152 		d = d2-d;
153 
154 		if (d > 20*K6_BUG_LOOP)
155 			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
156 		else
157 			pr_cont("probably OK (after B9730xxxx).\n");
158 	}
159 
160 	/* K6 with old style WHCR */
161 	if (c->x86_model < 8 ||
162 	   (c->x86_model == 8 && c->x86_stepping < 8)) {
163 		/* We can only write-allocate on the low 508 MB */
164 		if (mbytes > 508)
165 			mbytes = 508;
166 
167 		rdmsr(MSR_K6_WHCR, l, h);
168 		if ((l&0x0000FFFF) == 0) {
169 			unsigned long flags;
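			/* The limit in 4 MB units goes in bits [7:1], hence the 508 MB cap (127 * 4 MB). */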
170 			l = (1<<0)|((mbytes/4)<<1);
171 			local_irq_save(flags);
172 			wbinvd();
173 			wrmsr(MSR_K6_WHCR, l, h);
174 			local_irq_restore(flags);
175 			pr_info("Enabling old style K6 write allocation for %d Mb\n",
176 				mbytes);
177 		}
178 		return;
179 	}
180 
181 	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
182 	     c->x86_model == 9 || c->x86_model == 13) {
183 		/* The more serious chips .. */
184 
185 		if (mbytes > 4092)
186 			mbytes = 4092;
187 
188 		rdmsr(MSR_K6_WHCR, l, h);
189 		if ((l&0xFFFF0000) == 0) {
190 			unsigned long flags;
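			/* Here the limit in 4 MB units goes in bits [31:22], hence the 4092 MB cap (1023 * 4 MB). */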
191 			l = ((mbytes>>2)<<22)|(1<<16);
192 			local_irq_save(flags);
193 			wbinvd();
194 			wrmsr(MSR_K6_WHCR, l, h);
195 			local_irq_restore(flags);
196 			pr_info("Enabling new style K6 write allocation for %d Mb\n",
197 				mbytes);
198 		}
199 
200 		return;
201 	}
202 
203 	if (c->x86_model == 10) {
204 		/* AMD Geode LX is model 10 */
205 		/* placeholder for any needed mods */
206 		return;
207 	}
208 #endif
209 }
210 
211 static void init_amd_k7(struct cpuinfo_x86 *c)
212 {
213 #ifdef CONFIG_X86_32
214 	u32 l, h;
215 
216 	/*
217 	 * Bit 15 of the Athlon-specific MSR_K7_HWCR needs to be 0
218 	 * to enable SSE on Palomino/Morgan/Barton CPUs.
219 	 * If the BIOS didn't enable it already, enable it here.
220 	 */
221 	if (c->x86_model >= 6 && c->x86_model <= 10) {
222 		if (!cpu_has(c, X86_FEATURE_XMM)) {
223 			pr_info("Enabling disabled K7/SSE Support.\n");
224 			msr_clear_bit(MSR_K7_HWCR, 15);
225 			set_cpu_cap(c, X86_FEATURE_XMM);
226 		}
227 	}
228 
229 	/*
230 	 * It's been determined by AMD that Athlons since model 8 stepping 1
231 	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
232 	 * as per AMD technical note 27212 0.2.
233 	 */
234 	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
235 		rdmsr(MSR_K7_CLK_CTL, l, h);
236 		if ((l & 0xfff00000) != 0x20000000) {
237 			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
238 				l, ((l & 0x000fffff)|0x20000000));
239 			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
240 		}
241 	}
242 
243 	/* Is this call coming from identify_secondary_cpu()? */
244 	if (!c->cpu_index)
245 		return;
246 
247 	/*
248 	 * Certain Athlons might work (for various values of 'work') in SMP
249 	 * but they are not certified as MP capable.
250 	 */
251 	/* Athlon 660/661 is valid. */
252 	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
253 	    (c->x86_stepping == 1)))
254 		return;
255 
256 	/* Duron 670 is valid */
257 	if ((c->x86_model == 7) && (c->x86_stepping == 0))
258 		return;
259 
260 	/*
261 	 * Athlon 662, Duron 671, and Athlons above model 7 have the MP
262 	 * capability bit. It's worth noting that the A5 stepping (662) of some
263 	 * Athlon XPs has the MP bit set.
264 	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
265 	 * more.
266 	 */
267 	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
268 	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
269 	     (c->x86_model > 7))
270 		if (cpu_has(c, X86_FEATURE_MP))
271 			return;
272 
273 	/* If we get here, not a certified SMP capable AMD system. */
274 
275 	/*
276 	 * Don't taint if we are running SMP kernel on a single non-MP
277 	 * approved Athlon
278 	 */
279 	WARN_ONCE(1, "WARNING: This combination of AMD"
280 		" processors is not suitable for SMP.\n");
281 	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
282 #endif
283 }
284 
285 #ifdef CONFIG_NUMA
286 /*
287  * To work around a broken NUMA config.  Read the comment in
288  * srat_detect_node().
289  */
290 static int nearby_node(int apicid)
291 {
292 	int i, node;
293 
294 	for (i = apicid - 1; i >= 0; i--) {
295 		node = __apicid_to_node[i];
296 		if (node != NUMA_NO_NODE && node_online(node))
297 			return node;
298 	}
299 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
300 		node = __apicid_to_node[i];
301 		if (node != NUMA_NO_NODE && node_online(node))
302 			return node;
303 	}
304 	return first_node(node_online_map); /* Shouldn't happen */
305 }
306 #endif
307 
308 /*
309  * Fix up cpu_core_id for pre-F17h systems to be in the
310  * [0 .. cores_per_node - 1] range. Not really needed but
311  * kept so as not to break existing setups.
312  */
313 static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
314 {
315 	u32 cus_per_node;
316 
317 	if (c->x86 >= 0x17)
318 		return;
319 
320 	cus_per_node = c->x86_max_cores / nodes_per_socket;
321 	c->cpu_core_id %= cus_per_node;
322 }
323 
324 /*
325  * Fixup core topology information for
326  * (1) AMD multi-node processors
327  *     Assumption: Number of cores in each internal node is the same.
328  * (2) AMD processors supporting compute units
329  */
330 static void amd_get_topology(struct cpuinfo_x86 *c)
331 {
332 	int cpu = smp_processor_id();
333 
334 	/* get information required for multi-node processors */
335 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
336 		int err;
337 		u32 eax, ebx, ecx, edx;
338 
339 		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
340 
341 		c->cpu_die_id  = ecx & 0xff;
342 
343 		if (c->x86 == 0x15)
344 			c->cu_id = ebx & 0xff;
345 
346 		if (c->x86 >= 0x17) {
347 			c->cpu_core_id = ebx & 0xff;
348 
349 			if (smp_num_siblings > 1)
350 				c->x86_max_cores /= smp_num_siblings;
351 		}
352 
353 		/*
354 		 * If leaf 0xB is available, use it to derive
355 		 * topology information.
356 		 */
357 		err = detect_extended_topology(c);
358 		if (!err)
359 			c->x86_coreid_bits = get_count_order(c->x86_max_cores);
360 
361 		cacheinfo_amd_init_llc_id(c, cpu);
362 
363 	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
364 		u64 value;
365 
366 		rdmsrl(MSR_FAM10H_NODE_ID, value);
367 		c->cpu_die_id = value & 7;
368 
369 		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
370 	} else
371 		return;
372 
373 	if (nodes_per_socket > 1) {
374 		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
375 		legacy_fixup_core_id(c);
376 	}
377 }
378 
379 /*
380  * On an AMD dual-core setup the lower bits of the APIC ID distinguish the cores.
381  * Assumes number of cores is a power of two.
382  */
383 static void amd_detect_cmp(struct cpuinfo_x86 *c)
384 {
385 	unsigned bits;
386 	int cpu = smp_processor_id();
387 
388 	bits = c->x86_coreid_bits;
389 	/* Low order bits define the core id (index of core in socket) */
390 	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
391 	/* Convert the initial APIC ID into the socket ID */
392 	c->phys_proc_id = c->initial_apicid >> bits;
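	/* Example: with x86_coreid_bits == 3, initial APIC ID 0x1a gives core id 2 in socket 3. */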
393 	/* use socket ID also for last level cache */
394 	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
395 }
396 
397 static void amd_detect_ppin(struct cpuinfo_x86 *c)
398 {
399 	unsigned long long val;
400 
401 	if (!cpu_has(c, X86_FEATURE_AMD_PPIN))
402 		return;
403 
404 	/* When PPIN is defined in CPUID, still need to check PPIN_CTL MSR */
405 	if (rdmsrl_safe(MSR_AMD_PPIN_CTL, &val))
406 		goto clear_ppin;
407 
408 	/* PPIN is locked in disabled mode, clear feature bit */
409 	if ((val & 3UL) == 1UL)
410 		goto clear_ppin;
411 
412 	/* If PPIN is disabled, try to enable it */
413 	if (!(val & 2UL)) {
414 		wrmsrl_safe(MSR_AMD_PPIN_CTL,  val | 2UL);
415 		rdmsrl_safe(MSR_AMD_PPIN_CTL, &val);
416 	}
417 
418 	/* If PPIN_EN bit is 1, return from here; otherwise fall through */
419 	if (val & 2UL)
420 		return;
421 
422 clear_ppin:
423 	clear_cpu_cap(c, X86_FEATURE_AMD_PPIN);
424 }
425 
426 u32 amd_get_nodes_per_socket(void)
427 {
428 	return nodes_per_socket;
429 }
430 EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
431 
432 static void srat_detect_node(struct cpuinfo_x86 *c)
433 {
434 #ifdef CONFIG_NUMA
435 	int cpu = smp_processor_id();
436 	int node;
437 	unsigned apicid = c->apicid;
438 
439 	node = numa_cpu_node(cpu);
440 	if (node == NUMA_NO_NODE)
441 		node = get_llc_id(cpu);
442 
443 	/*
444 	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
445 	 * platform-specific handler needs to be called to fixup some
446 	 * IDs of the CPU.
447 	 */
448 	if (x86_cpuinit.fixup_cpu_id)
449 		x86_cpuinit.fixup_cpu_id(c, node);
450 
451 	if (!node_online(node)) {
452 		/*
453 		 * Two possibilities here:
454 		 *
455 		 * - The CPU is missing memory and no node was created.  In
456 		 *   that case try picking one from a nearby CPU.
457 		 *
458 		 * - The APIC IDs differ from the HyperTransport node IDs
459 		 *   which the K8 northbridge parsing fills in.  Assume
460 		 *   they are all increased by a constant offset, but in
461 		 *   the same order as the HT nodeids.  If that doesn't
462 		 *   result in a usable node fall back to the path for the
463 		 *   previous case.
464 		 *
465 		 * This workaround operates directly on the mapping between
466 		 * APIC ID and NUMA node, assuming certain relationship
467 		 * between APIC ID, HT node ID and NUMA topology.  As going
468 		 * through CPU mapping may alter the outcome, directly
469 		 * access __apicid_to_node[].
470 		 */
471 		int ht_nodeid = c->initial_apicid;
472 
473 		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
474 			node = __apicid_to_node[ht_nodeid];
475 		/* Pick a nearby node */
476 		if (!node_online(node))
477 			node = nearby_node(apicid);
478 	}
479 	numa_set_node(cpu, node);
480 #endif
481 }
482 
483 static void early_init_amd_mc(struct cpuinfo_x86 *c)
484 {
485 #ifdef CONFIG_SMP
486 	unsigned bits, ecx;
487 
488 	/* Multi core CPU? */
489 	if (c->extended_cpuid_level < 0x80000008)
490 		return;
491 
492 	ecx = cpuid_ecx(0x80000008);
493 
494 	c->x86_max_cores = (ecx & 0xff) + 1;
495 
496 	/* Does the CPU report the core ID bit shift? */
497 	bits = (ecx >> 12) & 0xF;
498 
499 	/* Otherwise recompute */
500 	if (bits == 0) {
501 		while ((1 << bits) < c->x86_max_cores)
502 			bits++;
503 	}
504 
505 	c->x86_coreid_bits = bits;
506 #endif
507 }
508 
509 static void bsp_init_amd(struct cpuinfo_x86 *c)
510 {
511 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
512 
513 		if (c->x86 > 0x10 ||
514 		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
515 			u64 val;
516 
517 			rdmsrl(MSR_K7_HWCR, val);
518 			if (!(val & BIT(24)))
519 				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
520 		}
521 	}
522 
523 	if (c->x86 == 0x15) {
524 		unsigned long upperbit;
525 		u32 cpuid, assoc;
526 
527 		cpuid	 = cpuid_edx(0x80000005);
528 		assoc	 = cpuid >> 16 & 0xff;
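		/*
		 * Per CPUID Fn8000_0005 EDX, bits [31:24] give the L1D size in KB
		 * and [23:16] its associativity, so upperbit is the size of one
		 * cache way in bytes.
		 */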
529 		upperbit = ((cpuid >> 24) << 10) / assoc;
530 
531 		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
532 		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;
533 
534 		/* A random value per boot for bit slice [12:upper_bit) */
535 		va_align.bits = get_random_int() & va_align.mask;
536 	}
537 
538 	if (cpu_has(c, X86_FEATURE_MWAITX))
539 		use_mwaitx_delay();
540 
541 	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
542 		u32 ecx;
543 
544 		ecx = cpuid_ecx(0x8000001e);
545 		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
546 	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
547 		u64 value;
548 
549 		rdmsrl(MSR_FAM10H_NODE_ID, value);
550 		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
551 	}
552 
553 	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
554 	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
555 	    c->x86 >= 0x15 && c->x86 <= 0x17) {
556 		unsigned int bit;
557 
558 		switch (c->x86) {
559 		case 0x15: bit = 54; break;
560 		case 0x16: bit = 33; break;
561 		case 0x17: bit = 10; break;
562 		default: return;
563 		}
564 		/*
565 		 * Try to cache the base value so further operations can
566 		 * avoid RMW. If that faults, do not enable SSBD.
567 		 */
568 		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
569 			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
570 			setup_force_cpu_cap(X86_FEATURE_SSBD);
571 			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
572 		}
573 	}
574 
575 	resctrl_cpu_detect(c);
576 }
577 
578 static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
579 {
580 	u64 msr;
581 
582 	/*
583 	 * BIOS support is required for SME and SEV.
584 	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
585 	 *	      the SME physical address space reduction value.
586 	 *	      If BIOS has not enabled SME then don't advertise the
587 	 *	      SME feature (set in scattered.c).
588 	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
589 	 *            SEV and SEV_ES feature (set in scattered.c).
590 	 *
591 	 *   In all cases, since support for SME and SEV requires long mode,
592 	 *   don't advertise the feature under CONFIG_X86_32.
593 	 */
594 	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
595 		/* Check if memory encryption is enabled */
596 		rdmsrl(MSR_AMD64_SYSCFG, msr);
597 		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
598 			goto clear_all;
599 
600 		/*
601 		 * Always adjust physical address bits. Even though this
602 		 * will be a value above 32-bits this is still done for
603 		 * CONFIG_X86_32 so that accurate values are reported.
604 		 */
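		/* CPUID Fn8000_001F EBX[11:6] is the physical address bit reduction. */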
605 		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
606 
607 		if (IS_ENABLED(CONFIG_X86_32))
608 			goto clear_all;
609 
610 		rdmsrl(MSR_K7_HWCR, msr);
611 		if (!(msr & MSR_K7_HWCR_SMMLOCK))
612 			goto clear_sev;
613 
614 		return;
615 
616 clear_all:
617 		setup_clear_cpu_cap(X86_FEATURE_SME);
618 clear_sev:
619 		setup_clear_cpu_cap(X86_FEATURE_SEV);
620 		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
621 	}
622 }
623 
624 static void early_init_amd(struct cpuinfo_x86 *c)
625 {
626 	u64 value;
627 	u32 dummy;
628 
629 	early_init_amd_mc(c);
630 
631 	if (c->x86 >= 0xf)
632 		set_cpu_cap(c, X86_FEATURE_K8);
633 
634 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
635 
636 	/*
637 	 * c->x86_power is CPUID 8000_0007 EDX. Bit 8 means the TSC runs at a
638 	 * constant rate with P/T states and does not stop in deep C-states.
639 	 */
640 	if (c->x86_power & (1 << 8)) {
641 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
642 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
643 	}
644 
645 	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
646 	if (c->x86_power & BIT(12))
647 		set_cpu_cap(c, X86_FEATURE_ACC_POWER);
648 
649 	/* Bit 14 indicates the Runtime Average Power Limit interface. */
650 	if (c->x86_power & BIT(14))
651 		set_cpu_cap(c, X86_FEATURE_RAPL);
652 
653 #ifdef CONFIG_X86_64
654 	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
655 #else
656 	/*  Set MTRR capability flag if appropriate */
657 	if (c->x86 == 5)
658 		if (c->x86_model == 13 || c->x86_model == 9 ||
659 		    (c->x86_model == 8 && c->x86_stepping >= 8))
660 			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
661 #endif
662 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
663 	/*
664 	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
665 	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
666 	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
667 	 * after 16h.
668 	 */
669 	if (boot_cpu_has(X86_FEATURE_APIC)) {
670 		if (c->x86 > 0x16)
671 			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
672 		else if (c->x86 >= 0xf) {
673 			/* check CPU config space for extended APIC ID */
674 			unsigned int val;
675 
676 			val = read_pci_config(0, 24, 0, 0x68);
677 			if ((val >> 17 & 0x3) == 0x3)
678 				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
679 		}
680 	}
681 #endif
682 
683 	/*
684 	 * This is only needed to tell the kernel whether to use VMCALL
685 	 * and VMMCALL.  VMMCALL is never executed except under virt, so
686 	 * we can set it unconditionally.
687 	 */
688 	set_cpu_cap(c, X86_FEATURE_VMMCALL);
689 
690 	/* F16h erratum 793, CVE-2013-6885 */
691 	if (c->x86 == 0x16 && c->x86_model <= 0xf)
692 		msr_set_bit(MSR_AMD64_LS_CFG, 15);
693 
694 	/*
695 	 * Check whether the machine is affected by erratum 400. This is
696 	 * used to select the proper idle routine and to enable the check
697 	 * whether the machine is affected in arch_post_acpi_init(), which
698 	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
699 	 */
700 	if (cpu_has_amd_erratum(c, amd_erratum_400))
701 		set_cpu_bug(c, X86_BUG_AMD_E400);
702 
703 	early_detect_mem_encrypt(c);
704 
705 	/* Re-enable TopologyExtensions if switched off by BIOS */
706 	if (c->x86 == 0x15 &&
707 	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
708 	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
709 
710 		if (msr_set_bit(0xc0011005, 54) > 0) {
711 			rdmsrl(0xc0011005, value);
712 			if (value & BIT_64(54)) {
713 				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
714 				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
715 			}
716 		}
717 	}
718 
719 	if (cpu_has(c, X86_FEATURE_TOPOEXT))
720 		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
721 }
722 
723 static void init_amd_k8(struct cpuinfo_x86 *c)
724 {
725 	u32 level;
726 	u64 value;
727 
728 	/* On C+ stepping K8 rep microcode works well for copy/memset */
729 	level = cpuid_eax(1);
730 	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
731 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
732 
733 	/*
734 	 * Some BIOSes incorrectly force this feature, but only K8 revision D
735 	 * (model = 0x14) and later actually support it.
736 	 * (AMD Erratum #110, docId: 25759).
737 	 */
738 	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
739 		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
740 		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
741 			value &= ~BIT_64(32);
742 			wrmsrl_amd_safe(0xc001100d, value);
743 		}
744 	}
745 
746 	if (!c->x86_model_id[0])
747 		strcpy(c->x86_model_id, "Hammer");
748 
749 #ifdef CONFIG_SMP
750 	/*
751 	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
752 	 * bit 6 of msr C001_0015
753 	 *
754 	 * Errata 63 for SH-B3 steppings
755 	 * Errata 122 for all steppings (F+ have it disabled by default)
756 	 */
757 	msr_set_bit(MSR_K7_HWCR, 6);
758 #endif
759 	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
760 }
761 
762 static void init_amd_gh(struct cpuinfo_x86 *c)
763 {
764 #ifdef CONFIG_MMCONF_FAM10H
765 	/* do this for boot cpu */
766 	if (c == &boot_cpu_data)
767 		check_enable_amd_mmconf_dmi();
768 
769 	fam10h_check_enable_mmcfg();
770 #endif
771 
772 	/*
773 	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
774 	 * is always needed when GART is enabled, even in a kernel which has no
775 	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
776 	 * If it doesn't, we do it here as suggested by the BKDG.
777 	 *
778 	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
779 	 */
780 	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);
781 
782 	/*
783 	 * On family 10h BIOS may not have properly enabled WC+ support, causing
784 	 * it to be converted to CD memtype. This may result in performance
785 	 * degradation for certain nested-paging guests. Prevent this conversion
786 	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
787 	 *
788 	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
789 	 * guests on older kvm hosts.
790 	 */
791 	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);
792 
793 	if (cpu_has_amd_erratum(c, amd_erratum_383))
794 		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
795 }
796 
797 #define MSR_AMD64_DE_CFG	0xC0011029
798 
799 static void init_amd_ln(struct cpuinfo_x86 *c)
800 {
801 	/*
802 	 * Apply erratum 665 fix unconditionally so machines without a BIOS
803 	 * fix work.
804 	 */
805 	msr_set_bit(MSR_AMD64_DE_CFG, 31);
806 }
807 
808 static bool rdrand_force;
809 
810 static int __init rdrand_cmdline(char *str)
811 {
812 	if (!str)
813 		return -EINVAL;
814 
815 	if (!strcmp(str, "force"))
816 		rdrand_force = true;
817 	else
818 		return -EINVAL;
819 
820 	return 0;
821 }
822 early_param("rdrand", rdrand_cmdline);
823 
824 static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
825 {
826 	/*
827 	 * Saving of the MSR used to hide the RDRAND support during
828 	 * suspend/resume is done by arch/x86/power/cpu.c, which is
829 	 * dependent on CONFIG_PM_SLEEP.
830 	 */
831 	if (!IS_ENABLED(CONFIG_PM_SLEEP))
832 		return;
833 
834 	/*
835 	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
836 	 * RDRAND support using the CPUID function directly.
837 	 */
838 	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
839 		return;
840 
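	/*
	 * Bit 62 of MSR_AMD64_CPUID_FN_1 corresponds to CPUID Fn0000_0001
	 * ECX[30] (RDRAND); the cpuid_ecx(1) re-check below verifies that
	 * the override took effect.
	 */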
841 	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
842 
843 	/*
844 	 * Verify that the CPUID change has occurred in case the kernel is
845 	 * running virtualized and the hypervisor doesn't support the MSR.
846 	 */
847 	if (cpuid_ecx(1) & BIT(30)) {
848 		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
849 		return;
850 	}
851 
852 	clear_cpu_cap(c, X86_FEATURE_RDRAND);
853 	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
854 }
855 
856 static void init_amd_jg(struct cpuinfo_x86 *c)
857 {
858 	/*
859 	 * Some BIOS implementations do not restore proper RDRAND support
860 	 * across suspend and resume. Check whether to hide the RDRAND
861 	 * instruction support via CPUID.
862 	 */
863 	clear_rdrand_cpuid_bit(c);
864 }
865 
866 static void init_amd_bd(struct cpuinfo_x86 *c)
867 {
868 	u64 value;
869 
870 	/*
871 	 * The IC way access filter has a performance penalty on some workloads.
872 	 * Disable it on the affected CPUs.
873 	 */
874 	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
875 		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
876 			value |= 0x1E;
877 			wrmsrl_safe(MSR_F15H_IC_CFG, value);
878 		}
879 	}
880 
881 	/*
882 	 * Some BIOS implementations do not restore proper RDRAND support
883 	 * across suspend and resume. Check whether to hide the RDRAND
884 	 * instruction support via CPUID.
885 	 */
886 	clear_rdrand_cpuid_bit(c);
887 }
888 
889 static void init_amd_zn(struct cpuinfo_x86 *c)
890 {
891 	set_cpu_cap(c, X86_FEATURE_ZEN);
892 
893 #ifdef CONFIG_NUMA
894 	node_reclaim_distance = 32;
895 #endif
896 
897 	/*
898 	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
899 	 * Always set it, except when running under a hypervisor.
900 	 */
901 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
902 		set_cpu_cap(c, X86_FEATURE_CPB);
903 }
904 
905 static void init_amd(struct cpuinfo_x86 *c)
906 {
907 	early_init_amd(c);
908 
909 	/*
910 	 * Bit 31 in the standard CPUID leaf is used for a nonstandard 3DNow ID;
911 	 * 3DNow is identified by bit 31 in the extended CPUID leaf (1*32+31) anyway.
912 	 */
913 	clear_cpu_cap(c, 0*32+31);
914 
915 	if (c->x86 >= 0x10)
916 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
917 
918 	/* get apicid instead of initial apic id from cpuid */
919 	c->apicid = hard_smp_processor_id();
920 
921 	/* K6s report MCEs but don't actually have all the MSRs */
922 	if (c->x86 < 6)
923 		clear_cpu_cap(c, X86_FEATURE_MCE);
924 
925 	switch (c->x86) {
926 	case 4:    init_amd_k5(c); break;
927 	case 5:    init_amd_k6(c); break;
928 	case 6:	   init_amd_k7(c); break;
929 	case 0xf:  init_amd_k8(c); break;
930 	case 0x10: init_amd_gh(c); break;
931 	case 0x12: init_amd_ln(c); break;
932 	case 0x15: init_amd_bd(c); break;
933 	case 0x16: init_amd_jg(c); break;
934 	case 0x17: fallthrough;
935 	case 0x19: init_amd_zn(c); break;
936 	}
937 
938 	/*
939 	 * Enable the workaround for the FXSAVE leak on CPUs
940 	 * without the XSaveErPtr feature.
941 	 */
942 	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
943 		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);
944 
945 	cpu_detect_cache_sizes(c);
946 
947 	amd_detect_cmp(c);
948 	amd_get_topology(c);
949 	srat_detect_node(c);
950 	amd_detect_ppin(c);
951 
952 	init_amd_cacheinfo(c);
953 
954 	if (cpu_has(c, X86_FEATURE_XMM2)) {
955 		/*
956 		 * Use LFENCE for execution serialization.  On families which
957 		 * don't have that MSR, LFENCE is already serializing.
958 		 * msr_set_bit() uses the safe accessors, too, even if the MSR
959 		 * is not present.
960 		 */
961 		msr_set_bit(MSR_F10H_DECFG,
962 			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
963 
964 		/* A serializing LFENCE stops RDTSC speculation */
965 		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
966 	}
967 
968 	/*
969 	 * Processors of family 0x12 and above have an APIC timer
970 	 * that keeps running in deep C-states.
971 	 */
972 	if (c->x86 > 0x11)
973 		set_cpu_cap(c, X86_FEATURE_ARAT);
974 
975 	/* 3DNow or LM implies PREFETCHW */
976 	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
977 		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
978 			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
979 
980 	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
981 	if (!cpu_has(c, X86_FEATURE_XENPV))
982 		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
983 
984 	/*
985 	 * Turn on the Instructions Retired free counter on machines not
986 	 * susceptible to erratum #1054 "Instructions Retired Performance
987 	 * Counter May Be Inaccurate".
988 	 */
989 	if (cpu_has(c, X86_FEATURE_IRPERF) &&
990 	    !cpu_has_amd_erratum(c, amd_erratum_1054))
991 		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
992 
993 	check_null_seg_clears_base(c);
994 }
995 
996 #ifdef CONFIG_X86_32
997 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
998 {
999 	/* AMD errata T13 (order #21922) */
1000 	if (c->x86 == 6) {
1001 		/* Duron Rev A0 */
1002 		if (c->x86_model == 3 && c->x86_stepping == 0)
1003 			size = 64;
1004 		/* Tbird rev A1/A2 */
1005 		if (c->x86_model == 4 &&
1006 			(c->x86_stepping == 0 || c->x86_stepping == 1))
1007 			size = 256;
1008 	}
1009 	return size;
1010 }
1011 #endif
1012 
1013 static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
1014 {
1015 	u32 ebx, eax, ecx, edx;
1016 	u16 mask = 0xfff;
1017 
1018 	if (c->x86 < 0xf)
1019 		return;
1020 
1021 	if (c->extended_cpuid_level < 0x80000006)
1022 		return;
1023 
1024 	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
1025 
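	/* Fn8000_0006 EBX: [27:16] L2 DTLB 4K entries, [11:0] L2 ITLB 4K entries (12-bit fields, hence the 0xfff mask). */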
1026 	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
1027 	tlb_lli_4k[ENTRIES] = ebx & mask;
1028 
1029 	/*
1030 	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
1031 	 * characteristics from the CPUID function 0x80000005 instead.
1032 	 */
1033 	if (c->x86 == 0xf) {
1034 		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1035 		mask = 0xff;
1036 	}
1037 
1038 	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1039 	if (!((eax >> 16) & mask))
1040 		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
1041 	else
1042 		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
1043 
1044 	/* a 4M entry uses two 2M entries */
1045 	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
1046 
1047 	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
1048 	if (!(eax & mask)) {
1049 		/* Erratum 658 */
1050 		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
1051 			tlb_lli_2m[ENTRIES] = 1024;
1052 		} else {
1053 			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
1054 			tlb_lli_2m[ENTRIES] = eax & 0xff;
1055 		}
1056 	} else
1057 		tlb_lli_2m[ENTRIES] = eax & mask;
1058 
1059 	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
1060 }
1061 
1062 static const struct cpu_dev amd_cpu_dev = {
1063 	.c_vendor	= "AMD",
1064 	.c_ident	= { "AuthenticAMD" },
1065 #ifdef CONFIG_X86_32
1066 	.legacy_models = {
1067 		{ .family = 4, .model_names =
1068 		  {
1069 			  [3] = "486 DX/2",
1070 			  [7] = "486 DX/2-WB",
1071 			  [8] = "486 DX/4",
1072 			  [9] = "486 DX/4-WB",
1073 			  [14] = "Am5x86-WT",
1074 			  [15] = "Am5x86-WB"
1075 		  }
1076 		},
1077 	},
1078 	.legacy_cache_size = amd_size_cache,
1079 #endif
1080 	.c_early_init   = early_init_amd,
1081 	.c_detect_tlb	= cpu_detect_tlb_amd,
1082 	.c_bsp_init	= bsp_init_amd,
1083 	.c_init		= init_amd,
1084 	.c_x86_vendor	= X86_VENDOR_AMD,
1085 };
1086 
1087 cpu_dev_register(amd_cpu_dev);
1088 
1089 /*
1090  * AMD errata checking
1091  *
1092  * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
1093  * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
1094  * have an OSVW id assigned, which it takes as first argument. Both take a
1095  * variable number of family-specific model-stepping ranges created by
1096  * AMD_MODEL_RANGE().
1097  *
1098  * Example:
1099  *
1100  * const int amd_erratum_319[] =
1101  *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
1102  *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
1103  *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
1104  */
1105 
1106 #define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
1107 #define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
1108 #define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
1109 	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
1110 #define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
1111 #define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
1112 #define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
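/*
 * For instance, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf) encodes to 0x170002ff:
 * family 0x17, model/stepping 0x000 through 0x2ff.
 */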
1113 
1114 static const int amd_erratum_400[] =
1115 	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
1116 			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
1117 
1118 static const int amd_erratum_383[] =
1119 	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
1120 
1121 /* #1054: Instructions Retired Performance Counter May Be Inaccurate */
1122 static const int amd_erratum_1054[] =
1123 	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
1124 
1125 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
1126 {
1127 	int osvw_id = *erratum++;
1128 	u32 range;
1129 	u32 ms;
1130 
1131 	if (osvw_id >= 0 && osvw_id < 65536 &&
1132 	    cpu_has(cpu, X86_FEATURE_OSVW)) {
1133 		u64 osvw_len;
1134 
1135 		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
1136 		if (osvw_id < osvw_len) {
1137 			u64 osvw_bits;
1138 
1139 			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
1140 			    osvw_bits);
1141 			return osvw_bits & (1ULL << (osvw_id & 0x3f));
1142 		}
1143 	}
1144 
1145 	/* OSVW unavailable or ID unknown, match family-model-stepping range */
1146 	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
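	/* e.g. model 0x41, stepping 0x2 -> ms == 0x412 */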
1147 	while ((range = *erratum++))
1148 		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
1149 		    (ms >= AMD_MODEL_RANGE_START(range)) &&
1150 		    (ms <= AMD_MODEL_RANGE_END(range)))
1151 			return true;
1152 
1153 	return false;
1154 }
1155 
1156 void set_dr_addr_mask(unsigned long mask, int dr)
1157 {
1158 	if (!boot_cpu_has(X86_FEATURE_BPEXT))
1159 		return;
1160 
1161 	switch (dr) {
1162 	case 0:
1163 		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
1164 		break;
1165 	case 1:
1166 	case 2:
1167 	case 3:
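		/* The DR1..DR3 address mask MSRs are consecutive, so offset by (dr - 1). */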
1168 		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
1169 		break;
1170 	default:
1171 		break;
1172 	}
1173 }
1174 
1175 u32 amd_get_highest_perf(void)
1176 {
1177 	struct cpuinfo_x86 *c = &boot_cpu_data;
1178 
1179 	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
1180 			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
1181 		return 166;
1182 
1183 	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
1184 			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
1185 		return 166;
1186 
1187 	return 255;
1188 }
1189 EXPORT_SYMBOL_GPL(amd_get_highest_perf);
1190