xref: /openbmc/linux/arch/x86/kernel/cpu/amd.c (revision 2d78eb0342dd2c9c5cde9ae9ada1d33f189a858b)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <linux/topology.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
#include <asm/debugreg.h>
#include <asm/resctrl.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
#endif

#include "cpu.h"

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
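
/*
 * Worked example of the packing (illustration only, not from any AMD doc):
 * AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf) evaluates to
 *
 *	(0xf << 24) | (0x41 << 16) | (0x2 << 12) | (0xff << 4) | 0xf
 *	= 0x0f412fff
 *
 * so FAMILY = 0x0f, START = 0x412 and END = 0xfff. A family 0xf CPU with
 * model 0x41, stepping 0x2 computes ms = (0x41 << 4) | 0x2 = 0x412 in
 * cpu_has_amd_erratum() below and therefore matches this range.
 */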

static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static const int amd_erratum_1485[] =
	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
			   AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_stepping;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

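	/*
	 * gprs[] is handed to rdmsr_safe_regs(): index 0 maps to EAX, 1 to
	 * ECX (the MSR number), 2 to EDX and 7 to EDI. The 0x9c5a203a value
	 * in EDI appears to be the passcode K8 demands before it allows
	 * access to these MSRs; treat this mapping as an illustrative note
	 * rather than authoritative documentation.
	 */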
	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}

/*
 *	B step AMD K6 before B 9730xxxx have hardware bugs that can cause
 *	misexecution of code under Linux. Owners of such processors should
 *	contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *		(Publication # 21266  Issue Date: August 1998)
 *
 *	The following test is erm.. interesting. AMD neglected to up
 *	the chip setting when fixing the bug but they also tweaked some
 *	performance at the same time..
 */

#ifdef CONFIG_X86_32
extern __visible void vide(void);
__asm__(".text\n"
	".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");
#endif

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround : Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0X000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_stepping == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_stepping < 8)) {
		/* We can only write allocate on the low 508Mb */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
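			/*
			 * Sketch of the old-style WHCR encoding (illustrative;
			 * check the K6 docs before relying on it): bit 0
			 * enables write allocation and bits 7:1 hold the
			 * limit in 4 MB units, so the 508 MB cap above yields
			 * l = 1 | (127 << 1) = 0xff.
			 */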
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}

	if ((c->x86_model == 8 && c->x86_stepping > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of Athlon specific MSR 15, needs to be 0
	 * to enable SSE on Palomino/Morgan/Barton CPU's.
	 * If the BIOS didn't enable it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx
	 * As per AMD technical note 27212 0.2
	 */
	if ((c->x86_model == 8 && c->x86_stepping >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	/* Is this call coming from identify_secondary_cpu()? */
3066c62aa4aSYinghai Lu 	if (!c->cpu_index)
307f7627e25SThomas Gleixner 		return;
308f7627e25SThomas Gleixner 
309b89b41d0SSuravee Suthikulpanit 	/*
310b89b41d0SSuravee Suthikulpanit 	 * Certain Athlons might work (for various values of 'work') in SMP
311b89b41d0SSuravee Suthikulpanit 	 * but they are not certified as MP capable.
312b89b41d0SSuravee Suthikulpanit 	 */
313b89b41d0SSuravee Suthikulpanit 	/* Athlon 660/661 is valid. */
314b89b41d0SSuravee Suthikulpanit 	if ((c->x86_model == 6) && ((c->x86_stepping == 0) ||
315b89b41d0SSuravee Suthikulpanit 	    (c->x86_stepping == 1)))
316b89b41d0SSuravee Suthikulpanit 		return;
317b89b41d0SSuravee Suthikulpanit 
318b89b41d0SSuravee Suthikulpanit 	/* Duron 670 is valid */
319b89b41d0SSuravee Suthikulpanit 	if ((c->x86_model == 7) && (c->x86_stepping == 0))
320b89b41d0SSuravee Suthikulpanit 		return;
321b89b41d0SSuravee Suthikulpanit 
	/*
	 * Athlon 662, Duron 671, and Athlon >model 7 have the MP capability
	 * bit. It's worth noting that the A5 stepping (662) of some
	 * Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_stepping >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_stepping >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * To workaround broken NUMA config.  Read the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		c->cpu_die_id  = ecx & 0xff;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		cacheinfo_amd_init_llc_id(c, cpu);

	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->cpu_die_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish the
 * cores.  Assumes the number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
}
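
/*
 * Worked example (illustration only): with x86_coreid_bits == 2, an initial
 * APIC ID of 14 (0b1110) splits into cpu_core_id = 14 & 3 = 2 and
 * phys_proc_id = 14 >> 2 = 3.
 */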

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = get_llc_id(cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* CPU telling us the core id bits shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
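
/*
 * Example decode of CPUID Fn8000_0008 ECX (made-up value, for illustration
 * only): ECX = 0x3007 gives NC (bits 7:0) = 7, i.e. x86_max_cores = 8, and
 * ApicIdCoreIdSize (bits 15:12) = 3, i.e. x86_coreid_bits = 3.
 */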

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

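		/*
		 * CPUID Fn8000_0005 EDX describes the L1I cache: bits 31:24
		 * hold the size in KB, bits 23:16 the associativity. With a
		 * 64 KB, 2-way L1I (typical Fam15h values, quoted here purely
		 * as an illustration), upperbit = (64 << 10) / 2 = 0x8000 and
		 * va_align.mask ends up randomizing VA bits [14:12].
		 */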
		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_u32() & va_align.mask;
	}

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
	    c->x86 >= 0x15 && c->x86 <= 0x17) {
		unsigned int bit;

		switch (c->x86) {
		case 0x15: bit = 54; break;
		case 0x16: bit = 33; break;
		case 0x17: bit = 10; break;
		default: return;
		}
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
		}
	}

	resctrl_cpu_detect(c);

	/* Figure out Zen generations: */
	switch (c->x86) {
	case 0x17: {
		switch (c->x86_model) {
		case 0x00 ... 0x2f:
		case 0x50 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN1);
			break;
		case 0x30 ... 0x4f:
		case 0x60 ... 0x7f:
		case 0x90 ... 0x91:
		case 0xa0 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN2);
			break;
		default:
			goto warn;
		}
		break;
	}
	case 0x19: {
		switch (c->x86_model) {
		case 0x00 ... 0x0f:
		case 0x20 ... 0x5f:
			setup_force_cpu_cap(X86_FEATURE_ZEN3);
			break;
		case 0x10 ... 0x1f:
		case 0x60 ... 0xaf:
			setup_force_cpu_cap(X86_FEATURE_ZEN4);
			break;
		default:
			goto warn;
		}
		break;
	}
	default:
		break;
	}

	return;

warn:
	WARN_ONCE(1, "Family 0x%x, model: 0x%x??\n", c->x86, c->x86_model);
}

static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
{
	u64 msr;

	/*
	 * BIOS support is required for SME and SEV.
	 *   For SME: If BIOS has enabled SME then adjust x86_phys_bits by
	 *	      the SME physical address space reduction value.
	 *	      If BIOS has not enabled SME then don't advertise the
	 *	      SME feature (set in scattered.c).
	 *	      If the kernel has not enabled SME via any means then
	 *	      don't advertise the SME feature.
	 *   For SEV: If BIOS has not enabled SEV then don't advertise the
	 *            SEV and SEV_ES feature (set in scattered.c).
	 *
	 *   In all cases, since support for SME and SEV requires long mode,
	 *   don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
		/* Check if memory encryption is enabled */
		rdmsrl(MSR_AMD64_SYSCFG, msr);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			goto clear_all;

		/*
		 * Always adjust physical address bits. Even though this
		 * will be a value above 32-bits this is still done for
		 * CONFIG_X86_32 so that accurate values are reported.
		 */
		c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;

		if (IS_ENABLED(CONFIG_X86_32))
			goto clear_all;

		if (!sme_me_mask)
			setup_clear_cpu_cap(X86_FEATURE_SME);

		rdmsrl(MSR_K7_HWCR, msr);
		if (!(msr & MSR_K7_HWCR_SMMLOCK))
			goto clear_sev;

		return;

clear_all:
		setup_clear_cpu_cap(X86_FEATURE_SME);
clear_sev:
		setup_clear_cpu_cap(X86_FEATURE_SEV);
		setup_clear_cpu_cap(X86_FEATURE_SEV_ES);
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u64 value;
	u32 dummy;

	early_init_amd_mc(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

	/* Bit 14 indicates the Runtime Average Power Limit interface. */
	if (c->x86_power & BIT(14))
		set_cpu_cap(c, X86_FEATURE_RAPL);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/*  Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_stepping >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	early_detect_mem_encrypt(c);

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if (c->x86 == 0x15 &&
	    (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

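	/*
	 * CPUID Fn8000_001E EBX[15:8] is ThreadsPerComputeUnit, i.e. threads
	 * per core minus 1, so a value of 1 on an SMT-enabled Zen part yields
	 * smp_num_siblings = 2 (decode quoted from memory; verify against the
	 * PPR before relying on it).
	 */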
	if (cpu_has(c, X86_FEATURE_TOPOEXT))
		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
		if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
		else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
			setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
			setup_force_cpu_cap(X86_FEATURE_SBPB);
		}
	}
}

static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8 rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_MMCONF_FAM10H
	/* do this for boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply erratum 665 fix unconditionally so machines without a BIOS
	 * fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static bool rdrand_force;

static int __init rdrand_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "force"))
		rdrand_force = true;
	else
		return -EINVAL;

	return 0;
}
early_param("rdrand", rdrand_cmdline);

static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
{
	/*
	 * Saving of the MSR used to hide the RDRAND support during
	 * suspend/resume is done by arch/x86/power/cpu.c, which is
	 * dependent on CONFIG_PM_SLEEP.
	 */
	if (!IS_ENABLED(CONFIG_PM_SLEEP))
		return;

	/*
	 * The self-test can clear X86_FEATURE_RDRAND, so check for
	 * RDRAND support using the CPUID function directly.
	 */
	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
		return;

	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);

	/*
	 * Verify that the CPUID change has occurred in case the kernel is
	 * running virtualized and the hypervisor doesn't support the MSR.
	 */
	if (cpuid_ecx(1) & BIT(30)) {
		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
		return;
	}

	clear_cpu_cap(c, X86_FEATURE_RDRAND);
	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
}

static void init_amd_jg(struct cpuinfo_x86 *c)
{
	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/*
	 * The way access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}

	/*
	 * Some BIOS implementations do not restore proper RDRAND support
	 * across suspend and resume. Check on whether to hide the RDRAND
	 * instruction support via CPUID.
	 */
	clear_rdrand_cpuid_bit(c);
}

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
	/*
	 * Work around Erratum 1386.  The XSAVES instruction malfunctions in
	 * certain circumstances on Zen1/2 uarch, and not all parts have had
	 * updated microcode at the time of writing (March 2023).
	 *
	 * Affected parts all have no supervisor XSAVE states, meaning that
	 * the XSAVEC instruction (which works fine) is equivalent.
	 */
	clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

void init_spectral_chicken(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_CPU_UNRET_ENTRY
	u64 value;

	/*
	 * On Zen2 we offer this chicken (bit) on the altar of Speculation.
	 *
	 * This suppresses speculation from the middle of a basic block, i.e. it
	 * suppresses non-branch predictions.
	 *
	 * We use STIBP as a heuristic to filter out Zen2 from the rest of F17H
	 */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		if (!rdmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, &value)) {
			value |= MSR_ZEN2_SPECTRAL_CHICKEN_BIT;
			wrmsrl_safe(MSR_ZEN2_SPECTRAL_CHICKEN, value);
		}
	}
#endif
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	setup_force_cpu_cap(X86_FEATURE_ZEN);
#ifdef CONFIG_NUMA
	node_reclaim_distance = 32;
#endif
}

static void init_amd_zen1(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);

	/* Fix up CPUID bits, but only if not virtualised. */
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {

		/* Erratum 1076: CPB feature bit not being set in CPUID. */
		if (!cpu_has(c, X86_FEATURE_CPB))
			set_cpu_cap(c, X86_FEATURE_CPB);

		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (c->x86 == 0x19 && !cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}

	pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
	setup_force_cpu_bug(X86_BUG_DIV0);
}

static bool cpu_has_zenbleed_microcode(void)
{
	u32 good_rev = 0;

	switch (boot_cpu_data.x86_model) {
	case 0x30 ... 0x3f: good_rev = 0x0830107b; break;
	case 0x60 ... 0x67: good_rev = 0x0860010c; break;
	case 0x68 ... 0x6f: good_rev = 0x08608107; break;
	case 0x70 ... 0x7f: good_rev = 0x08701033; break;
	case 0xa0 ... 0xaf: good_rev = 0x08a00009; break;

1055b46882e4SBorislav Petkov 	default:
1056b46882e4SBorislav Petkov 		return false;
1057b46882e4SBorislav Petkov 		break;
1058b46882e4SBorislav Petkov 	}
1059b46882e4SBorislav Petkov 
1060b46882e4SBorislav Petkov 	if (boot_cpu_data.microcode < good_rev)
1061b46882e4SBorislav Petkov 		return false;
1062b46882e4SBorislav Petkov 
1063b46882e4SBorislav Petkov 	return true;
1064b46882e4SBorislav Petkov }
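
/*
 * Example: a family 0x17 model 0x31 part running microcode revision
 * 0x08301038 compares against good_rev 0x0830107b and is reported as
 * not (yet) carrying the Zenbleed fix.
 */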

static void zen2_zenbleed_check(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return;

	if (!cpu_has(c, X86_FEATURE_AVX))
		return;

	if (!cpu_has_zenbleed_microcode()) {
		pr_notice_once("Zenbleed: please update your microcode for the most optimal fix\n");
		msr_set_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	} else {
		msr_clear_bit(MSR_AMD64_DE_CFG, MSR_AMD64_DE_CFG_ZEN2_FP_BACKUP_FIX_BIT);
	}
}
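
/*
 * The chicken bit set above trades some FP/AVX performance for safety
 * until fixed microcode is loaded; amd_check_microcode() further down
 * re-runs this check on every CPU after a late microcode update so the
 * bit can be cleared again.
 */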

static void init_amd_zen2(struct cpuinfo_x86 *c)
{
	fix_erratum_1386(c);
	zen2_zenbleed_check(c);
}

static void init_amd_zen3(struct cpuinfo_x86 *c)
{
	if (!cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		/*
		 * Zen3 (Fam19 model < 0x10) parts are not susceptible to
		 * Branch Type Confusion, but predate the allocation of the
		 * BTC_NO bit.
		 */
		if (!cpu_has(c, X86_FEATURE_BTC_NO))
			set_cpu_cap(c, X86_FEATURE_BTC_NO);
	}
}

static void init_amd_zen4(struct cpuinfo_x86 *c)
{
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* AMD FSRM also implies FSRS */
	if (cpu_has(c, X86_FEATURE_FSRM))
		set_cpu_cap(c, X86_FEATURE_FSRS);

	/* Use the current APIC ID from the APIC, not the initial APIC ID from CPUID. */
	c->apicid = read_apic_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:	   init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x16: init_amd_jg(c); break;
	case 0x17: init_spectral_chicken(c);
		   fallthrough;
	case 0x19: init_amd_zn(c); break;
	}
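
	/*
	 * The ZEN1..ZEN4 synthetic features are mutually exclusive: they
	 * are derived from the family/model early in boot, so at most one
	 * of the branches below runs.
	 */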
	if (boot_cpu_has(X86_FEATURE_ZEN1))
		init_amd_zen1(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN2))
		init_amd_zen2(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN3))
		init_amd_zen3(c);
	else if (boot_cpu_has(X86_FEATURE_ZEN4))
		init_amd_zen4(c);

	/*
	 * Enable workaround for FXSAVE leak on CPUs
	 * without the XSaveErPtr feature
	 */
	if ((c->x86 >= 6) && (!cpu_has(c, X86_FEATURE_XSAVEERPTR)))
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	amd_detect_cmp(c);
	amd_get_topology(c);
	srat_detect_node(c);

	init_amd_cacheinfo(c);

	if (!cpu_has(c, X86_FEATURE_LFENCE_RDTSC) && cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * On family 0x12 and above the APIC timer keeps running in deep
	 * C-states, i.e. ARAT (Always Running APIC Timer) applies.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_feature_enabled(X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	/*
	 * Turn on the Instructions Retired free counter on machines not
	 * susceptible to erratum #1054 "Instructions Retired Performance
	 * Counter May Be Inaccurate".
	 */
	if (cpu_has(c, X86_FEATURE_IRPERF) &&
	    !(boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model <= 0x2f))
		msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);

	check_null_seg_clears_base(c);

	/*
	 * Make sure EFER[AIBRSE - Automatic IBRS Enable] is set. The APs are brought up
	 * using the trampoline code and as part of it, MSR_EFER gets prepared there in
	 * order to be replicated onto them. Regardless, set it here again, if not set,
	 * to protect against any future refactoring/code reorganization which might
	 * miss setting this important bit.
	 */
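	/*
	 * msr_set_bit() returns a negative value only if the MSR access
	 * itself faulted, so the WARN_ON_ONCE() below fires on an access
	 * failure, not when the bit was already set.
	 */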
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
	    cpu_has(c, X86_FEATURE_AUTOIBRS))
		WARN_ON_ONCE(msr_set_bit(MSR_EFER, _EFER_AUTOIBRS) < 0);

	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
	    cpu_has_amd_erratum(c, amd_erratum_1485))
		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);

	/* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */
	clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_stepping == 0 || c->x86_stepping == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
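
	/*
	 * CPUID 0x80000006 EBX describes the L2 4K TLBs: bits 27:16 hold
	 * the data-TLB entry count, bits 11:0 the instruction-TLB count.
	 * E.g. EBX = 0x04000400 decodes to 1024 entries for each.
	 */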
	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[4], amd_dr_addr_mask);

static unsigned int amd_msr_dr_addr_masks[] = {
	MSR_F16H_DR0_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK,
	MSR_F16H_DR1_ADDR_MASK + 1,
	MSR_F16H_DR1_ADDR_MASK + 2
};
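
/*
 * DR0's address-mask MSR is not adjacent to DR1's, so the table above
 * spells the MSRs out instead of using base + index arithmetic; only
 * DR2/DR3 follow DR1 contiguously.
 */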

void amd_set_dr_addr_mask(unsigned long mask, unsigned int dr)
{
	int cpu = smp_processor_id();

	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return;

	if (per_cpu(amd_dr_addr_mask, cpu)[dr] == mask)
		return;

	wrmsr(amd_msr_dr_addr_masks[dr], mask, 0);
	per_cpu(amd_dr_addr_mask, cpu)[dr] = mask;
}
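
/*
 * A set bit in the mask excludes that address bit from the breakpoint
 * comparison: e.g. amd_set_dr_addr_mask(0xfff, 0) widens a DR0
 * breakpoint to a whole 4K-aligned region. The per-CPU shadow above
 * exists purely to skip redundant WRMSRs.
 */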

unsigned long amd_get_dr_addr_mask(unsigned int dr)
{
	if (!cpu_feature_enabled(X86_FEATURE_BPEXT))
		return 0;

	if (WARN_ON_ONCE(dr >= ARRAY_SIZE(amd_msr_dr_addr_masks)))
		return 0;

	return per_cpu(amd_dr_addr_mask[dr], smp_processor_id());
}
EXPORT_SYMBOL_GPL(amd_get_dr_addr_mask);

u32 amd_get_highest_perf(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
			       (c->x86_model >= 0x70 && c->x86_model < 0x80)))
		return 166;

	if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
			       (c->x86_model >= 0x40 && c->x86_model < 0x70)))
		return 166;

	return 255;
}
EXPORT_SYMBOL_GPL(amd_get_highest_perf);
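
/*
 * Parts in the model ranges above report a CPPC highest performance of
 * 166 rather than the architectural ceiling of 255; callers (e.g.
 * frequency-invariance and cpufreq code) scale against this value.
 */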

static void zenbleed_check_cpu(void *unused)
{
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	zen2_zenbleed_check(c);
}

void amd_check_microcode(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return;

	if (cpu_feature_enabled(X86_FEATURE_ZEN2))
		on_each_cpu(zenbleed_check_cpu, NULL, 1);
}

/*
 * Issue a DIV 0/1 insn to clear any division data from previous DIV
 * operations.
 */
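/*
 * ALTERNATIVE() patches the DIV in only on CPUs flagged with
 * X86_BUG_DIV0; elsewhere this function is an empty stub. The operands
 * set up EDX:EAX = 0 and divide by 1 ("r" (1)), which overwrites the
 * divider's internal state without faulting.
 */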
void noinstr amd_clear_divider(void)
{
	asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
		     :: "a" (0), "d" (0), "r" (1));
}
EXPORT_SYMBOL_GPL(amd_clear_divider);