/* xref: /openbmc/linux/arch/x86/kernel/cpu/amd.c (revision 6aa7de05) */
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>

#ifdef CONFIG_X86_64
# include <asm/mmconfig.h>
# include <asm/set_memory.h>
#endif

#include "cpu.h"

static const int amd_erratum_383[];
static const int amd_erratum_400[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);

/*
 * nodes_per_socket: Stores the number of nodes per socket.
 * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
 * Node Identifiers[10:8]
 */
static u32 nodes_per_socket = 1;

static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	u32 gprs[8] = { 0 };
	int err;

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[1] = msr;
	gprs[7] = 0x9c5a203a;

	err = rdmsr_safe_regs(gprs);

	*p = gprs[0] | ((u64)gprs[2] << 32);

	return err;
}

static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
{
	u32 gprs[8] = { 0 };

	WARN_ONCE((boot_cpu_data.x86 != 0xf),
		  "%s should only be used on K8!\n", __func__);

	gprs[0] = (u32)val;
	gprs[1] = msr;
	gprs[2] = val >> 32;
	gprs[7] = 0x9c5a203a;

	return wrmsr_safe_regs(gprs);
}
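
/*
 * A note on the gprs[] convention above (a sketch, based on the
 * rdmsr_safe_regs()/wrmsr_safe_regs() register layout): the array maps
 * to the GPRs in the order eax, ecx, edx, ebx, esp, ebp, esi, edi.
 * Hence gprs[1] carries the MSR number in %ecx, gprs[0]/gprs[2] carry
 * the low/high MSR halves in %eax/%edx, and gprs[7] loads the magic K8
 * passcode 0x9c5a203a into %edi, which unlocks access to certain
 * family 0xf MSRs such as 0xc001100d used further down.
 */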

/*
 *	B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can
 *	cause misexecution of code under Linux. Owners of such processors
 *	should contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
 *		(Publication # 21266  Issue Date: August 1998)
 *
 *	The following test is, erm, interesting. AMD neglected to bump
 *	the chip revision when fixing the bug, but they also tweaked some
 *	performance at the same time.
 */

extern __visible void vide(void);
__asm__(".globl vide\n"
	".type vide, @function\n"
	".align 4\n"
	"vide: ret\n");

static void init_amd_k5(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
/*
 * General Systems BIOSen alias the cpu frequency registers
 * of the Elan at 0x000df000. Unfortunately, one of the Linux
 * drivers subsequently pokes it, and changes the CPU speed.
 * Workaround: Remove the unneeded alias.
 */
#define CBAR		(0xfffc) /* Configuration Base Address  (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
	if (c->x86_model == 9 || c->x86_model == 10) {
		if (inl(CBAR) & CBAR_ENB)
			outl(0 | CBAR_KEY, CBAR);
	}
#endif
}

static void init_amd_k6(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;
	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);

	if (c->x86_model < 6) {
		/* Based on AMD doc 20734R - June 2000 */
		if (c->x86_model == 0) {
			clear_cpu_cap(c, X86_FEATURE_APIC);
			set_cpu_cap(c, X86_FEATURE_PGE);
		}
		return;
	}

	if (c->x86_model == 6 && c->x86_mask == 1) {
		const int K6_BUG_LOOP = 1000000;
		int n;
		void (*f_vide)(void);
		u64 d, d2;

		pr_info("AMD K6 stepping B detected - ");

		/*
		 * It looks like AMD fixed the 2.6.2 bug and improved indirect
		 * calls at the same time.
		 */
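
		/*
		 * A rough sketch of the heuristic below: vide() is a bare
		 * "ret", so each loop iteration costs one indirect call.
		 * On fixed parts an indirect call retires in well under 20
		 * TSC ticks, so a total above 20 * K6_BUG_LOOP ticks is
		 * taken as a sign of an unfixed (pre-B9730xxxx) part. The
		 * 20-tick threshold is an empirical cutoff, not an
		 * architecturally specified value.
		 */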

		n = K6_BUG_LOOP;
		f_vide = vide;
		OPTIMIZER_HIDE_VAR(f_vide);
		d = rdtsc();
		while (n--)
			f_vide();
		d2 = rdtsc();
		d = d2-d;

		if (d > 20*K6_BUG_LOOP)
			pr_cont("system stability may be impaired when more than 32 MB are used.\n");
		else
			pr_cont("probably OK (after B9730xxxx).\n");
	}

	/* K6 with old style WHCR */
	if (c->x86_model < 8 ||
	   (c->x86_model == 8 && c->x86_mask < 8)) {
		/* We can only write-allocate on the low 508 MB */
		if (mbytes > 508)
			mbytes = 508;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0x0000FFFF) == 0) {
			unsigned long flags;
			l = (1<<0)|((mbytes/4)<<1);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling old style K6 write allocation for %d Mb\n",
				mbytes);
		}
		return;
	}
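
	/*
	 * For reference (as inferred from the two WHCR writes in this
	 * function): the old-style layout uses bit 0 as the write-allocate
	 * enable and bits [7:1] as the limit in 4 MB units, while the
	 * new-style layout on later steppings moves the enable to bit 16
	 * and the 4 MB-granular limit to bits [31:22].
	 */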

	if ((c->x86_model == 8 && c->x86_mask > 7) ||
	     c->x86_model == 9 || c->x86_model == 13) {
		/* The more serious chips .. */

		if (mbytes > 4092)
			mbytes = 4092;

		rdmsr(MSR_K6_WHCR, l, h);
		if ((l&0xFFFF0000) == 0) {
			unsigned long flags;
			l = ((mbytes>>2)<<22)|(1<<16);
			local_irq_save(flags);
			wbinvd();
			wrmsr(MSR_K6_WHCR, l, h);
			local_irq_restore(flags);
			pr_info("Enabling new style K6 write allocation for %d Mb\n",
				mbytes);
		}

		return;
	}

	if (c->x86_model == 10) {
		/* AMD Geode LX is model 10 */
		/* placeholder for any needed mods */
		return;
	}
#endif
}

static void init_amd_k7(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	u32 l, h;

	/*
	 * Bit 15 of the Athlon-specific MSR 15 needs to be 0 to enable
	 * SSE on Palomino/Morgan/Barton CPUs. If the BIOS didn't enable
	 * it already, enable it here.
	 */
	if (c->x86_model >= 6 && c->x86_model <= 10) {
		if (!cpu_has(c, X86_FEATURE_XMM)) {
			pr_info("Enabling disabled K7/SSE Support.\n");
			msr_clear_bit(MSR_K7_HWCR, 15);
			set_cpu_cap(c, X86_FEATURE_XMM);
		}
	}

	/*
	 * It's been determined by AMD that Athlons since model 8 stepping 1
	 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
	 * as per AMD technical note 27212 0.2.
	 */
	if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
		rdmsr(MSR_K7_CLK_CTL, l, h);
		if ((l & 0xfff00000) != 0x20000000) {
			pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
				l, ((l & 0x000fffff)|0x20000000));
			wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
		}
	}

	set_cpu_cap(c, X86_FEATURE_K7);

	/* Is this call from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	/* Athlon 660/661 is valid. */
	if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
	    (c->x86_mask == 1)))
		return;

	/* Duron 670 is valid */
	if ((c->x86_model == 7) && (c->x86_mask == 0))
		return;

	/*
	 * Athlon 662, Duron 671, and Athlons beyond model 7 have the MP
	 * capability bit. It's worth noting that the A5 stepping (662) of
	 * some Athlon XPs has the MP bit set.
	 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
	 * more.
	 */
	if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
	    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
	     (c->x86_model > 7))
		if (cpu_has(c, X86_FEATURE_MP))
			return;

	/* If we get here, not a certified SMP capable AMD system. */

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	WARN_ONCE(1, "WARNING: This combination of AMD"
		" processors is not suitable for SMP.\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
#endif
}

#ifdef CONFIG_NUMA
/*
 * Used to work around a broken NUMA config. See the comment in
 * srat_detect_node().
 */
static int nearby_node(int apicid)
{
	int i, node;

	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return first_node(node_online_map); /* Shouldn't happen */
}
#endif

#ifdef CONFIG_SMP
/*
 * Fix up cpu_core_id for pre-F17h systems to be in the
 * [0 .. cores_per_node - 1] range. Not really needed but
 * kept so as not to break existing setups.
 */
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
{
	u32 cus_per_node;

	if (c->x86 >= 0x17)
		return;

	cus_per_node = c->x86_max_cores / nodes_per_socket;
	c->cpu_core_id %= cus_per_node;
}
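
/*
 * A worked example of the fixup above (hypothetical numbers): on a
 * two-node pre-F17h socket with x86_max_cores = 8, cus_per_node is 4,
 * so the cores on each internal node report cpu_core_id 0..3 instead
 * of 0..7 across the socket.
 */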

/*
 * Fixup core topology information for
 * (1) AMD multi-node processors
 *     Assumption: Number of cores in each internal node is the same.
 * (2) AMD processors supporting compute units
 */
static void amd_get_topology(struct cpuinfo_x86 *c)
{
	u8 node_id;
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

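		/*
		 * Field layout assumed here, per the Fn8000_001E
		 * definitions: ECX[7:0] is the node ID, EBX[15:8] is
		 * cores-per-compute-unit (Fam15h) or threads-per-core
		 * (Fam17h) minus one, and EBX[7:0] is the compute-unit
		 * or core ID, hence the decodes below.
		 */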
		node_id  = ecx & 0xff;
		smp_num_siblings = ((ebx >> 8) & 0xff) + 1;

		if (c->x86 == 0x15)
			c->cu_id = ebx & 0xff;

		if (c->x86 >= 0x17) {
			c->cpu_core_id = ebx & 0xff;

			if (smp_num_siblings > 1)
				c->x86_max_cores /= smp_num_siblings;
		}

		/*
		 * We may have multiple LLCs if L3 caches exist, so check if we
		 * have an L3 cache by looking at the L3 cache CPUID leaf.
		 */
		if (cpuid_edx(0x80000006)) {
			if (c->x86 == 0x17) {
				/*
				 * LLC is at the core complex level.
				 * Core complex id is ApicId[3].
				 */
				per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
			} else {
				/* LLC is at the node level. */
				per_cpu(cpu_llc_id, cpu) = node_id;
			}
		}
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		node_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = node_id;
	} else
		return;

	if (nodes_per_socket > 1) {
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
		legacy_fixup_core_id(c);
	}
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC id distinguish
 * the cores. Assumes the number of cores is a power of two.
 */
static void amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits;
	int cpu = smp_processor_id();

	bits = c->x86_coreid_bits;
	/* Low order bits define the core id (index of core in socket) */
	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
	/* Convert the initial APIC ID into the socket ID */
	c->phys_proc_id = c->initial_apicid >> bits;
	/* use socket ID also for last level cache */
	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
	amd_get_topology(c);
#endif
}
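
/*
 * A worked decomposition example (hypothetical values): with
 * x86_coreid_bits = 2 and initial_apicid = 0b0110, the core ID is
 * 0b10 = 2 and the socket ID (phys_proc_id) is 0b01 = 1.
 */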

u16 amd_get_nb_id(int cpu)
{
	u16 id = 0;
#ifdef CONFIG_SMP
	id = per_cpu(cpu_llc_id, cpu);
#endif
	return id;
}
EXPORT_SYMBOL_GPL(amd_get_nb_id);

u32 amd_get_nodes_per_socket(void)
{
	return nodes_per_socket;
}
EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned apicid = c->apicid;

	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On a multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fix up some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs
		 *   which the K8 northbridge parsing fills in.  Assume
		 *   they are all increased by a constant offset, but in
		 *   the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming a certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through the CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}

static void early_init_amd_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned bits, ecx;

	/* Multi core CPU? */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	c->x86_max_cores = (ecx & 0xff) + 1;

	/* Is the CPU telling us the core id bit shift? */
	bits = (ecx >> 12) & 0xF;

	/* Otherwise recompute it */
	if (bits == 0) {
		while ((1 << bits) < c->x86_max_cores)
			bits++;
	}

	c->x86_coreid_bits = bits;
#endif
}
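
/*
 * Example of the recompute path above (hypothetical values): if
 * ECX[15:12] is 0 and x86_max_cores is 6, the loop settles on
 * bits = 3, since 1 << 3 = 8 is the smallest power of two that
 * covers 6 cores.
 */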

static void bsp_init_amd(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	if (c->x86 >= 0xf) {
		unsigned long long tseg;

		/*
		 * Split up the direct mapping around the TSEG SMM area.
		 * Don't do it for gbpages because there seems to be very
		 * little benefit in doing so.
		 */
		if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
			unsigned long pfn = tseg >> PAGE_SHIFT;

			pr_debug("tseg: %010llx\n", tseg);
			if (pfn_range_is_mapped(pfn, pfn + 1))
				set_memory_4k((unsigned long)__va(tseg), 1);
		}
	}
#endif

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		if (c->x86 > 0x10 ||
		    (c->x86 == 0x10 && c->x86_model >= 0x2)) {
			u64 val;

			rdmsrl(MSR_K7_HWCR, val);
			if (!(val & BIT(24)))
				pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
		}
	}

	if (c->x86 == 0x15) {
		unsigned long upperbit;
		u32 cpuid, assoc;

		cpuid	 = cpuid_edx(0x80000005);
		assoc	 = cpuid >> 16 & 0xff;
		upperbit = ((cpuid >> 24) << 10) / assoc;

		va_align.mask	  = (upperbit - 1) & PAGE_MASK;
		va_align.flags    = ALIGN_VA_32 | ALIGN_VA_64;

		/* A random value per boot for bit slice [12:upper_bit) */
		va_align.bits = get_random_int() & va_align.mask;
	}
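
	/*
	 * A sketch of the calculation above (values are illustrative):
	 * CPUID Fn8000_0005 EDX describes the L1 instruction cache, with
	 * the size in KB in bits [31:24] and the associativity in bits
	 * [23:16]. For Fam15h's 64 KB, 2-way L1I this gives upperbit =
	 * (64 << 10) / 2 = 32768, so va_align.mask = 0x7000 and bits
	 * [14:12] of aligned mmap addresses get randomized per boot.
	 */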

	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}
}

static void early_init_amd(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_amd_mc(c);

	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power holds CPUID 8000_0007 EDX. Bit 8 means the TSC
	 * runs at a constant rate across P/T-states and does not stop
	 * in deep C-states.
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is the accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
	/* Set MTRR capability flag if appropriate */
	if (c->x86 == 5)
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for AMD APIC versions
	 * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
	 * after 16h.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC)) {
		if (c->x86 > 0x16)
			set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		else if (c->x86 >= 0xf) {
			/* check CPU config space for extended APIC ID */
			unsigned int val;

			val = read_pci_config(0, 24, 0, 0x68);
			if ((val >> 17 & 0x3) == 0x3)
				set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
		}
	}
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * or VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	/* F16h erratum 793, CVE-2013-6885 */
	if (c->x86 == 0x16 && c->x86_model <= 0xf)
		msr_set_bit(MSR_AMD64_LS_CFG, 15);

	/*
	 * Check whether the machine is affected by erratum 400. This is
	 * used to select the proper idle routine and to enable the check
	 * whether the machine is affected in arch_post_acpi_init(), which
	 * sets the X86_BUG_AMD_APIC_C1E bug depending on the MSR check.
	 */
	if (cpu_has_amd_erratum(c, amd_erratum_400))
		set_cpu_bug(c, X86_BUG_AMD_E400);

	/*
	 * BIOS support is required for SME. If BIOS has enabled SME then
	 * adjust x86_phys_bits by the SME physical address space reduction
	 * value. If BIOS has not enabled SME then don't advertise the
	 * feature (set in scattered.c). Also, since the SME support requires
	 * long mode, don't advertise the feature under CONFIG_X86_32.
	 */
	if (cpu_has(c, X86_FEATURE_SME)) {
		u64 msr;

		/* Check if SME is enabled */
		rdmsrl(MSR_K8_SYSCFG, msr);
		if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
			c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
			if (IS_ENABLED(CONFIG_X86_32))
				clear_cpu_cap(c, X86_FEATURE_SME);
		} else {
			clear_cpu_cap(c, X86_FEATURE_SME);
		}
	}
}

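/*
 * Example of the SME adjustment above (illustrative values): CPUID
 * Fn8000_001F EBX[11:6] reports the physical address bit reduction
 * when memory encryption is enabled; a value of 1 on a 48-bit part
 * would leave c->x86_phys_bits at 47.
 */
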
static void init_amd_k8(struct cpuinfo_x86 *c)
{
	u32 level;
	u64 value;

	/* On C+ stepping K8, rep microcode works well for copy/memset */
	level = cpuid_eax(1);
	if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/*
	 * Some BIOSes incorrectly force this feature, but only K8 revision D
	 * (model = 0x14) and later actually support it.
	 * (AMD Erratum #110, docId: 25759).
	 */
	if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
		clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
		if (!rdmsrl_amd_safe(0xc001100d, &value)) {
			value &= ~BIT_64(32);
			wrmsrl_amd_safe(0xc001100d, value);
		}
	}

	if (!c->x86_model_id[0])
		strcpy(c->x86_model_id, "Hammer");

#ifdef CONFIG_SMP
	/*
	 * Disable the TLB flush filter by setting HWCR.FFDIS
	 * (bit 6 of MSR C001_0015) on K8.
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	msr_set_bit(MSR_K7_HWCR, 6);
#endif
	set_cpu_bug(c, X86_BUG_SWAPGS_FENCE);
}

static void init_amd_gh(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	/* do this for the boot cpu */
	if (c == &boot_cpu_data)
		check_enable_amd_mmconf_dmi();

	fam10h_check_enable_mmcfg();
#endif

	/*
	 * Disable GART TLB Walk Errors on Fam10h. We do this here because this
	 * is always needed when GART is enabled, even in a kernel which has no
	 * MCE support built in. BIOS should disable GartTlbWlk Errors already.
	 * If it doesn't, we do it here as suggested by the BKDG.
	 *
	 * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012
	 */
	msr_set_bit(MSR_AMD64_MCx_MASK(4), 10);

	/*
	 * On family 10h BIOS may not have properly enabled WC+ support, causing
	 * it to be converted to CD memtype. This may result in performance
	 * degradation for certain nested-paging guests. Prevent this conversion
	 * by clearing bit 24 in MSR_AMD64_BU_CFG2.
	 *
	 * NOTE: we want to use the _safe accessors so as not to #GP kvm
	 * guests on older kvm hosts.
	 */
	msr_clear_bit(MSR_AMD64_BU_CFG2, 24);

	if (cpu_has_amd_erratum(c, amd_erratum_383))
		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}

#define MSR_AMD64_DE_CFG	0xC0011029

static void init_amd_ln(struct cpuinfo_x86 *c)
{
	/*
	 * Apply the erratum 665 fix unconditionally so machines without a
	 * BIOS fix work.
	 */
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
}

static void init_amd_bd(struct cpuinfo_x86 *c)
{
	u64 value;

	/* Re-enable TopologyExtensions if switched off by BIOS */
	if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {

		if (msr_set_bit(0xc0011005, 54) > 0) {
			rdmsrl(0xc0011005, value);
			if (value & BIT_64(54)) {
				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
				pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
			}
		}
	}

	/*
	 * The way-access filter has a performance penalty on some workloads.
	 * Disable it on the affected CPUs.
	 */
	if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
		if (!rdmsrl_safe(MSR_F15H_IC_CFG, &value) && !(value & 0x1E)) {
			value |= 0x1E;
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
}

static void init_amd_zn(struct cpuinfo_x86 *c)
{
	/*
	 * Fix erratum 1076: the CPB feature bit is not set in CPUID. It
	 * affects all steppings up to and including B1.
	 */
	if (c->x86_model <= 1 && c->x86_mask <= 1)
		set_cpu_cap(c, X86_FEATURE_CPB);
}

static void init_amd(struct cpuinfo_x86 *c)
{
	early_init_amd(c);

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	 * 3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_cpu_cap(c, 0*32+31);

	if (c->x86 >= 0x10)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get the apicid instead of the initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_cpu_cap(c, X86_FEATURE_MCE);

	switch (c->x86) {
	case 4:    init_amd_k5(c); break;
	case 5:    init_amd_k6(c); break;
	case 6:    init_amd_k7(c); break;
	case 0xf:  init_amd_k8(c); break;
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
	case 0x17: init_amd_zn(c); break;
	}

	/* Enable the workaround for the FXSAVE leak */
	if (c->x86 >= 6)
		set_cpu_bug(c, X86_BUG_FXSAVE_LEAK);

	cpu_detect_cache_sizes(c);

	/* Multi core CPU? */
	if (c->extended_cpuid_level >= 0x80000008) {
		amd_detect_cmp(c);
		srat_detect_node(c);
	}

#ifdef CONFIG_X86_32
	detect_ht(c);
#endif

	init_amd_cacheinfo(c);

	if (c->x86 >= 0xf)
		set_cpu_cap(c, X86_FEATURE_K8);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/* MFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
	}

	/*
	 * Family 0x12 and above processors have the APIC timer
	 * running in deep C-states.
	 */
	if (c->x86 > 0x11)
		set_cpu_cap(c, X86_FEATURE_ARAT);

	/* 3DNow or LM implies PREFETCHW */
	if (!cpu_has(c, X86_FEATURE_3DNOWPREFETCH))
		if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
			set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);

	/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}

#ifdef CONFIG_X86_32
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_mask == 0)
			size = 64;
		/* Tbird rev A1/A2 */
		if (c->x86_model == 4 &&
			(c->x86_mask == 0 || c->x86_mask == 1))
			size = 256;
	}
	return size;
}
#endif

static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
{
	u32 ebx, eax, ecx, edx;
	u16 mask = 0xfff;

	if (c->x86 < 0xf)
		return;

	if (c->extended_cpuid_level < 0x80000006)
		return;

	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
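
	/*
	 * Field layout assumed here: in Fn8000_0006, EBX[27:16] and
	 * EBX[11:0] hold the L2 DTLB and ITLB entry counts for 4K pages,
	 * and EAX carries the same split for 2M/4M pages, with the
	 * associativity in the top nibble of each 16-bit half (hence the
	 * 0xfff mask). The L1 counts in Fn8000_0005 are only 8 bits wide,
	 * hence the 0xff mask used for K8 below.
	 */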

	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
	tlb_lli_4k[ENTRIES] = ebx & mask;

	/*
	 * K8 doesn't have 2M/4M entries in the L2 TLB so read out the L1 TLB
	 * characteristics from the CPUID function 0x80000005 instead.
	 */
	if (c->x86 == 0xf) {
		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
		mask = 0xff;
	}

	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!((eax >> 16) & mask))
		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
	else
		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;

	/* a 4M entry uses two 2M entries */
	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;

	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
	if (!(eax & mask)) {
		/* Erratum 658 */
		if (c->x86 == 0x15 && c->x86_model <= 0x1f) {
			tlb_lli_2m[ENTRIES] = 1024;
		} else {
			cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
			tlb_lli_2m[ENTRIES] = eax & 0xff;
		}
	} else
		tlb_lli_2m[ENTRIES] = eax & mask;

	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
}

static const struct cpu_dev amd_cpu_dev = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.legacy_cache_size = amd_size_cache,
#endif
	.c_early_init   = early_init_amd,
	.c_detect_tlb	= cpu_detect_tlb_amd,
	.c_bsp_init	= bsp_init_amd,
	.c_init		= init_amd,
	.c_x86_vendor	= X86_VENDOR_AMD,
};

cpu_dev_register(amd_cpu_dev);

/*
 * AMD errata checking
 *
 * Errata are defined as arrays of ints using the AMD_LEGACY_ERRATUM() or
 * AMD_OSVW_ERRATUM() macros. The latter is intended for newer errata that
 * have an OSVW id assigned, which it takes as first argument. Both take a
 * variable number of family-specific model-stepping ranges created by
 * AMD_MODEL_RANGE().
 *
 * Example:
 *
 * const int amd_erratum_319[] =
 *	AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0x4, 0x2),
 *			   AMD_MODEL_RANGE(0x10, 0x8, 0x0, 0x8, 0x0),
 *			   AMD_MODEL_RANGE(0x10, 0x9, 0x0, 0x9, 0x0));
 */

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)

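/*
 * A worked example of the encoding above: AMD_MODEL_RANGE(0x10, 0x2,
 * 0x1, 0xff, 0xf) packs family 0x10 into bits [31:24], the start
 * model/stepping 0x021 into bits [23:12], and the end model/stepping
 * 0xfff into bits [11:0]; the _FAMILY/_START/_END macros recover
 * 0x10, 0x021 and 0xfff respectively.
 */
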
static const int amd_erratum_400[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
			    AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));

static const int amd_erratum_383[] =
	AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));

static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
	int osvw_id = *erratum++;
	u32 range;
	u32 ms;

	if (osvw_id >= 0 && osvw_id < 65536 &&
	    cpu_has(cpu, X86_FEATURE_OSVW)) {
		u64 osvw_len;

		rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, osvw_len);
		if (osvw_id < osvw_len) {
			u64 osvw_bits;

			rdmsrl(MSR_AMD64_OSVW_STATUS + (osvw_id >> 6),
			    osvw_bits);
			return osvw_bits & (1ULL << (osvw_id & 0x3f));
		}
	}

	/* OSVW unavailable or ID unknown, match family-model-stepping range */
	ms = (cpu->x86_model << 4) | cpu->x86_mask;
	while ((range = *erratum++))
		if ((cpu->x86 == AMD_MODEL_RANGE_FAMILY(range)) &&
		    (ms >= AMD_MODEL_RANGE_START(range)) &&
		    (ms <= AMD_MODEL_RANGE_END(range)))
			return true;

	return false;
}

void set_dr_addr_mask(unsigned long mask, int dr)
{
	if (!boot_cpu_has(X86_FEATURE_BPEXT))
		return;

	switch (dr) {
	case 0:
		wrmsr(MSR_F16H_DR0_ADDR_MASK, mask, 0);
		break;
	case 1:
	case 2:
	case 3:
		wrmsr(MSR_F16H_DR1_ADDR_MASK - 1 + dr, mask, 0);
		break;
	default:
		break;
	}
}
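
/*
 * Usage sketch (assuming the F16h breakpoint address-mask semantics):
 * set_dr_addr_mask(0x3, 0) makes DR0 comparisons ignore the low two
 * address bits, effectively widening the watched range to 4 bytes.
 */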
1025