xref: /openbmc/linux/arch/x86/kernel/cpu/amd.c (revision a1e58bbd)
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/mach_apic.h>

#include "cpu.h"

/*
 *	B-step AMD K6 CPUs before B 9730xxxx have hardware bugs that can
 *	cause misexecution of code under Linux. Owners of such processors
 *	should contact AMD for precise details and a CPU swap.
 *
 *	See	http://www.multimania.com/poulot/k6bug.html
 *		http://www.amd.com/K6/k6docs/revgd.html
 *
 *	The test below is, erm, interesting. AMD neglected to bump the
 *	stepping when fixing the bug, but they also tweaked indirect-call
 *	performance at the same time, which is what the timing test in
 *	init_amd() looks for.
 */

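/* Trivial indirect-call target used by the K6 timing test in init_amd() below. */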
extern void vide(void);
__asm__(".align 4\nvide: ret");

#ifdef CONFIG_X86_LOCAL_APIC
#define ENABLE_C1E_MASK         0x18000000
#define CPUID_PROCESSOR_SIGNATURE       1
#define CPUID_XFAM              0x0ff00000
#define CPUID_XFAM_K8           0x00000000
#define CPUID_XFAM_10H          0x00100000
#define CPUID_XFAM_11H          0x00200000
#define CPUID_XMOD              0x000f0000
#define CPUID_XMOD_REV_F        0x00040000

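/*
 * CPUID_XFAM/CPUID_XMOD mask the extended-family and extended-model fields
 * of the CPUID signature in EAX; CPUID_XMOD_REV_F corresponds to K8
 * revision F, the earliest K8 revision checked for C1E below.
 */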
/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
static __cpuinit int amd_apic_timer_broken(void)
{
	u32 lo, hi;
	u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
	switch (eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			break;
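		/* Fall through: revision F and later K8s share the C1E check below. */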
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
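		/*
		 * Bits 27 and 28 (ENABLE_C1E_MASK) indicate that the BIOS has
		 * enabled C1E. The local APIC timer stops in C1E, so report it
		 * as broken and let the caller force timer broadcast.
		 */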
		if (lo & ENABLE_C1E_MASK) {
			if (smp_processor_id() != boot_cpu_physical_apicid)
				printk(KERN_INFO "AMD C1E detected late. "
				       "Force timer broadcast.\n");
			return 1;
		}
		break;
	default:
		/* err on the side of caution */
		return 1;
	}
	return 0;
}
#endif

int force_mwait __cpuinitdata;

void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
	if (cpuid_eax(0x80000000) >= 0x80000007) {
		c->x86_power = cpuid_edx(0x80000007);
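		/*
		 * Bit 8 of the Advanced Power Management leaf (TscInvariant)
		 * indicates a TSC that ticks at a constant rate regardless of
		 * P- and C-state transitions.
		 */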
		if (c->x86_power & (1<<8))
			set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
	}
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int mbytes = num_physpages >> (20-PAGE_SHIFT);
	int r;

#ifdef CONFIG_SMP
	unsigned long long value;

	/* Disable TLB flush filter by setting HWCR.FFDIS on K8
	 * bit 6 of msr C001_0015
	 *
	 * Errata 63 for SH-B3 steppings
	 * Errata 122 for all steppings (F+ have it disabled by default)
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_K7_HWCR, value);
		value |= 1 << 6;
		wrmsrl(MSR_K7_HWCR, value);
	}
#endif

	early_init_amd(c);

	/*
	 *	FIXME: We should handle the K5 here. Set up the write
	 *	range and also turn on MSR 83 bits 4 and 31 (write alloc,
	 *	no bus pipeline)
	 */

	/* Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
	   3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, c->x86_capability);

	r = get_model_name(c);

	switch (c->x86)
	{
		case 4:
		/*
		 * General Systems BIOSen alias the cpu frequency registers
		 * of the Elan at 0x000df000. Unfortunately, one of the Linux
		 * drivers subsequently pokes it, and changes the CPU speed.
		 * Workaround: Remove the unneeded alias.
		 */
#define CBAR		(0xfffc) /* Configuration Base Address (32-bit) */
#define CBAR_ENB	(0x80000000)
#define CBAR_KEY	(0x000000CB)
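			/*
			 * If the alias is currently enabled (CBAR_ENB set), write
			 * back CBAR_KEY with the enable bit clear so the alias is
			 * dropped.
			 */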
			if (c->x86_model == 9 || c->x86_model == 10) {
				if (inl(CBAR) & CBAR_ENB)
					outl(0 | CBAR_KEY, CBAR);
			}
			break;
		case 5:
			if (c->x86_model < 6) {
				/* Based on AMD doc 20734R - June 2000 */
				if (c->x86_model == 0) {
					clear_bit(X86_FEATURE_APIC, c->x86_capability);
					set_bit(X86_FEATURE_PGE, c->x86_capability);
				}
				break;
			}

			if (c->x86_model == 6 && c->x86_mask == 1) {
				const int K6_BUG_LOOP = 1000000;
				int n;
				void (*f_vide)(void);
				unsigned long d, d2;

				printk(KERN_INFO "AMD K6 stepping B detected - ");

				/*
				 * It looks like AMD fixed the 2.6.2 bug and improved indirect
				 * calls at the same time.
				 */

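				/*
				 * Time K6_BUG_LOOP indirect calls to vide(). Fixed parts
				 * execute them quickly; an average above ~20 TSC ticks per
				 * call points at the buggy stepping.
				 */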
				n = K6_BUG_LOOP;
				f_vide = vide;
				rdtscl(d);
				while (n--)
					f_vide();
				rdtscl(d2);
				d = d2 - d;

				if (d > 20*K6_BUG_LOOP)
					printk("system stability may be impaired when more than 32 MB are used.\n");
				else
					printk("probably OK (after B9730xxxx).\n");
				printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
			}

			/* K6 with old style WHCR */
			if (c->x86_model < 8 ||
			   (c->x86_model == 8 && c->x86_mask < 8)) {
				/* We can only write allocate on the low 508 MB */
				if (mbytes > 508)
					mbytes = 508;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l & 0x0000FFFF) == 0) {
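					/*
					 * The BIOS left the old-style WHCR fields clear, so
					 * program write allocation for the amount of memory
					 * present (in 4 MB units).
					 */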
					unsigned long flags;
					l = (1<<0) | ((mbytes/4)<<1);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling old style K6 write allocation for %d MB\n",
						mbytes);
				}
				break;
			}

			if ((c->x86_model == 8 && c->x86_mask > 7) ||
			     c->x86_model == 9 || c->x86_model == 13) {
				/* The more serious chips.. */

				if (mbytes > 4092)
					mbytes = 4092;

				rdmsr(MSR_K6_WHCR, l, h);
				if ((l & 0xFFFF0000) == 0) {
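					/*
					 * The BIOS left the new-style WHCR fields clear, so
					 * program write allocation: the size goes in the upper
					 * bits, again in 4 MB units.
					 */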
					unsigned long flags;
					l = ((mbytes>>2)<<22) | (1<<16);
					local_irq_save(flags);
					wbinvd();
					wrmsr(MSR_K6_WHCR, l, h);
					local_irq_restore(flags);
					printk(KERN_INFO "Enabling new style K6 write allocation for %d MB\n",
						mbytes);
				}

				/* Set MTRR capability flag if appropriate */
				if (c->x86_model == 13 || c->x86_model == 9 ||
				   (c->x86_model == 8 && c->x86_mask >= 8))
					set_bit(X86_FEATURE_K6_MTRR, c->x86_capability);
				break;
			}

			if (c->x86_model == 10) {
				/* AMD Geode LX is model 10 */
				/* placeholder for any needed mods */
				break;
			}
			break;
		case 6: /* An Athlon/Duron */

			/* Bit 15 of the Athlon-specific MSR C001_0015 (HWCR) needs to be 0
			 * to enable SSE on Palomino/Morgan/Barton CPUs.
			 * If the BIOS didn't enable it already, enable it here.
			 */
			if (c->x86_model >= 6 && c->x86_model <= 10) {
				if (!cpu_has(c, X86_FEATURE_XMM)) {
					printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
					rdmsr(MSR_K7_HWCR, l, h);
					l &= ~0x00008000;
					wrmsr(MSR_K7_HWCR, l, h);
					set_bit(X86_FEATURE_XMM, c->x86_capability);
				}
			}

			/* It's been determined by AMD that Athlons since model 8 stepping 1
			 * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx,
			 * as per AMD technical note 27212 0.2.
			 */
			if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
				rdmsr(MSR_K7_CLK_CTL, l, h);
				if ((l & 0xfff00000) != 0x20000000) {
					printk("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", l,
						((l & 0x000fffff)|0x20000000));
					wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
				}
			}
			break;
	}

	switch (c->x86) {
	case 15:
	/* Use K8 tuning for Fam10h and Fam11h */
	case 0x10:
	case 0x11:
		set_bit(X86_FEATURE_K8, c->x86_capability);
		break;
	case 6:
		set_bit(X86_FEATURE_K7, c->x86_capability);
		break;
	}
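	/*
	 * AMD's FXSAVE does not save the FPU instruction and data pointers
	 * unless an x87 exception is pending, so stale values can leak to
	 * another task; mark K7 and later for the FXSAVE leak workaround.
	 */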
	if (c->x86 >= 6)
		set_bit(X86_FEATURE_FXSAVE_LEAK, c->x86_capability);

	display_cacheinfo(c);

	if (cpuid_eax(0x80000000) >= 0x80000008) {
		c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
	}

#ifdef CONFIG_X86_HT
	/*
	 * On an AMD multi-core setup the lower bits of the APIC ID
	 * distinguish the cores.
	 */
	if (c->x86_max_cores > 1) {
		int cpu = smp_processor_id();
		unsigned bits = (cpuid_ecx(0x80000008) >> 12) & 0xf;

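		/*
		 * ECX[15:12] of CPUID 0x80000008 (ApicIdCoreIdSize) gives the
		 * number of APIC-ID bits reserved for cores; older parts report
		 * 0, so derive the width from the core count instead.
		 */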
		if (bits == 0) {
			while ((1 << bits) < c->x86_max_cores)
				bits++;
		}
		c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
		c->phys_proc_id >>= bits;
		printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
		       cpu, c->x86_max_cores, c->cpu_core_id);
	}
#endif

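	/*
	 * Family 10h parts report an L3 cache via CPUID 0x80000006 EDX and
	 * therefore expose a fourth cache level to the cacheinfo code.
	 */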
	if (cpuid_eax(0x80000000) >= 0x80000006) {
		if ((c->x86 == 0x10) && (cpuid_edx(0x80000006) & 0xf000))
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}

#ifdef CONFIG_X86_LOCAL_APIC
	if (amd_apic_timer_broken())
		local_apic_timer_disabled = 1;
#endif

	/* K6s report MCEs but don't actually have all the MSRs */
	if (c->x86 < 6)
		clear_bit(X86_FEATURE_MCE, c->x86_capability);

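	/* MFENCE (available with SSE2) is enough to order RDTSC on AMD CPUs. */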
	if (cpu_has_xmm2)
		set_bit(X86_FEATURE_MFENCE_RDTSC, c->x86_capability);
}

static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if (c->x86 == 6) {
		if (c->x86_model == 3 && c->x86_mask == 0)	/* Duron Rev A0 */
			size = 64;
		if (c->x86_model == 4 &&
		    (c->x86_mask == 0 || c->x86_mask == 1))	/* Tbird rev A1/A2 */
			size = 256;
	}
	return size;
}

static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_vendor	= "AMD",
	.c_ident	= { "AuthenticAMD" },
	.c_models = {
		{ .vendor = X86_VENDOR_AMD, .family = 4, .model_names =
		  {
			  [3] = "486 DX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB",
			  [14] = "Am5x86-WT",
			  [15] = "Am5x86-WB"
		  }
		},
	},
	.c_init		= init_amd,
	.c_size_cache	= amd_size_cache,
};

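/* Register the AMD-specific routines with the generic CPU identification code. */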
int __init amd_init_cpu(void)
{
	cpu_devs[X86_VENDOR_AMD] = &amd_cpu_dev;
	return 0;
}