#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/processor-cyrix.h>
#include <asm/timer.h>
#include <asm/pci-direct.h>
#include <asm/tsc.h>

#include "cpu.h"

/*
 * Read the NSC/Cyrix DEVID registers (DIR0/DIR1) to get more detailed
 * information about the CPU.
 */
static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;
	unsigned long flags;

	/* we test for DEVID by checking whether CCR3 is writable */
	local_irq_save(flags);
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);   /* dummy to change bus */

	if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
		ccr2 = getCx86(CX86_CCR2);
		setCx86(CX86_CCR2, ccr2 ^ 0x04);
		getCx86(0xc0);  /* dummy */

		if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
			*dir0 = 0xfd;
		else {                          /* Cx486S A step */
			setCx86(CX86_CCR2, ccr2);
			*dir0 = 0xfe;
		}
	} else {
		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

		/* read DIR0 and DIR1 CPU registers */
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
	local_irq_restore(flags);
}
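
/*
 * DIR0's high nibble identifies the CPU "family" (used as an index into
 * Cx86_model[] below) and its low nibble the model or clock multiplier;
 * DIR1 carries the silicon step/revision.  See init_cyrix() below for
 * the decoding.
 */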

/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c.
 *
 * Actually, since bugs.h doesn't even reference this, perhaps someone
 * should fix the documentation?
 */
static unsigned char Cx86_dir0_msb __cpuinitdata = 0;

static char Cx86_model[][9] __cpuinitdata = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
	"M II ", "Unknown"
};
static char Cx486_name[][5] __cpuinitdata = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
	"SRx2", "DRx2"
};
static char Cx486S_name[][4] __cpuinitdata = {
	"S", "S2", "Se", "S2e"
};
static char Cx486D_name[][4] __cpuinitdata = {
	"DX", "DX2", "?", "?", "?", "DX4"
};
/*
 * Scratch template for the clock-multiplier part of the model name; its
 * leading characters are patched in place below to yield strings such as
 * "2.5x Core/Bus Clock", "3x Core/Bus Clock" or "L <n>x Core/Bus Clock".
 */
static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __cpuinitdata = "12??43";
static char cyrix_model_mult2[] __cpuinitdata = "12233445";

/*
 * Reset the slow-loop (SLOP) bit on the 686(L), which is set by some old
 * BIOSes for compatibility with DOS games.  This makes the udelay loop
 * work correctly and improves performance.
 *
 * FIXME: our newer udelay uses the TSC, so we no longer need to frob
 * with SLOP.
 */

extern void calibrate_delay(void) __init;

static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
{
	unsigned long flags;

	if (Cx86_dir0_msb == 3) {
		unsigned char ccr3, ccr5;

		local_irq_save(flags);
		ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN  */
		ccr5 = getCx86(CX86_CCR5);
		if (ccr5 & 2)
			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
		local_irq_restore(flags);

		if (ccr5 & 2) { /* possible wrong calibration done */
			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
			calibrate_delay();
			c->loops_per_jiffy = loops_per_jiffy;
		}
	}
}

static void __cpuinit set_cx86_reorder(void)
{
	u8 ccr3;

	printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

	/* Load/Store Serialize to mem access disable (=reorder it) */
	setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
	/* set load/store serialize from 1GB to 4GB */
	ccr3 |= 0xe0;
	setCx86(CX86_CCR3, ccr3);
}

static void __cpuinit set_cx86_memwb(void)
{
	u32 cr0;

	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");

	/* CCR2 bit 2: unlock NW bit */
	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
	/* set 'Not Write-through' */
	cr0 = 0x20000000;
	write_cr0(read_cr0() | cr0);
	/* CCR2 bit 2: lock NW bit and set WT1 */
	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
}

static void __cpuinit set_cx86_inc(void)
{
	unsigned char ccr3;

	printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
	/* PCR1 -- Performance Control */
	/* Incrementor on, whatever that is */
	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
	/* PCR0 -- Performance Control */
	/* Incrementor Margin 10 */
	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
}

/*
 *	Configure later MediaGX and/or Geode processor.
 */

static void __cpuinit geode_configure(void)
{
	unsigned long flags;
	u8 ccr3;

	local_irq_save(flags);

	/* Suspend on halt power saving and enable #SUSP pin */
	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */

	/* FPU fast, DTE cache, Mem bypass */
	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */

	set_cx86_memwb();
	set_cx86_reorder();
	set_cx86_inc();

	local_irq_restore(flags);
}

static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
	char *buf = c->x86_model_id;
	const char *p = NULL;

	/*
	 * Bit 31 in normal CPUID is used for a nonstandard 3DNow! ID;
	 * 3DNow! is identified by bit 31 in extended CPUID (1*32+31) anyway.
	 */
	clear_bit(0*32+31, c->x86_capability);

	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
	if (test_bit(1*32+24, c->x86_capability)) {
		clear_bit(1*32+24, c->x86_capability);
		set_bit(X86_FEATURE_CXMMX, c->x86_capability);
	}

	do_cyrix_devid(&dir0, &dir1);

	check_cx686_slop(c);

	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */

	/* common case step number/rev -- exceptions handled below */
	c->x86_model = (dir1 >> 4) + 1;
	c->x86_mask = dir1 & 0xf;

	/*
	 * Now cook; the original recipe is by Channing Corn, from Cyrix.
	 * We do the same thing for each generation: we work out
	 * the model, multiplier and stepping.  Black magic included,
	 * to make the silicon step/rev numbers match the printed ones.
	 */

	switch (dir0_msn) {
		unsigned char tmp;

	case 0: /* Cx486SLC/DLC/SRx/DRx */
		p = Cx486_name[dir0_lsn & 7];
		break;

	case 1: /* Cx486S/DX/DX2/DX4 */
		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
			: Cx486S_name[dir0_lsn & 3];
		break;

	case 2: /* 5x86 */
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		p = Cx86_cb+2;
		break;

	case 3: /* 6x86/6x86L */
		Cx86_cb[1] = ' ';
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		if (dir1 > 0x21) { /* 686L */
			Cx86_cb[0] = 'L';
			p = Cx86_cb;
			(c->x86_model)++;
		} else             /* 686 */
			p = Cx86_cb+1;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
		/* 6x86s contain the coma bug */
		c->coma_bug = 1;
		break;

	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
#ifdef CONFIG_PCI
	{
		u32 vendor, device;
		/*
		 * It isn't really a PCI quirk directly, but the cure is the
		 * same. The MediaGX has deep magic SMM stuff that handles the
		 * SB emulation. It throws away the FIFO on disable_dma(),
		 * which is wrong and ruins the audio.
		 *
		 * Bug 2: VSA1 has a wrap bug, so using maximum-sized DMA
		 * causes bad things. According to NatSemi, VSA2 has another
		 * bug to do with 'hlt'. I've not seen any boards using VSA2
		 * and X doesn't seem to support it either, so who cares 8).
		 * VSA1 we work around, however.
		 */

		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
		isa_dma_bridge_buggy = 2;

		/*
		 * We do this before the PCI layer is running. However we
		 * are safe here as we know the bridge must be a Cyrix
		 * companion and must be present.
		 */
		vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
		device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);

		/*
		 * The 5510/5520 companion chips have a funky PIT.
		 */
		if (vendor == PCI_VENDOR_ID_CYRIX &&
		    (device == PCI_DEVICE_ID_CYRIX_5510 ||
		     device == PCI_DEVICE_ID_CYRIX_5520))
			mark_tsc_unstable("cyrix 5510/5520 detected");
	}
#endif
		c->x86_cache_size = 16;	/* Yep, 16K integrated cache, that's it */

		/* GXm supports extended cpuid levels a la AMD */
		if (c->cpuid_level == 2) {
			/* Enable cxMMX extensions (GX1 Datasheet 54) */
			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);

			/*
			 * GXm : 0x30 ... 0x5f GXm  datasheet 51
			 * GXlv: 0x6x          GXlv datasheet 54
			 *  ?  : 0x7x
			 * GX1 : 0x8x          GX1  datasheet 56
			 */
			if ((0x30 <= dir1 && dir1 <= 0x6f) ||
			    (0x80 <= dir1 && dir1 <= 0x8f))
				geode_configure();
			get_model_name(c);  /* get CPU marketing name */
			return;
		} else {  /* MediaGX */
			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
			p = Cx86_cb+2;
			c->x86_model = (dir1 & 0x20) ? 1 : 2;
		}
		break;

	case 5: /* 6x86MX/M II */
		if (dir1 > 7) {
			dir0_msn++;  /* M II */
			/* Enable MMX extensions (App note 108) */
			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
		} else {
			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
		}
		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
		p = Cx86_cb+tmp;
		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
			(c->x86_model)++;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
		break;

	case 0xf:  /* Cyrix 486 without DEVID registers */
		switch (dir0_lsn) {
		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
			dir0_msn = 0;
			p = Cx486_name[(c->hard_math) ? 1 : 0];
			break;

		case 0xe:  /* a 486S A step */
			dir0_msn = 0;
			p = Cx486S_name[0];
			break;
		}
		break;

	default:  /* unknown (shouldn't happen, we know everyone ;-) */
		dir0_msn = 7;
		break;
	}
	strcpy(buf, Cx86_model[dir0_msn & 7]);
	if (p)
		strcat(buf, p);
}

/*
 * Handle National Semiconductor branded processors
 */
static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
{
	/*
	 * There may be GX1 processors in the wild that are branded
	 * NSC and not Cyrix.
	 *
	 * This function only handles the GX processor, and kicks everything
	 * else to the Cyrix init function above - that should cover any
	 * processors that might have been branded differently after NSC
	 * acquired Cyrix.
	 *
	 * If this breaks your GX1 horribly, please e-mail
	 * info-linux@ldcmail.amd.com to tell us.
	 */

	/* Handle the GX (Formerly known as the GX2) */

	if (c->x86 == 5 && c->x86_model == 5)
		display_cacheinfo(c);
	else
		init_cyrix(c);
}

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/*
	 * "sahf" loads the flags from AH = 0 and a Cyrix leaves them
	 * untouched across the DIV, so "lahf" reads back only the
	 * always-set bit 1 of the flags: AH is 0x02 on a Cyrix after
	 * the divide.  Other 486 chips change the flags and return a
	 * different value.
	 */
	return (unsigned char) (test >> 8) == 0x02;
}

static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
{
	/* Detect Cyrix with disabled CPUID */
	if (c->x86 == 4 && test_cyrix_52div()) {
		unsigned char dir0, dir1;

		strcpy(c->x86_vendor_id, "CyrixInstead");
		c->x86_vendor = X86_VENDOR_CYRIX;

		/* Actually enable cpuid on the older Cyrix */

		/* Retrieve CPU revisions */

		do_cyrix_devid(&dir0, &dir1);

		dir0 >>= 4;

		/* Check it is an affected model */

		if (dir0 == 5 || dir0 == 3) {
			unsigned char ccr3;
			unsigned long flags;

			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
			local_irq_save(flags);
			ccr3 = getCx86(CX86_CCR3);
			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN  */
			setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);  /* enable cpuid  */
			setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
			local_irq_restore(flags);
		}
	}
}

static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
	.c_vendor	= "Cyrix",
	.c_ident	= { "CyrixInstead" },
	.c_init		= init_cyrix,
	.c_identify	= cyrix_identify,
};

int __init cyrix_init_cpu(void)
{
	cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
	return 0;
}

static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
	.c_vendor	= "NSC",
	.c_ident	= { "Geode by NSC" },
	.c_init		= init_nsc,
};

int __init nsc_init_cpu(void)
{
	cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
	return 0;
}