xref: /openbmc/linux/arch/x86/kernel/cpu/cyrix.c (revision a1e58bbd)
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
#include <asm/timer.h>
#include <asm/pci-direct.h>
#include <asm/tsc.h>

#include "cpu.h"
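
/*
 * getCx86()/setCx86() (asm/processor-cyrix.h) reach the Cyrix/NSC
 * configuration registers through the indexed I/O port pair 0x22/0x23;
 * the index/data sequence is not atomic, hence the irq-disabled regions
 * around the accesses below.
 */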

/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU
 */
static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;
	unsigned long flags;

	/* we test for DEVID by checking whether CCR3 is writable */
	local_irq_save(flags);
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);   /* dummy to change bus */

	if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
		ccr2 = getCx86(CX86_CCR2);
		setCx86(CX86_CCR2, ccr2 ^ 0x04);
		getCx86(0xc0);  /* dummy */

		if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
			*dir0 = 0xfd;
		else {                          /* Cx486S A step */
			setCx86(CX86_CCR2, ccr2);
			*dir0 = 0xfe;
		}
	} else {
		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

		/* read DIR0 and DIR1 CPU registers */
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
	local_irq_restore(flags);
}

/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 *
 * Actually, since bugs.h doesn't even reference this, perhaps someone should
 * fix the documentation ???
 */
static unsigned char Cx86_dir0_msb __cpuinitdata = 0;

static char Cx86_model[][9] __cpuinitdata = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
	"M II ", "Unknown"
};
static char Cx486_name[][5] __cpuinitdata = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
	"SRx2", "DRx2"
};
static char Cx486S_name[][4] __cpuinitdata = {
	"S", "S2", "Se", "S2e"
};
static char Cx486D_name[][4] __cpuinitdata = {
	"DX", "DX2", "?", "?", "?", "DX4"
};
static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
static char cyrix_model_mult1[] __cpuinitdata = "12??43";
static char cyrix_model_mult2[] __cpuinitdata = "12233445";
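
/*
 * cyrix_model_mult1/mult2 hold the integer part of the core/bus clock
 * multiplier, indexed by the DIR0 low nibble; the selected digit is
 * patched into Cx86_cb so the reported model string reads "Nx ..." or
 * "N.5x ..." (see the 5x86/6x86/6x86MX cases below).
 */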

/*
 * Reset the slow-loop (SLOP) bit on the 686(L), which is set by some old
 * BIOSes for compatibility with DOS games.  This makes the udelay loop
 * work correctly, and improves performance.
 *
 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP.
 */

static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
{
	unsigned long flags;

	if (Cx86_dir0_msb == 3) {
		unsigned char ccr3, ccr5;

		local_irq_save(flags);
		ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
		ccr5 = getCx86(CX86_CCR5);
		if (ccr5 & 2)
			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
		local_irq_restore(flags);

		if (ccr5 & 2) { /* possible wrong calibration done */
			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
			calibrate_delay();
			c->loops_per_jiffy = loops_per_jiffy;
		}
	}
}

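/*
 * Several routines below share the same unlock idiom: writing 0001b into
 * the high nibble of CCR3 (the MAPEN field) exposes the extended
 * configuration registers (CCR4-CCR7, PCR0/PCR1, ...); writing CCR3 back
 * without that key locks them again.
 */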
static void __cpuinit set_cx86_reorder(void)
{
	u8 ccr3;

	printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

	/* Load/Store Serialize to mem access disable (=reorder it) */
	setCx86(CX86_PCR0, getCx86(CX86_PCR0) & ~0x80);
	/* set load/store serialize from 1GB to 4GB */
	ccr3 |= 0xe0;
	setCx86(CX86_CCR3, ccr3);
}

static void __cpuinit set_cx86_memwb(void)
{
	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");

	/* CCR2 bit 2: unlock NW bit */
	setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
	/* set 'Not Write-through' */
	write_cr0(read_cr0() | X86_CR0_NW);
	/* CCR2 bit 2: lock NW bit and set WT1 */
	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14);
}

static void __cpuinit set_cx86_inc(void)
{
	unsigned char ccr3;

	printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n");

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
	/* PCR1 -- Performance Control */
	/* Incrementor on, whatever that is */
	setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02);
	/* PCR0 -- Performance Control */
	/* Incrementor Margin 10 */
	setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */
}

/*
 *	Configure later MediaGX and/or Geode processor.
 */

static void __cpuinit geode_configure(void)
{
	unsigned long flags;
	u8 ccr3;

	local_irq_save(flags);

	/* Suspend on halt power saving and enable #SUSP pin */
	setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */

	/* FPU fast, DTE cache, Mem bypass */
	setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x38);
	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */

	set_cx86_memwb();
	set_cx86_reorder();
	set_cx86_inc();

	local_irq_restore(flags);
}

static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
	char *buf = c->x86_model_id;
	const char *p = NULL;

	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	   3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway */
	clear_bit(0*32+31, c->x86_capability);

	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
	if (test_bit(1*32+24, c->x86_capability)) {
		clear_bit(1*32+24, c->x86_capability);
		set_bit(X86_FEATURE_CXMMX, c->x86_capability);
	}

	do_cyrix_devid(&dir0, &dir1);

	check_cx686_slop(c);

	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */

	/* common case step number/rev -- exceptions handled below */
	c->x86_model = (dir1 >> 4) + 1;
	c->x86_mask = dir1 & 0xf;

	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
	 * We do the same thing for each generation: we work out
	 * the model, multiplier and stepping.  Black magic included,
	 * to make the silicon step/rev numbers match the printed ones.
	 */

	switch (dir0_msn) {
		unsigned char tmp;

	case 0: /* Cx486SLC/DLC/SRx/DRx */
		p = Cx486_name[dir0_lsn & 7];
		break;

	case 1: /* Cx486S/DX/DX2/DX4 */
		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
			: Cx486S_name[dir0_lsn & 3];
		break;

	case 2: /* 5x86 */
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		p = Cx86_cb+2;
		break;

	case 3: /* 6x86/6x86L */
		Cx86_cb[1] = ' ';
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		if (dir1 > 0x21) { /* 686L */
			Cx86_cb[0] = 'L';
			p = Cx86_cb;
			(c->x86_model)++;
		} else             /* 686 */
			p = Cx86_cb+1;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
		/* 6x86's contain this bug */
		c->coma_bug = 1;
		break;

	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
#ifdef CONFIG_PCI
	{
		u32 vendor, device;
		/* It isn't really a PCI quirk directly, but the cure is the
		   same. The MediaGX has deep magic SMM stuff that handles the
		   SB emulation. It throws away the fifo on disable_dma() which
		   is wrong and ruins the audio.

		   Bug2: VSA1 has a wrap bug so that using maximum sized DMA
		   causes bad things. According to NatSemi, VSA2 has another
		   bug to do with 'hlt'. I've not seen any boards using VSA2
		   and X doesn't seem to support it either so who cares 8).
		   VSA1 we work around however.
		*/

		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
		isa_dma_bridge_buggy = 2;

		/* We do this before the PCI layer is running. However we
		   are safe here as we know the bridge must be a Cyrix
		   companion and must be present */
		vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
		device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);

		/*
		 *  The 5510/5520 companion chips have a funky PIT.
		 */
		if (vendor == PCI_VENDOR_ID_CYRIX &&
		    (device == PCI_DEVICE_ID_CYRIX_5510 ||
		     device == PCI_DEVICE_ID_CYRIX_5520))
			mark_tsc_unstable("cyrix 5510/5520 detected");
	}
#endif
		c->x86_cache_size = 16;	/* Yep, 16K integrated cache, that's it */

		/* GXm supports extended cpuid levels 'a la' AMD */
		if (c->cpuid_level == 2) {
			/* Enable cxMMX extensions (GX1 Datasheet 54) */
			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);

			/*
			 * GXm : 0x30 ... 0x5f GXm  datasheet 51
			 * GXlv: 0x6x          GXlv datasheet 54
			 *  ?  : 0x7x
			 * GX1 : 0x8x          GX1  datasheet 56
			 */
			if ((0x30 <= dir1 && dir1 <= 0x6f) ||
			    (0x80 <= dir1 && dir1 <= 0x8f))
				geode_configure();
			get_model_name(c);  /* get CPU marketing name */
			return;
		} else { /* MediaGX */
			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
			p = Cx86_cb+2;
			c->x86_model = (dir1 & 0x20) ? 1 : 2;
		}
		break;

	case 5: /* 6x86MX/M II */
		if (dir1 > 7) {
			dir0_msn++;  /* M II */
			/* Enable MMX extensions (App note 108) */
			setCx86(CX86_CCR7, getCx86(CX86_CCR7) | 1);
		} else {
			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
		}
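		/*
		 * Pick where the multiplier digit goes in Cx86_cb: index 2
		 * yields "Nx Core/Bus Clock", index 0 keeps the ".5" and
		 * yields "N.5x Core/Bus Clock".
		 */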
		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
		p = Cx86_cb+tmp;
		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
			(c->x86_model)++;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_bit(X86_FEATURE_CYRIX_ARR, c->x86_capability);
		break;

	case 0xf:  /* Cyrix 486 without DEVID registers */
		switch (dir0_lsn) {
		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
			dir0_msn = 0;
			p = Cx486_name[(c->hard_math) ? 1 : 0];
			break;

		case 0xe:  /* a 486S A step */
			dir0_msn = 0;
			p = Cx486S_name[0];
			break;
		}
		break;

	default:  /* unknown (shouldn't happen, we know everyone ;-) */
		dir0_msn = 7;
		break;
	}
	strcpy(buf, Cx86_model[dir0_msn & 7]);
	if (p)
		strcat(buf, p);
}

/*
 * Handle National Semiconductor branded processors
 */
static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
{
	/* There may be GX1 processors in the wild that are branded
	 * NSC and not Cyrix.
	 *
	 * This function only handles the GX processor, and kicks everything
	 * else to the Cyrix init function above - that should
	 * cover any processors that might have been branded differently
	 * after NSC acquired Cyrix.
	 *
	 * If this breaks your GX1 horribly, please e-mail
	 * info-linux@ldcmail.amd.com to tell us.
	 */

	/* Handle the GX (Formerly known as the GX2) */

	if (c->x86 == 5 && c->x86_model == 5)
		display_cacheinfo(c);
	else
		init_cyrix(c);
}

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
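/*
 * (With %eax = 5, SAHF clears the arithmetic flags since %ah is 0; DIV
 * leaves EFLAGS untouched on a Cyrix, so LAHF reads back only the
 * always-set reserved bit 1, i.e. 0x02, while other 486s clobber the
 * flags on DIV and return something else.)
 */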
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide.. */
	return (unsigned char) (test >> 8) == 0x02;
}

static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
{
	/* Detect Cyrix with disabled CPUID */
	if (c->x86 == 4 && test_cyrix_52div()) {
		unsigned char dir0, dir1;

		strcpy(c->x86_vendor_id, "CyrixInstead");
		c->x86_vendor = X86_VENDOR_CYRIX;

		/* Actually enable cpuid on the older Cyrix */

		/* Retrieve CPU revisions */

		do_cyrix_devid(&dir0, &dir1);

		dir0 >>= 4;

		/* Check it is an affected model */
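		/*
		 * DIR0 high nibble 3 is the 6x86/6x86L family and 5 the
		 * 6x86MX (see Cx86_model above); on these parts CPUID is
		 * gated by CCR4 bit 7, which we set below.
		 */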

		if (dir0 == 5 || dir0 == 3) {
			unsigned char ccr3;
			unsigned long flags;

			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
			local_irq_save(flags);
			ccr3 = getCx86(CX86_CCR3);
			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN  */
			setCx86(CX86_CCR4, getCx86(CX86_CCR4) | 0x80);  /* enable cpuid  */
			setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
			local_irq_restore(flags);
		}
	}
}

static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
	.c_vendor	= "Cyrix",
	.c_ident	= { "CyrixInstead" },
	.c_init		= init_cyrix,
	.c_identify	= cyrix_identify,
};

int __init cyrix_init_cpu(void)
{
	cpu_devs[X86_VENDOR_CYRIX] = &cyrix_cpu_dev;
	return 0;
}

static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
	.c_vendor	= "NSC",
	.c_ident	= { "Geode by NSC" },
	.c_init		= init_nsc,
};

int __init nsc_init_cpu(void)
{
	cpu_devs[X86_VENDOR_NSC] = &nsc_cpu_dev;
	return 0;
}