/* xref: /openbmc/linux/arch/x86/kernel/cpu/cyrix.c (revision f35e839a) */
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <linux/io.h>
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
#include <linux/timer.h>
#include <asm/pci-direct.h>
#include <asm/tsc.h>

#include "cpu.h"

/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info about the CPU
 */
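/*
 * In outline (as implemented below): bit 7 of CCR3 only sticks on parts that
 * implement the DEVID registers, so it is flipped and read back.  If the
 * write is ignored there are no DEVID registers and CCR2 bit 2 is toggled
 * instead: a Cx486SLC/DLC ignores that as well (reported as DIR0 0xfd),
 * while a Cx486S A step latches it (reported as DIR0 0xfe).  Otherwise CCR3
 * is restored and DIR0/DIR1 are read directly.
 */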
static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;

	/* we test for DEVID by checking whether CCR3 is writable */
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);   /* dummy to change bus */

	if (getCx86(CX86_CCR3) == ccr3) {       /* no DEVID regs. */
		ccr2 = getCx86(CX86_CCR2);
		setCx86(CX86_CCR2, ccr2 ^ 0x04);
		getCx86(0xc0);  /* dummy */

		if (getCx86(CX86_CCR2) == ccr2) /* old Cx486SLC/DLC */
			*dir0 = 0xfd;
		else {                          /* Cx486S A step */
			setCx86(CX86_CCR2, ccr2);
			*dir0 = 0xfe;
		}
	} else {
		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

		/* read DIR0 and DIR1 CPU registers */
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
}

static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned long flags;

	local_irq_save(flags);
	__do_cyrix_devid(dir0, dir1);
	local_irq_restore(flags);
}
/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 *
 * Actually, since bugs.h doesn't even reference this, perhaps someone should
 * fix the documentation?
 */
static unsigned char Cx86_dir0_msb __cpuinitdata = 0;

static const char __cpuinitconst Cx86_model[][9] = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
	"M II ", "Unknown"
};
static const char __cpuinitconst Cx486_name[][5] = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
	"SRx2", "DRx2"
};
static const char __cpuinitconst Cx486S_name[][4] = {
	"S", "S2", "Se", "S2e"
};
static const char __cpuinitconst Cx486D_name[][4] = {
	"DX", "DX2", "?", "?", "?", "DX4"
};
static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
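/*
 * Cx86_cb is patched in place by the model cases in init_cyrix() below: a
 * multiplier digit from cyrix_model_mult1/cyrix_model_mult2 is written into
 * it and a pointer into the buffer is appended to the model name, giving
 * strings such as "2.5x Core/Bus Clock" (the exact text depends on DIR0,
 * that value is only illustrative).
 */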

/*
 * Reset the slow-loop (SLOP) bit on the 686(L), which is set by some old
 * BIOSes for compatibility with DOS games.  This makes the udelay loop
 * work correctly, and improves performance.
 *
 * FIXME: our newer udelay uses the TSC. We don't need to frob with SLOP.
 */

static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
{
	unsigned long flags;

	if (Cx86_dir0_msb == 3) {
		unsigned char ccr3, ccr5;

		local_irq_save(flags);
		ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
		ccr5 = getCx86(CX86_CCR5);
		if (ccr5 & 2)
			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
		setCx86(CX86_CCR3, ccr3);                 /* disable MAPEN */
		local_irq_restore(flags);

		if (ccr5 & 2) { /* possible wrong calibration done */
			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
			calibrate_delay();
			c->loops_per_jiffy = loops_per_jiffy;
		}
	}
}


static void __cpuinit set_cx86_reorder(void)
{
	u8 ccr3;

	printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

	/* disable Load/Store Serialize for memory accesses (i.e. allow reordering) */
	setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
	/* set load/store serialize from 1GB to 4GB */
	ccr3 |= 0xe0;
	setCx86(CX86_CCR3, ccr3);
}

static void __cpuinit set_cx86_memwb(void)
{
	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");

	/* CCR2 bit 2: unlock NW bit */
	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
	/* set 'Not Write-through' */
	write_cr0(read_cr0() | X86_CR0_NW);
	/* CCR2 bit 2: lock NW bit and set WT1 */
	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
}

/*
 *	Configure later MediaGX and/or Geode processor.
 */

static void __cpuinit geode_configure(void)
{
	unsigned long flags;
	u8 ccr3;
	local_irq_save(flags);

	/* Suspend on halt power saving and enable #SUSP pin */
	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */


	/* FPU fast, DTE cache, Mem bypass */
	setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */

	set_cx86_memwb();
	set_cx86_reorder();

	local_irq_restore(flags);
}

static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir1 = 0;

	__do_cyrix_devid(&dir0, &dir1);
	dir0_msn = dir0 >> 4; /* identifies CPU "family"   */

	switch (dir0_msn) {
	case 3: /* 6x86/6x86L */
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		break;
	case 5: /* 6x86MX/M II */
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		break;
	}
}

static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
	char *buf = c->x86_model_id;
	const char *p = NULL;

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
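	/*
	 * The n*32+bit notation indexes c->x86_capability directly: word 0
	 * holds the standard CPUID (leaf 1) EDX bits and word 1 the AMD-style
	 * extended CPUID (leaf 0x80000001) EDX bits, so 0*32+31 and 1*32+24
	 * below are raw feature bits rather than named X86_FEATURE_* flags.
	 */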
	clear_cpu_cap(c, 0*32+31);

	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
	if (test_cpu_cap(c, 1*32+24)) {
		clear_cpu_cap(c, 1*32+24);
		set_cpu_cap(c, X86_FEATURE_CXMMX);
	}

	do_cyrix_devid(&dir0, &dir1);

	check_cx686_slop(c);

	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */

	/* common case step number/rev -- exceptions handled below */
	c->x86_model = (dir1 >> 4) + 1;
	c->x86_mask = dir1 & 0xf;
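	/* e.g. a DIR1 of 0x22 would yield x86_model 3 and x86_mask 2 here
	   (purely illustrative value) */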

	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
	 * We do the same thing for each generation: we work out
	 * the model, multiplier and stepping.  Black magic included,
	 * to make the silicon step/rev numbers match the printed ones.
	 */

	switch (dir0_msn) {
		unsigned char tmp;

	case 0: /* Cx486SLC/DLC/SRx/DRx */
		p = Cx486_name[dir0_lsn & 7];
		break;

	case 1: /* Cx486S/DX/DX2/DX4 */
		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
			: Cx486S_name[dir0_lsn & 3];
		break;

	case 2: /* 5x86 */
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		p = Cx86_cb+2;
		break;

	case 3: /* 6x86/6x86L */
		Cx86_cb[1] = ' ';
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		if (dir1 > 0x21) { /* 686L */
			Cx86_cb[0] = 'L';
			p = Cx86_cb;
			(c->x86_model)++;
		} else             /* 686 */
			p = Cx86_cb+1;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		/* 6x86s contain this bug */
		set_cpu_bug(c, X86_BUG_COMA);
		break;

	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
#ifdef CONFIG_PCI
	{
		u32 vendor, device;
		/*
		 * It isn't really a PCI quirk directly, but the cure is the
		 * same. The MediaGX has deep magic SMM stuff that handles the
		 * SB emulation. It throws away the FIFO on disable_dma(),
		 * which is wrong and ruins the audio.
		 *
		 *  Bug2: VSA1 has a wrap bug so that using maximum-sized DMA
		 *  causes bad things. According to NatSemi, VSA2 has another
		 *  bug to do with 'hlt'. I've not seen any boards using VSA2
		 *  and X doesn't seem to support it either so who cares 8).
		 *  VSA1 we work around, however.
		 */

		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
		isa_dma_bridge_buggy = 2;

		/* We do this before the PCI layer is running. However, we
		   are safe here as we know the bridge must be a Cyrix
		   companion and must be present */
		vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
		device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);

		/*
		 *  The 5510/5520 companion chips have a funky PIT.
		 */
		if (vendor == PCI_VENDOR_ID_CYRIX &&
			(device == PCI_DEVICE_ID_CYRIX_5510 ||
					device == PCI_DEVICE_ID_CYRIX_5520))
			mark_tsc_unstable("cyrix 5510/5520 detected");
	}
#endif
		c->x86_cache_size = 16;	/* Yep 16K integrated cache, that's it */

		/* GXm supports extended cpuid levels a la AMD */
		if (c->cpuid_level == 2) {
			/* Enable cxMMX extensions (GX1 Datasheet 54) */
			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);

			/*
			 * GXm : 0x30 ... 0x5f GXm  datasheet 51
			 * GXlv: 0x6x          GXlv datasheet 54
			 *  ?  : 0x7x
			 * GX1 : 0x8x          GX1  datasheet 56
			 */
			if ((0x30 <= dir1 && dir1 <= 0x6f) ||
					(0x80 <= dir1 && dir1 <= 0x8f))
				geode_configure();
			return;
		} else { /* MediaGX */
			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
			p = Cx86_cb+2;
			c->x86_model = (dir1 & 0x20) ? 1 : 2;
		}
		break;

	case 5: /* 6x86MX/M II */
		if (dir1 > 7) {
			dir0_msn++;  /* M II */
			/* Enable MMX extensions (App note 108) */
			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
		} else {
			/* A 6x86MX - it has the bug. */
			set_cpu_bug(c, X86_BUG_COMA);
		}
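		/*
		 * Choose where the multiplier digit lands in Cx86_cb: with
		 * tmp == 2 the suffix reads "Nx Core/Bus Clock", with
		 * tmp == 0 the leading '?' is overwritten and it reads
		 * "N.5x Core/Bus Clock" (apparently integer vs. half-integer
		 * multipliers).
		 */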
		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
		p = Cx86_cb+tmp;
		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
			(c->x86_model)++;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		break;

	case 0xf:  /* Cyrix 486 without DEVID registers */
		switch (dir0_lsn) {
		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
			dir0_msn = 0;
			p = Cx486_name[(c->hard_math) ? 1 : 0];
			break;

		case 0xe:  /* a 486S A step */
			dir0_msn = 0;
			p = Cx486S_name[0];
			break;
		}
		break;

	default:  /* unknown (shouldn't happen, we know everyone ;-) */
		dir0_msn = 7;
		break;
	}
	strcpy(buf, Cx86_model[dir0_msn & 7]);
	if (p)
		strcat(buf, p);
	return;
}

/*
 * Handle National Semiconductor branded processors
 */
static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
{
	/*
	 * There may be GX1 processors in the wild that are branded
	 * NSC and not Cyrix.
	 *
	 * This function only handles the GX processor, and kicks
	 * everything else to the Cyrix init function above - that should
	 * cover any processors that might have been branded differently
	 * after NSC acquired Cyrix.
	 *
	 * If this breaks your GX1 horribly, please e-mail
	 * info-linux@ldcmail.amd.com to tell us.
	 */

	/* Handle the GX (Formerly known as the GX2) */

	if (c->x86 == 5 && c->x86_model == 5)
		cpu_detect_cache_sizes(c);
	else
		init_cyrix(c);
}

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
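/*
 * The 0x02 value checked for below follows from the setup: %eax is loaded
 * with 0x0005, so SAHF copies AH = 0x00 into the low flag byte, and bit 1
 * of EFLAGS always reads back as 1.  A CPU that leaves the flags alone
 * across the DIV therefore returns 0x02 from LAHF, while other 486-class
 * chips modify the flags and return something else.
 */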
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide. */
	return (unsigned char) (test >> 8) == 0x02;
}

static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
{
	/* Detect Cyrix with disabled CPUID */
	if (c->x86 == 4 && test_cyrix_52div()) {
		unsigned char dir0, dir1;

		strcpy(c->x86_vendor_id, "CyrixInstead");
		c->x86_vendor = X86_VENDOR_CYRIX;

		/* Actually enable cpuid on the older Cyrix */

		/* Retrieve CPU revisions */

		do_cyrix_devid(&dir0, &dir1);

		dir0 >>= 4;

		/* Check it is an affected model */

		if (dir0 == 5 || dir0 == 3) {
			unsigned char ccr3;
			unsigned long flags;
			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
			local_irq_save(flags);
			ccr3 = getCx86(CX86_CCR3);
			/* enable MAPEN  */
			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
			/* enable cpuid  */
			setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
			/* disable MAPEN */
			setCx86(CX86_CCR3, ccr3);
			local_irq_restore(flags);
		}
	}
}

static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
	.c_vendor	= "Cyrix",
	.c_ident	= { "CyrixInstead" },
	.c_early_init	= early_init_cyrix,
	.c_init		= init_cyrix,
	.c_identify	= cyrix_identify,
	.c_x86_vendor	= X86_VENDOR_CYRIX,
};

cpu_dev_register(cyrix_cpu_dev);
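/*
 * cpu_dev_register() is expected to drop a pointer to the descriptor into a
 * dedicated init section (assumed to be .x86_cpu_dev.init, per the cpu.h of
 * this era) which the generic CPU identification code walks at boot when
 * matching the vendor string.
 */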

static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
	.c_vendor	= "NSC",
	.c_ident	= { "Geode by NSC" },
	.c_init		= init_nsc,
	.c_x86_vendor	= X86_VENDOR_NSC,
};

cpu_dev_register(nsc_cpu_dev);