/* arch/x86/kernel/cpu/mtrr/cyrix.c: Cyrix ARR (MTRR) support */
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
#include <asm/io.h>
#include <asm/processor-cyrix.h>
#include "mtrr.h"

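/*
 * Set when ARR3 is reserved for SMM (SMI_LOCK in CCR3) and must therefore
 * not be handed out or modified by the MTRR code.
 */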
int arr3_protected;

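/*
 * Read ARR<reg> back in generic MTRR terms.  Each ARR is three bytes:
 * the top 20 bits hold the region base (address bits 31-12) and the low
 * nibble of the third byte holds the size code; the caching type comes
 * from the matching RCR.
 */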
static void
cyrix_get_arr(unsigned int reg, unsigned long *base,
	      unsigned long *size, mtrr_type *type)
{
	unsigned long flags;
	unsigned char arr, ccr3, rcr, shift;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/* Save flags and disable interrupts */
	local_irq_save(flags);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */
	((unsigned char *) base)[3] = getCx86(arr);
	((unsigned char *) base)[2] = getCx86(arr + 1);
	((unsigned char *) base)[1] = getCx86(arr + 2);
	rcr = getCx86(CX86_RCR_BASE + reg);
	setCx86(CX86_CCR3, ccr3);	/* disable MAPEN */

	/* Enable interrupts if they were enabled previously */
	local_irq_restore(flags);
	shift = ((unsigned char *) base)[1] & 0x0f;
	*base >>= PAGE_SHIFT;

	/*
	 * Power of two, at least 4K on ARR0-ARR6, 256K on ARR7.
	 * Note: shift==0xf means 4G, this is unsupported.
	 */
	if (shift)
		*size = (reg < 7 ? 0x1UL : 0x40UL) << (shift - 1);
	else
		*size = 0;

	/* Bit 0 is Cache Enable on ARR7, Cache Disable on ARR0-ARR6 */
	if (reg < 7) {
		switch (rcr) {
		case 1:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 9:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 24:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	} else {
		switch (rcr) {
		case 0:
			*type = MTRR_TYPE_UNCACHABLE;
			break;
		case 8:
			*type = MTRR_TYPE_WRCOMB;
			break;
		case 9:
			*type = MTRR_TYPE_WRBACK;
			break;
		case 25:
		default:
			*type = MTRR_TYPE_WRTHROUGH;
			break;
		}
	}
}

static int
cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
/*  [SUMMARY] Get a free ARR.
    <base> The starting (base) address of the region.
    <size> The size of the region (in 4K pages).
    <replace_reg> The ARR to reuse when replacing an existing entry,
    or a negative value to search for a free one.
    [RETURNS] The index of the region on success, else -ENOSPC on error.
*/
{
	int i;
	mtrr_type ltype;
	unsigned long lbase, lsize;

	switch (replace_reg) {
	case 7:
		if (size < 0x40)
			break;
		/* fall through: ARR7 is big enough for this region */
	case 6:
	case 5:
	case 4:
		return replace_reg;
	case 3:
		if (arr3_protected)
			break;
		/* fall through: ARR3 is not reserved for SMM */
	case 2:
	case 1:
	case 0:
		return replace_reg;
	}
	/* If we are to set up a region >32M then look at ARR7 immediately */
	if (size > 0x2000) {
		cyrix_get_arr(7, &lbase, &lsize, &ltype);
		if (lsize == 0)
			return 7;
		/*  Else try ARR0-ARR6 first  */
	} else {
		for (i = 0; i < 7; i++) {
			cyrix_get_arr(i, &lbase, &lsize, &ltype);
			if ((i == 3) && arr3_protected)
				continue;
			if (lsize == 0)
				return i;
		}
		/*
		 * ARR0-ARR6 aren't free; try ARR7, but its size must be
		 * at least 256K.
		 */
		cyrix_get_arr(i, &lbase, &lsize, &ltype);
		if ((lsize == 0) && (size >= 0x40))
			return i;
	}
	return -ENOSPC;
}

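/* CR4 and CCR3 values saved by prepare_set() and restored by post_set() */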
static u32 cr4;
static u32 ccr3;

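/*
 * Get the CPU ready for an ARR update: clear PGE, disable the caches
 * (CR0.CD), flush with wbinvd(), and set MAPEN in CCR3 so the Cyrix
 * configuration registers can be written.
 */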
static void prepare_set(void)
{
	u32 cr0;

	/*  Save value of CR4 and clear Page Global Enable (bit 7)  */
	if (cpu_has_pge) {
		cr4 = read_cr4();
		write_cr4(cr4 & ~X86_CR4_PGE);
	}

	/*
	 * Disable and flush caches. Note that wbinvd flushes the TLBs as
	 * a side-effect.
	 */
	cr0 = read_cr0() | 0x40000000;
	wbinvd();
	write_cr0(cr0);
	wbinvd();

	/* Save CCR3, then set MAPEN so the Cyrix ARRs can be programmed */
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
}

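/*
 * Undo prepare_set(): flush once more, restore CCR3 (clearing MAPEN),
 * re-enable the caches and restore CR4.
 */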
static void post_set(void)
{
	/*  Flush caches and TLBs  */
	wbinvd();

	/* Cyrix ARRs - everything else was excluded at the top */
	setCx86(CX86_CCR3, ccr3);

	/*  Enable caches  */
	write_cr0(read_cr0() & 0xbfffffff);

	/*  Restore value of CR4  */
	if (cpu_has_pge)
		write_cr4(cr4);
}

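/*
 * Program ARR<reg>: encode base (bits 31-12) and the 4-bit size code into
 * the three ARR bytes and write the caching type to the matching RCR,
 * with caches disabled and MAPEN set around the update.
 */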
static void cyrix_set_arr(unsigned int reg, unsigned long base,
			  unsigned long size, mtrr_type type)
{
	unsigned char arr, arr_type, arr_size;

	arr = CX86_ARR_BASE + (reg << 1) + reg;	/* avoid multiplication by 3 */

	/*
	 * ARR0-ARR6 are sized in 4K units (up to 32M), ARR7 in 256K units
	 * (up to 2G), hence the divide by 64 for ARR7.
	 */
	if (reg >= 7)
		size >>= 6;

	size &= 0x7fff;		/* make sure arr_size <= 14 */
	for (arr_size = 0; size; arr_size++, size >>= 1)
		;

	if (reg < 7) {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 1;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 9;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 24;
			break;
		default:
			arr_type = 8;
			break;
		}
	} else {
		switch (type) {
		case MTRR_TYPE_UNCACHABLE:
			arr_type = 0;
			break;
		case MTRR_TYPE_WRCOMB:
			arr_type = 8;
			break;
		case MTRR_TYPE_WRTHROUGH:
			arr_type = 25;
			break;
		default:
			arr_type = 9;
			break;
		}
	}

	prepare_set();

	base <<= PAGE_SHIFT;
	setCx86(arr, ((unsigned char *) &base)[3]);
	setCx86(arr + 1, ((unsigned char *) &base)[2]);
	setCx86(arr + 2, (((unsigned char *) &base)[1]) | arr_size);
	setCx86(CX86_RCR_BASE + reg, arr_type);

	post_set();
}

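/*
 * Shadow copies of the ARR and CCR state; cyrix_set_all() writes these
 * back to the hardware.
 */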
typedef struct {
	unsigned long base;
	unsigned long size;
	mtrr_type type;
} arr_state_t;

static arr_state_t arr_state[8] = {
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL},
	{0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}, {0UL, 0UL, 0UL}
};

static unsigned char ccr_state[7] = { 0, 0, 0, 0, 0, 0, 0 };

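/* Reload all CCRs and ARRs from the saved state above */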
static void cyrix_set_all(void)
{
	int i;

	prepare_set();

	/* the CCRs are not contiguous: CCR0-CCR3, then CCR4-CCR6 */
	for (i = 0; i < 4; i++)
		setCx86(CX86_CCR0 + i, ccr_state[i]);
	for (; i < 7; i++)
		setCx86(CX86_CCR4 + i - 4, ccr_state[i]);

	for (i = 0; i < 8; i++)
		cyrix_set_arr(i, arr_state[i].base,
			      arr_state[i].size, arr_state[i].type);

	post_set();
}

#if 0
/*
 * On Cyrix 6x86(MX) and M II, ARR3 is special: it is tied to SMM
 * (System Management Mode). So we need the following:
 * Check whether SMI_LOCK (CCR3 bit 0) is set
 *   if it is set, write a warning message: ARR3 cannot be changed!
 *     (it cannot be changed until the next processor reset)
 *   if it is clear, then we can change it; set all the needed bits:
 *   - disable access to SMM memory through ARR3 range (CCR1 bit 7 reset)
 *   - disable access to SMM memory (CCR1 bit 2 reset)
 *   - disable SMM mode (CCR1 bit 1 reset)
 *   - disable write protection of ARR3 (CCR6 bit 1 reset)
 *   - (maybe) disable ARR3
 * Just to be sure, we enable ARR usage by the processor (CCR5 bit 5 set)
 */
static void __init
cyrix_arr_init(void)
{
	struct set_mtrr_context ctxt;
	unsigned char ccr[7];
	int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
#ifdef CONFIG_SMP
	int i;
#endif

	/* flush cache and enable MAPEN */
	set_mtrr_prepare_save(&ctxt);
	set_mtrr_cache_disable(&ctxt);

	/* Save all CCRs locally */
	ccr[0] = getCx86(CX86_CCR0);
	ccr[1] = getCx86(CX86_CCR1);
	ccr[2] = getCx86(CX86_CCR2);
	ccr[3] = ctxt.ccr3;
	ccr[4] = getCx86(CX86_CCR4);
	ccr[5] = getCx86(CX86_CCR5);
	ccr[6] = getCx86(CX86_CCR6);

	if (ccr[3] & 1) {
		/* SMI_LOCK is set: ARR3 cannot change until the next reset */
		ccrc[3] = 1;
		arr3_protected = 1;
	} else {
		/* Disable SMM mode (bit 1), access to SMM memory (bit 2) and
		 * access to SMM memory through ARR3 (bit 7).
		 */
		if (ccr[1] & 0x80) {
			ccr[1] &= 0x7f;
			ccrc[1] |= 0x80;
		}
		if (ccr[1] & 0x04) {
			ccr[1] &= 0xfb;
			ccrc[1] |= 0x04;
		}
		if (ccr[1] & 0x02) {
			ccr[1] &= 0xfd;
			ccrc[1] |= 0x02;
		}
		arr3_protected = 0;
		if (ccr[6] & 0x02) {
			ccr[6] &= 0xfd;
			ccrc[6] = 1;	/* Disable write protection of ARR3 */
			setCx86(CX86_CCR6, ccr[6]);
		}
		/* Disable ARR3. This is safe now that we disabled SMM. */
		/* cyrix_set_arr_up (3, 0, 0, 0, FALSE); */
	}
	/* If we changed CCR1 in memory, change it in the processor, too. */
	if (ccrc[1])
		setCx86(CX86_CCR1, ccr[1]);

	/* Enable ARR usage by the processor */
	if (!(ccr[5] & 0x20)) {
		ccr[5] |= 0x20;
		ccrc[5] = 1;
		setCx86(CX86_CCR5, ccr[5]);
	}
#ifdef CONFIG_SMP
	for (i = 0; i < 7; i++)
		ccr_state[i] = ccr[i];
	for (i = 0; i < 8; i++)
		cyrix_get_arr(i,
			      &arr_state[i].base, &arr_state[i].size,
			      &arr_state[i].type);
#endif

	set_mtrr_done(&ctxt);	/* flush cache and disable MAPEN */

	if (ccrc[5])
		printk(KERN_INFO "mtrr: ARR usage was not enabled, enabled manually\n");
	if (ccrc[3])
		printk(KERN_INFO "mtrr: ARR3 cannot be changed\n");
/*
    if ( ccrc[1] & 0x80) printk ("mtrr: SMM memory access through ARR3 disabled\n");
    if ( ccrc[1] & 0x04) printk ("mtrr: SMM memory access disabled\n");
    if ( ccrc[1] & 0x02) printk ("mtrr: SMM mode disabled\n");
*/
	if (ccrc[6])
		printk(KERN_INFO "mtrr: ARR3 was write protected, unprotected\n");
}
#endif

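/* Cyrix-specific hooks for the generic MTRR driver */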
static struct mtrr_ops cyrix_mtrr_ops = {
	.vendor            = X86_VENDOR_CYRIX,
//	.init              = cyrix_arr_init,
	.set_all           = cyrix_set_all,
	.set               = cyrix_set_arr,
	.get               = cyrix_get_arr,
	.get_free_region   = cyrix_get_free_region,
	.validate_add_page = generic_validate_add_page,
	.have_wrcomb       = positive_have_wrcomb,
};

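/* Register the Cyrix-specific ops with the generic MTRR code */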
int __init cyrix_init_mtrr(void)
{
	set_mtrr_ops(&cyrix_mtrr_ops);
	return 0;
}

//arch_initcall(cyrix_init_mtrr);