/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
	L2CR functions
	Copyright © 1997-1998 by PowerLogix R & D, Inc.

*/
/*
	Thur, Dec. 12, 1998.
	- First public release, contributed by PowerLogix.
	***********
	Sat, Aug. 7, 1999.
	- Terry: Made sure code disabled interrupts before running. (Previously
			it was assumed interrupts were already disabled).
	- Terry: Updated for tentative G4 support.  4MB of memory is now flushed
			instead of 2MB.  (Probably only 3MB is necessary).
	- Terry: Updated for workaround to HID0[DPM] processor bug
			during global invalidates.
	***********
	Thu, July 13, 2000.
	- Terry: Added isync to correct for an erratum.

	22 August 2001.
	- DanM: Finally added the 7450 patch I've had for the past
		several months.  The L2CR is similar, but I'm going
		to assume the user of these functions knows what they
		are doing.

	Author:	Terry Greeniaus (tgree@phys.ualberta.ca)
	Please e-mail updates to this file to me, thanks!
*/
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/feature-fixups.h>

/* Usage:

	When setting the L2CR register, you must do a few special
	things.  If you are enabling the cache, you must perform a
	global invalidate.  If you are disabling the cache, you must
	flush the cache contents first.  This routine takes care of
	doing these things.  When first enabling the cache, make sure
	you pass in the L2CR value you want, with the global
	invalidate bit set.  A global invalidate will only be
	performed if the L2I bit is set in applyThis.  When enabling
	the cache, you should also set the L2E bit in applyThis.  If
	you want to modify the L2CR contents after the cache has been
	enabled, the recommended procedure is to first call
	_set_L2CR(0) to disable the cache and then call it again with
	the new values for L2CR.  Examples:

	_set_L2CR(0)		- disables the cache
	_set_L2CR(0xB3A04000)	- enables my G3 upgrade card:
				- L2E set to turn on the cache
				- L2SIZ set to 1MB
				- L2CLK set to 1:1
				- L2RAM set to pipelined synchronous late-write
				- L2I set to perform a global invalidation
				- L2OH set to 0.5 ns
				- L2DF set because this upgrade card
				  requires it

	A similar call should work for your card.  You need to know
	the correct settings for your card and then place them in the
	fields I have outlined above.  Other fields support optional
	features, such as L2DO, which caches only data, or L2TS, which
	causes cache pushes from the L1 cache to go to the L2 cache
	instead of to main memory.

IMPORTANT:
	Starting with the 7450, the bits in this register have moved
	or behave differently.  The Enable, Parity Enable, Size,
	and L2 Invalidate are the only bits that have not moved.
	The size is read-only for these processors with internal L2
	cache, and the invalidate bit is a control as well as status.
		-- Dan

*/
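/*
 * A minimal C-side usage sketch (illustrative only; the prototypes
 * below are assumptions made for this sketch, not declarations from
 * this file -- the real ones live elsewhere in the tree):
 *
 *	extern int _set_L2CR(unsigned long val);
 *	extern unsigned long _get_L2CR(void);
 *
 *	_set_L2CR(0);			// flush and disable the L2
 *	_set_L2CR(0xB3A04000);		// re-enable: L2E | L2I | card fields
 */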
/*
 * Summary: this procedure ignores the L2I bit in the value passed in,
 * flushes the cache if it was already enabled, always invalidates the
 * cache, then enables the cache if the L2E bit is set in the value
 * passed in.
 *   -- paulus.
 */
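/*
 * Rough pseudocode of the sequence below (an illustrative sketch of
 * the main path, ignoring the per-CPU feature sections):
 *
 *	if (!CPU_FTR_L2CR)
 *		return -1;
 *	clear MSR[EE] and MSR[DR]; clear HID0[DPM];
 *	if (L2CR[L2E] was set)
 *		load, then dcbf, the first 4MB to flush the L2;
 *	write L2CR with L2E and L2I clear;
 *	set L2I, wait for the invalidate to finish, clear L2I;
 *	if (the caller's value had L2E set)
 *		set L2E;
 *	restore HID0 and MSR.
 */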
_GLOBAL(_set_L2CR)
	/* Make sure this is a 750 or 7400 chip */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)

	mflr	r9

	/* Stop DST streams */
BEGIN_FTR_SECTION
	DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* Turn off EE bit */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync
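	/* Worked example of the mask idiom above, in IBM bit numbering
	 * (bit 0 is the MSB of the 32-bit word): rlwinm rD,rS,0,17,15
	 * keeps bits 17..31 and 0..15, clearing only bit 16 (MSR[EE]);
	 * rlwinm rD,rS,0,28,26 likewise clears only bit 27 (MSR[DR]).
	 */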

	/* Before we perform the global invalidation, we must disable dynamic
	 * power management via HID0[DPM] to work around a processor bug where
	 * DPM can possibly interfere with the state machine in the processor
	 * that invalidates the L2 cache tags.
	 */
	mfspr	r8,SPRN_HID0		/* Save HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r4,SPRN_L2CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,11,9		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,1,31		/* Turn off the enable bit */

	/* Check to see if we need to flush */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache. First, read the first 4MB of memory (physical) to
	 * put new data in the cache.  (Actually we only need
	 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
	 * cover everything just to be safe).
	 */

	 /**** Might be a good idea to set L2DO here - to prevent instructions
	       from getting into the cache.  But since we invalidate
	       the next time we enable the cache it doesn't really matter.
	       Don't do this unless you accommodate all processor variations.
	       The bit moved on the 7450...
	  ****/

BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * the L2 prefetch engines are idle.  As the errata text
	 * explains, we can't be sure they are; we just hope very
	 * hard that this will be enough.  At least I noticed Apple
	 * doesn't even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
	/* TODO: use HW flush assist when available */

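	/* C-equivalent sketch of the two loops below (illustrative only;
	 * with MSR[DR] off these are physical addresses 0..4MB, walked in
	 * 32-byte cache lines):
	 *
	 *	for (p = 0; p < 0x400000; p += 32)
	 *		(void)*(volatile unsigned long *)p;	// displace L2
	 *	for (p = 0; p < 0x400000; p += 32)
	 *		asm("dcbf 0,%0" :: "r" (p) : "memory");	// flush line
	 */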
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L2CR configuration bits (and switch L2 off) */
	/* CPU errata: Make sure the mtspr below is already in the
	 * L1 icache before it executes.  Execution order is
	 * 20 -> 21 -> 22 -> 23: with 32-byte cache lines, everything
	 * from the aligned block at 22 through the "b 22b" fits in a
	 * single line, so running the code at 20/21 first fetches the
	 * line holding the mtspr before branching back to execute it.
	 */
	b	20f
	.balign	L1_CACHE_BYTES
22:
	sync
	mtspr	SPRN_L2CR,r3
	sync
	b	23f
20:
	b	21f
21:	sync
	isync
	b	22b

23:
	/* Perform a global invalidation */
	oris	r3,r3,0x0020
	sync
	mtspr	SPRN_L2CR,r3
	sync
	isync				/* For errata */

BEGIN_FTR_SECTION
	/* On the 7450, we wait for the L2I bit to clear... */
10:	mfspr	r3,SPRN_L2CR
	andis.	r4,r3,0x0020
	bne	10b
	b	11f
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* Wait for the invalidation to complete */
3:	mfspr	r3,SPRN_L2CR
	rlwinm.	r4,r3,0,31,31
	bne	3b

11:	rlwinm	r3,r3,0,11,9		/* Turn off the L2I bit */
	sync
	mtspr	SPRN_L2CR,r3
	sync

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,0x8000
	mtspr	SPRN_L2CR,r3
	sync

	/* Enable L2 HW prefetch on 744x/745x */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
4:

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mtspr	SPRN_HID0,r8
	sync

	/* Restore MSR (restores EE and DR bits to original state) */
	SYNC
	mtmsr	r7
	isync

	mtlr	r9
	blr

_GLOBAL(_get_L2CR)
	/* Return the L2CR contents */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L2CR
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	blr


/*
 * Here is a similar routine for dealing with the L3 cache
 * on the 745x family of chips
 */

_GLOBAL(_set_L3CR)
	/* Make sure this is a 745x chip */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* Turn off EE bit */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Stop DST streams */
	DSSALL
	sync

	/* Get the current enable bit of the L3CR into r4 */
	mfspr	r4,SPRN_L3CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,22,20		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,2,31		/* Turn off the enable & PE bits */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	/* Check to see if we need to flush */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache. */

	/* TODO: use HW flush assist */
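	/* Loop bound below: 0x0008 << 16 = 0x80000 iterations of 32 bytes
	 * each, i.e. 16MB is read and flushed -- comfortably larger than
	 * any 745x L3 configuration.
	 */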
	lis	r4,0x0008
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,0,r4
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L3CR configuration bits (and switch L3 off) */
	sync
	mtspr	SPRN_L3CR,r3
	sync

	oris	r3,r3,L3CR_L3RES@h		/* Set reserved bit 5 */
	mtspr	SPRN_L3CR,r3
	sync
	oris	r3,r3,L3CR_L3CLKEN@h		/* Set clken */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Perform a global invalidation */
	ori	r3,r3,0x0400
	sync
	mtspr	SPRN_L3CR,r3
	sync
	isync

	/* We wait for the L3I bit to clear... */
10:	mfspr	r3,SPRN_L3CR
	andi.	r4,r3,0x0400
	bne	10b

	/* Clear CLKEN */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Restore MSR (restores EE and DR bits to original state) */
4:	SYNC
	mtmsr	r7
	isync
	blr

_GLOBAL(_get_L3CR)
	/* Return the L3CR contents */
	li	r3,0
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L3CR
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	blr

/* --- End of PowerLogix code --- */


/* flush_disable_L1()	- Flush and disable L1 cache
 *
 * clobbers r0, r3, ctr, cr0
 * Must be called with interrupts disabled and MMU enabled.
 */
_GLOBAL(__flush_disable_L1)
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Load the counter with 0x4000 cache lines (512kB) and
	 * fill the cache with data
	 */
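	/* Sizing note: 0x4000 lines * 32 bytes = 512kB, enough to displace
	 * the 32kB L1 data cache of these CPUs many times over.
	 */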
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	dcbf	0,r3
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	sync

	/* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,18,15	/* Clear HID0[DCE] and HID0[ICE] */
	mtspr	SPRN_HID0,r3
	sync
	isync
	blr

/* inval_enable_L1	- Invalidate and enable L1 cache
 *
 * Assumes L1 is already disabled and MSR:EE is off
 *
 * clobbers r3
 */
_GLOBAL(__inval_enable_L1)
	/* Enable and then flash-invalidate the instruction & data cache */
	mfspr	r3,SPRN_HID0
	ori	r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3, HID0_ICFI|HID0_DCI	/* ICFI and DCI only pulse
						 * the flash invalidate;
						 * clear them again */
	mtspr	SPRN_HID0,r3
	sync

	blr
460