xref: /openbmc/linux/arch/powerpc/kernel/l2cr_6xx.S (revision 03ab8e6297acd1bc0eedaa050e2a1635c576fd11)
174ba9207SThomas Gleixner/* SPDX-License-Identifier: GPL-2.0-or-later */
20eb4cb9bSPaul Mackerras/*
30eb4cb9bSPaul Mackerras	L2CR functions
496de0e25SJan Engelhardt	Copyright © 1997-1998 by PowerLogix R & D, Inc.
50eb4cb9bSPaul Mackerras
60eb4cb9bSPaul Mackerras*/
70eb4cb9bSPaul Mackerras/*
80eb4cb9bSPaul Mackerras	Thur, Dec. 12, 1998.
90eb4cb9bSPaul Mackerras	- First public release, contributed by PowerLogix.
100eb4cb9bSPaul Mackerras	***********
110eb4cb9bSPaul Mackerras	Sat, Aug. 7, 1999.
120eb4cb9bSPaul Mackerras	- Terry: Made sure code disabled interrupts before running. (Previously
130eb4cb9bSPaul Mackerras			it was assumed interrupts were already disabled).
140eb4cb9bSPaul Mackerras	- Terry: Updated for tentative G4 support.  4MB of memory is now flushed
150eb4cb9bSPaul Mackerras			instead of 2MB.  (Prob. only 3 is necessary).
160eb4cb9bSPaul Mackerras	- Terry: Updated for workaround to HID0[DPM] processor bug
170eb4cb9bSPaul Mackerras			during global invalidates.
180eb4cb9bSPaul Mackerras	***********
190eb4cb9bSPaul Mackerras	Thu, July 13, 2000.
200eb4cb9bSPaul Mackerras	- Terry: Added isync to correct for an errata.
210eb4cb9bSPaul Mackerras
220eb4cb9bSPaul Mackerras	22 August 2001.
230eb4cb9bSPaul Mackerras	- DanM: Finally added the 7450 patch I've had for the past
240eb4cb9bSPaul Mackerras		several months.  The L2CR is similar, but I'm going
250eb4cb9bSPaul Mackerras		to assume the user of this functions knows what they
260eb4cb9bSPaul Mackerras		are doing.
270eb4cb9bSPaul Mackerras
280eb4cb9bSPaul Mackerras	Author:	Terry Greeniaus (tgree@phys.ualberta.ca)
290eb4cb9bSPaul Mackerras	Please e-mail updates to this file to me, thanks!
300eb4cb9bSPaul Mackerras*/
310eb4cb9bSPaul Mackerras#include <asm/processor.h>
320eb4cb9bSPaul Mackerras#include <asm/cputable.h>
330eb4cb9bSPaul Mackerras#include <asm/ppc_asm.h>
340eb4cb9bSPaul Mackerras#include <asm/cache.h>
350eb4cb9bSPaul Mackerras#include <asm/page.h>
362c86cd18SChristophe Leroy#include <asm/feature-fixups.h>
370eb4cb9bSPaul Mackerras
380eb4cb9bSPaul Mackerras/* Usage:
390eb4cb9bSPaul Mackerras
400eb4cb9bSPaul Mackerras	When setting the L2CR register, you must do a few special
410eb4cb9bSPaul Mackerras	things.  If you are enabling the cache, you must perform a
420eb4cb9bSPaul Mackerras	global invalidate.  If you are disabling the cache, you must
430eb4cb9bSPaul Mackerras	flush the cache contents first.  This routine takes care of
440eb4cb9bSPaul Mackerras	doing these things.  When first enabling the cache, make sure
450eb4cb9bSPaul Mackerras	you pass in the L2CR you want, as well as passing in the
460eb4cb9bSPaul Mackerras	global invalidate bit set.  A global invalidate will only be
470eb4cb9bSPaul Mackerras	performed if the L2I bit is set in applyThis.  When enabling
480eb4cb9bSPaul Mackerras	the cache, you should also set the L2E bit in applyThis.  If
490eb4cb9bSPaul Mackerras	you want to modify the L2CR contents after the cache has been
	enabled, the recommended procedure is to first call
	_set_L2CR(0) to disable the cache and then call it again with
	the new values for L2CR.  Examples:

	_set_L2CR(0)		- disables the cache
	_set_L2CR(0xB3A04000)	- enables my G3 upgrade card:
560eb4cb9bSPaul Mackerras				- L2E set to turn on the cache
570eb4cb9bSPaul Mackerras				- L2SIZ set to 1MB
580eb4cb9bSPaul Mackerras				- L2CLK set to 1:1
590eb4cb9bSPaul Mackerras				- L2RAM set to pipelined synchronous late-write
600eb4cb9bSPaul Mackerras				- L2I set to perform a global invalidation
610eb4cb9bSPaul Mackerras				- L2OH set to 0.5 nS
620eb4cb9bSPaul Mackerras				- L2DF set because this upgrade card
630eb4cb9bSPaul Mackerras				  requires it
640eb4cb9bSPaul Mackerras
650eb4cb9bSPaul Mackerras	A similar call should work for your card.  You need to know
660eb4cb9bSPaul Mackerras	the correct setting for your card and then place them in the
670eb4cb9bSPaul Mackerras	fields I have outlined above.  Other fields support optional
680eb4cb9bSPaul Mackerras	features, such as L2DO which caches only data, or L2TS which
690eb4cb9bSPaul Mackerras	causes cache pushes from the L1 cache to go to the L2 cache
700eb4cb9bSPaul Mackerras	instead of to main memory.
710eb4cb9bSPaul Mackerras
720eb4cb9bSPaul MackerrasIMPORTANT:
730eb4cb9bSPaul Mackerras	Starting with the 7450, the bits in this register have moved
740eb4cb9bSPaul Mackerras	or behave differently.  The Enable, Parity Enable, Size,
750eb4cb9bSPaul Mackerras	and L2 Invalidate are the only bits that have not moved.
760eb4cb9bSPaul Mackerras	The size is read-only for these processors with internal L2
770eb4cb9bSPaul Mackerras	cache, and the invalidate is a control as well as status.
780eb4cb9bSPaul Mackerras		-- Dan
790eb4cb9bSPaul Mackerras
800eb4cb9bSPaul Mackerras*/
810eb4cb9bSPaul Mackerras/*
820eb4cb9bSPaul Mackerras * Summary: this procedure ignores the L2I bit in the value passed in,
830eb4cb9bSPaul Mackerras * flushes the cache if it was already enabled, always invalidates the
840eb4cb9bSPaul Mackerras * cache, then enables the cache if the L2E bit is set in the value
850eb4cb9bSPaul Mackerras * passed in.
860eb4cb9bSPaul Mackerras *   -- paulus.
870eb4cb9bSPaul Mackerras */
_GLOBAL(_set_L2CR)
	/* Make sure this is a 750 or 7400 chip */
BEGIN_FTR_SECTION
	li	r3,-1			/* No L2CR on this CPU: return -1 */
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)

	mflr	r9

	/* Stop DST streams */
BEGIN_FTR_SECTION
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* Turn off EE bit */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Before we perform the global invalidation, we must disable dynamic
	 * power management via HID0[DPM] to work around a processor bug where
	 * DPM can possibly interfere with the state machine in the processor
	 * that invalidates the L2 cache tags.
	 */
	mfspr	r8,SPRN_HID0		/* Save HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Get the current enable bit of the L2CR into r4 */
	mfspr	r4,SPRN_L2CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,11,9		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,1,31		/* Turn off the enable bit */

	/* Check to see if we need to flush (i.e. the L2 was enabled) */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache. First, read the first 4MB of memory (physical) to
	 * put new data in the cache.  (Actually we only need
	 * the size of the L2 cache plus the size of the L1 cache, but 4MB will
	 * cover everything just to be safe).
	 */

	 /**** Might be a good idea to set L2DO here - to prevent instructions
	       from getting into the cache.  But since we invalidate
	       the next time we enable the cache it doesn't really matter.
	       Don't do this unless you accommodate all processor variations.
	       The bit moved on the 7450.....
	  ****/

BEGIN_FTR_SECTION
	/* Disable L2 prefetch on some 745x and try to ensure
	 * L2 prefetch engines are idle. As explained by errata
	 * text, we can't be sure they are, we just hope very hard
	 * that well be enough (sic !). At least I noticed Apple
	 * doesn't even bother doing the dcbf's here...
	 */
	mfspr	r4,SPRN_MSSCR0
	rlwinm	r4,r4,0,0,29
	sync
	mtspr	SPRN_MSSCR0,r4
	sync
	isync
	lis	r4,KERNELBASE@h
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* TODO: use HW flush assist when available */

	/* 0x20000 lines x 32 bytes = 4MB of loads */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L2CR configuration bits (and switch L2 off) */
	/* CPU errata: Make sure the mtspr below is already in the
	 * L1 icache
	 */
	b	20f
	.balign	L1_CACHE_BYTES
22:
	sync
	mtspr	SPRN_L2CR,r3
	sync
	b	23f
20:
	b	21f
21:	sync
	isync
	b	22b

23:
	/* Perform a global invalidation */
	oris	r3,r3,0x0020		/* Set the L2I bit */
	sync
	mtspr	SPRN_L2CR,r3
	sync
	isync				/* For errata */

BEGIN_FTR_SECTION
	/* On the 7450, we wait for the L2I bit to clear......
	*/
10:	mfspr	r3,SPRN_L2CR
	andis.	r4,r3,0x0020
	bne	10b
	b	11f
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)

	/* Wait for the invalidation to complete (L2IP, bit 31) */
3:	mfspr	r3,SPRN_L2CR
	rlwinm.	r4,r3,0,31,31
	bne	3b

11:	rlwinm	r3,r3,0,11,9		/* Turn off the L2I bit */
	sync
	mtspr	SPRN_L2CR,r3
	sync

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,0x8000		/* Set the L2E bit */
	mtspr	SPRN_L2CR,r3
	sync

	/* Enable L2 HW prefetch on 744x/745x */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_MSSCR0
	ori	r3,r3,3
	sync
	mtspr	SPRN_MSSCR0,r3
	sync
	isync
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
4:

	/* Restore HID0[DPM] to whatever it was before.
	 * Use the symbolic SPR name rather than the raw number 1008,
	 * consistent with the save/disable sequence above.
	 */
	sync
	mtspr	SPRN_HID0,r8
	sync

	/* Restore MSR (restores EE and DR bits to original state) */
	mtmsr	r7
	isync

	mtlr	r9
	blr
2640eb4cb9bSPaul Mackerras
/*
 * _get_L2CR: return the current L2CR contents in r3.
 * On CPUs without an L2CR the feature section is patched out at boot
 * (CPU_FTR_L2CR clear) and 0 is returned instead.
 */
_GLOBAL(_get_L2CR)
	/* Return the L2CR contents */
	li	r3,0			/* Default when this CPU has no L2CR */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L2CR
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	blr
2720eb4cb9bSPaul Mackerras
2730eb4cb9bSPaul Mackerras
2740eb4cb9bSPaul Mackerras/*
2750eb4cb9bSPaul Mackerras * Here is a similar routine for dealing with the L3 cache
2760eb4cb9bSPaul Mackerras * on the 745x family of chips
2770eb4cb9bSPaul Mackerras */
2780eb4cb9bSPaul Mackerras
/*
 * _set_L3CR: program the L3CR register on 745x CPUs.
 * Mirrors _set_L2CR above: flushes the L3 if it was enabled, always
 * performs a global invalidation, then re-enables the L3 only if the
 * L3E bit is set in the value passed in (r3).  Returns -1 in r3 on
 * CPUs without an L3CR.  Runs with interrupts and data relocation off.
 */
_GLOBAL(_set_L3CR)
	/* Make sure this is a 745x chip */
BEGIN_FTR_SECTION
	li	r3,-1
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)

	/* Turn off interrupts and data relocation. */
	mfmsr	r7		/* Save MSR in r7 */
	rlwinm	r4,r7,0,17,15	/* Turn off EE bit */
	rlwinm	r4,r4,0,28,26	/* Turn off DR bit */
	sync
	mtmsr	r4
	isync

	/* Stop DST streams */
	PPC_DSSALL
	sync

	/* Get the current enable bit of the L3CR into r4 */
	mfspr	r4,SPRN_L3CR

	/* Tweak some bits */
	rlwinm	r5,r3,0,0,0		/* r5 contains the new enable bit */
	rlwinm	r3,r3,0,22,20		/* Turn off the invalidate bit */
	rlwinm	r3,r3,0,2,31		/* Turn off the enable & PE bits */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	/* Check to see if we need to flush (i.e. the L3 was enabled) */
	rlwinm.	r4,r4,0,0,0
	beq	2f

	/* Flush the cache.
	 */

	/* TODO: use HW flush assist */

	/* Read then flush 0x80000 lines x 32 bytes = 16MB from address 0 */
	lis	r4,0x0008
	mtctr	r4
	li	r4,0
1:
	lwzx	r0,0,r4
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

2:
	/* Set up the L3CR configuration bits (and switch L3 off) */
	sync
	mtspr	SPRN_L3CR,r3
	sync

	oris	r3,r3,L3CR_L3RES@h		/* Set reserved bit 5 */
	mtspr	SPRN_L3CR,r3
	sync
	oris	r3,r3,L3CR_L3CLKEN@h		/* Set clken */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize — a short busy delay via the count register */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Perform a global invalidation (set L3I) */
	ori	r3,r3,0x0400
	sync
	mtspr	SPRN_L3CR,r3
	sync
	isync

	/* We wait for the L3I bit to clear...... */
10:	mfspr	r3,SPRN_L3CR
	andi.	r4,r3,0x0400
	bne	10b

	/* Clear CLKEN */
	rlwinm	r3,r3,0,5,3		/* Turn off the clken bit */
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* See if we need to enable the cache */
	cmplwi	r5,0
	beq	4f

	/* Enable the cache */
	oris	r3,r3,(L3CR_L3E | L3CR_L3CLKEN)@h
	mtspr	SPRN_L3CR,r3
	sync

	/* Wait for stabilize */
	li	r0,256
	mtctr	r0
1:	bdnz	1b

	/* Restore MSR (restores EE and DR bits to original state) */
4:
	mtmsr	r7
	isync
	blr
3830eb4cb9bSPaul Mackerras
/*
 * _get_L3CR: return the current L3CR contents in r3.
 * On CPUs without an L3CR the feature section is patched out at boot
 * (CPU_FTR_L3CR clear) and 0 is returned instead.
 */
_GLOBAL(_get_L3CR)
	/* Return the L3CR contents */
	li	r3,0			/* Default when this CPU has no L3CR */
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L3CR
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
	blr
3910eb4cb9bSPaul Mackerras
3920eb4cb9bSPaul Mackerras/* --- End of PowerLogix code ---
3930eb4cb9bSPaul Mackerras */
3940eb4cb9bSPaul Mackerras
3950eb4cb9bSPaul Mackerras
3960eb4cb9bSPaul Mackerras/* flush_disable_L1()	- Flush and disable L1 cache
3970eb4cb9bSPaul Mackerras *
3980eb4cb9bSPaul Mackerras * clobbers r0, r3, ctr, cr0
3990eb4cb9bSPaul Mackerras * Must be called with interrupts disabled and MMU enabled.
4000eb4cb9bSPaul Mackerras */
_GLOBAL(__flush_disable_L1)
	/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
	PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	sync

	/* Load counter to 0x4000 cache lines (512k) and
	 * fill the cache with data.  NOTE(review): assumes the L1 data
	 * cache is at most 512kB on all CPUs reaching here — confirm for
	 * new CPU models.
	 */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines back to memory */
	li	r3,0x4000	/* 512kB / 32B */
	mtctr	r3
	lis	r3,KERNELBASE@h
1:
	dcbf	0,r3
	addi	r3,r3,0x0020	/* Go to start of next cache line */
	bdnz	1b
	sync

	/* We can now disable the L1 cache (HID0:DCE, HID0:ICE) */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,18,15	/* Clear bits 16-17 (ICE and DCE) */
	mtspr	SPRN_HID0,r3
	sync
	isync
	blr
4380eb4cb9bSPaul Mackerras
4390eb4cb9bSPaul Mackerras/* inval_enable_L1	- Invalidate and enable L1 cache
4400eb4cb9bSPaul Mackerras *
4410eb4cb9bSPaul Mackerras * Assumes L1 is already disabled and MSR:EE is off
4420eb4cb9bSPaul Mackerras *
4430eb4cb9bSPaul Mackerras * clobbers r3
4440eb4cb9bSPaul Mackerras */
_GLOBAL(__inval_enable_L1)
	/* Enable and then Flash inval the instruction & data cache */
	mfspr	r3,SPRN_HID0
	ori	r3,r3, HID0_ICE|HID0_ICFI|HID0_DCE|HID0_DCI
	sync
	isync
	mtspr	SPRN_HID0,r3	/* Enable caches with flash-invalidate set */
	xori	r3,r3, HID0_ICFI|HID0_DCI	/* Drop the one-shot invalidate bits */
	mtspr	SPRN_HID0,r3
	sync

	blr
_ASM_NOKPROBE_SYMBOL(__inval_enable_L1)
4580eb4cb9bSPaul Mackerras
4590eb4cb9bSPaul Mackerras
460