xref: /openbmc/u-boot/arch/powerpc/cpu/mpc85xx/start.S (revision dd1033e4)
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
4 * Copyright (C) 2003  Motorola,Inc.
5 */
6
7/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
8 *
9 * The processor starts at 0xfffffffc and the code is first executed in the
10 * last 4K page (0xfffff000 - 0xffffffff) in flash/rom.
11 *
12 */
13
14#include <asm-offsets.h>
15#include <config.h>
16#include <mpc85xx.h>
17#include <version.h>
18
19#include <ppc_asm.tmpl>
20#include <ppc_defs.h>
21
22#include <asm/cache.h>
23#include <asm/mmu.h>
24
25#undef	MSR_KERNEL
26#define MSR_KERNEL ( MSR_ME )	/* Machine Check */
27
28#define LAW_EN		0x80000000
29
30#if defined(CONFIG_NAND_SPL) || \
31	(defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
32#define MINIMAL_SPL
33#endif
34
35#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
36	!defined(CONFIG_SECURE_BOOT) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
37#define NOR_BOOT
38#endif
39
40/*
41 * Set up GOT: Global Offset Table
42 *
43 * Use r12 to access the GOT
44 */
45	START_GOT
46	GOT_ENTRY(_GOT2_TABLE_)
47	GOT_ENTRY(_FIXUP_TABLE_)
48
49#ifndef MINIMAL_SPL
50	GOT_ENTRY(_start)
51	GOT_ENTRY(_start_of_vectors)
52	GOT_ENTRY(_end_of_vectors)
53	GOT_ENTRY(transfer_to_handler)
54#endif
55
56	GOT_ENTRY(__init_end)
57	GOT_ENTRY(__bss_end)
58	GOT_ENTRY(__bss_start)
59	END_GOT
60
61/*
62 * e500 Startup -- after reset only the last 4KB of the effective
63 * address space is mapped in the MMU (L2 TLB1, entry 0). The .bootpg
64 * section is located at this last page and basically does three
65 * things: clear some registers, set up exception tables and
66 * add more TLB entries for 'larger spaces' (e.g. the boot ROM) to
67 * continue the boot procedure.
68 *
69 * Once the boot ROM is mapped by TLB entries we can proceed
70 * with normal startup.
71 *
72 */
73
74	.section .bootpg,"ax"
75	.globl _start_e500
76
77_start_e500:
78/* Enable debug exception */
79	li	r1,MSR_DE
80	mtmsr 	r1
81
82	/*
83	 * If we got an ePAPR device tree pointer passed in as r3, we need that
84	 * later in cpu_init_early_f(). Save it to a safe register before we
85	 * clobber it so that we can fetch it from there later.
86	 */
87	mr	r24, r3
88
89#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
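	/* Read the SVR and keep only its low byte (the silicon revision) so it
	 * can be compared against the revision(s) affected by this erratum.
	 */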
90	mfspr	r3,SPRN_SVR
91	rlwinm	r3,r3,0,0xff
92	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
93	cmpw	r3,r4
94	beq	1f
95
96#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
97	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
98	cmpw	r3,r4
99	beq	1f
100#endif
101
102	/* Not a supported revision affected by erratum */
103	li	r27,0
104	b	2f
105
1061:	li	r27,1	/* Remember for later that we have the erratum */
107	/* Erratum says set bits 55:60 to 001001 */
108	msync
109	isync
110	mfspr	r3,SPRN_HDBCR0
111	li	r4,0x48
112	rlwimi	r3,r4,0,0x1f8
113	mtspr	SPRN_HDBCR0,r3
114	isync
1152:
116#endif
117#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
118	msync
119	isync
120	mfspr	r3, SPRN_HDBCR0
121	oris	r3, r3, 0x0080
122	mtspr	SPRN_HDBCR0, r3
123#endif
124
125
126#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC) && \
127	!defined(CONFIG_E6500)
128	/* ISBC uses L2 as stack.
129	 * Disable L2 cache here so that u-boot can enable it later
130	 * as part of its normal flow
131	 */
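	/* The sequence below: if L2CSR0[L2E] is set, request a flush by setting
	 * L2CSR0[L2FL], poll until the flush bit clears, then clear L2E to
	 * disable the cache.
	 */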
132
133	/* Check if L2 is enabled */
134	mfspr	r3, SPRN_L2CSR0
135	lis	r2, L2CSR0_L2E@h
136	ori	r2, r2, L2CSR0_L2E@l
137	and.	r4, r3, r2
138	beq	l2_disabled
139
140	mfspr r3, SPRN_L2CSR0
141	/* Flush L2 cache */
142	lis     r2,(L2CSR0_L2FL)@h
143	ori     r2, r2, (L2CSR0_L2FL)@l
144	or      r3, r2, r3
145	sync
146	isync
147	mtspr   SPRN_L2CSR0,r3
148	isync
1491:
150	mfspr r3, SPRN_L2CSR0
151	and. r1, r3, r2
152	bne 1b
153
154	mfspr r3, SPRN_L2CSR0
155	lis r2, L2CSR0_L2E@h
156	ori r2, r2, L2CSR0_L2E@l
157	andc r4, r3, r2
158	sync
159	isync
160	mtspr SPRN_L2CSR0,r4
161	isync
162
163l2_disabled:
164#endif
165
166/* clear registers/arrays not reset by hardware */
167
168	/* L1 */
169	li	r0,2
170	mtspr	L1CSR0,r0	/* invalidate d-cache */
171	mtspr	L1CSR1,r0	/* invalidate i-cache */
172
173	mfspr	r1,DBSR
174	mtspr	DBSR,r1		/* Clear all valid bits */
175
176
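	/*
	 * Helper macros for writing MMU entries.  Each create_tlbN_entry macro
	 * loads the MAS registers and executes tlbwe:
	 *   MAS0 - TLB array (0 or 1) and entry number (esel)
	 *   MAS1 - valid bit, IPROT (TLB1 only), address space (ts), page size (tsize)
	 *   MAS2 - effective page number (epn) and WIMG attributes
	 *   MAS3 - real page number (rpn) and access permissions (perm)
	 *   MAS7 - upper bits of the physical address (phy_high)
	 * 'scratch' names a register the macros are free to clobber.
	 */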
177	.macro	create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
178	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
179	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
180	mtspr	MAS0, \scratch
181	lis	\scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
182	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
183	mtspr	MAS1, \scratch
184	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
185	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
186	mtspr	MAS2, \scratch
187	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
188	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
189	mtspr	MAS3, \scratch
190	lis	\scratch, \phy_high@h
191	ori	\scratch, \scratch, \phy_high@l
192	mtspr	MAS7, \scratch
193	isync
194	msync
195	tlbwe
196	isync
197	.endm
198
199	.macro	create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
200	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
201	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
202	mtspr	MAS0, \scratch
203	lis	\scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
204	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
205	mtspr	MAS1, \scratch
206	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
207	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
208	mtspr	MAS2, \scratch
209	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
210	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
211	mtspr	MAS3, \scratch
212	lis	\scratch, \phy_high@h
213	ori	\scratch, \scratch, \phy_high@l
214	mtspr	MAS7, \scratch
215	isync
216	msync
217	tlbwe
218	isync
219	.endm
220
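	/*
	 * The delete_tlbN_entry macros invalidate an entry by writing it back
	 * with MAS1 = 0 (valid bit clear).  For TLB0 the EPN must also be
	 * supplied in MAS2, since it selects which set of the array is written.
	 */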
221	.macro	delete_tlb1_entry esel scratch
222	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
223	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
224	mtspr	MAS0, \scratch
225	li	\scratch, 0
226	mtspr	MAS1, \scratch
227	isync
228	msync
229	tlbwe
230	isync
231	.endm
232
233	.macro	delete_tlb0_entry esel epn wimg scratch
234	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
235	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
236	mtspr	MAS0, \scratch
237	li	\scratch, 0
238	mtspr	MAS1, \scratch
239	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
240	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
241	mtspr	MAS2, \scratch
242	isync
243	msync
244	tlbwe
245	isync
246	.endm
247
248/* Interrupt vectors do not fit in minimal SPL. */
249#if !defined(MINIMAL_SPL)
250	/* Setup interrupt vectors */
251	lis	r1,CONFIG_SYS_MONITOR_BASE@h
252	mtspr	IVPR,r1
253
254	li	r4,CriticalInput@l
255	mtspr	IVOR0,r4	/* 0: Critical input */
256	li	r4,MachineCheck@l
257	mtspr	IVOR1,r4	/* 1: Machine check */
258	li	r4,DataStorage@l
259	mtspr	IVOR2,r4	/* 2: Data storage */
260	li	r4,InstStorage@l
261	mtspr	IVOR3,r4	/* 3: Instruction storage */
262	li	r4,ExtInterrupt@l
263	mtspr	IVOR4,r4	/* 4: External interrupt */
264	li	r4,Alignment@l
265	mtspr	IVOR5,r4	/* 5: Alignment */
266	li	r4,ProgramCheck@l
267	mtspr	IVOR6,r4	/* 6: Program check */
268	li	r4,FPUnavailable@l
269	mtspr	IVOR7,r4	/* 7: floating point unavailable */
270	li	r4,SystemCall@l
271	mtspr	IVOR8,r4	/* 8: System call */
272	/* 9: Auxiliary processor unavailable(unsupported) */
273	li	r4,Decrementer@l
274	mtspr	IVOR10,r4	/* 10: Decrementer */
275	li	r4,IntervalTimer@l
276	mtspr	IVOR11,r4	/* 11: Interval timer */
277	li	r4,WatchdogTimer@l
278	mtspr	IVOR12,r4	/* 12: Watchdog timer */
279	li	r4,DataTLBError@l
280	mtspr	IVOR13,r4	/* 13: Data TLB error */
281	li	r4,InstructionTLBError@l
282	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
283	li	r4,DebugBreakpoint@l
284	mtspr	IVOR15,r4	/* 15: Debug */
285#endif
286
287	/* Clear and set up some registers. */
288	li      r0,0x0000
289	lis	r1,0xffff
290	mtspr	DEC,r0			/* prevent dec exceptions */
291	mttbl	r0			/* prevent fit & wdt exceptions */
292	mttbu	r0
293	mtspr	TSR,r1			/* clear all timer exception status */
294	mtspr	TCR,r0			/* disable all */
295	mtspr	ESR,r0			/* clear exception syndrome register */
296	mtspr	MCSR,r0			/* machine check syndrome register */
297	mtxer	r0			/* clear integer exception register */
298
299#ifdef CONFIG_SYS_BOOK3E_HV
300	mtspr	MAS8,r0			/* make sure MAS8 is clear */
301#endif
302
303	/* Enable Time Base and Select Time Base Clock */
304	lis	r0,HID0_EMCP@h		/* Enable machine check */
305#if defined(CONFIG_ENABLE_36BIT_PHYS)
306	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
307#endif
308#ifndef CONFIG_E500MC
309	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
310#endif
311	mtspr	HID0,r0
312
313#if !defined(CONFIG_E500MC) && !defined(CONFIG_ARCH_QEMU_E500)
314	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
315	mfspr	r3,PVR
316	andi.	r3,r3, 0xff
317	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
318	blt 1f
319	/* Set MBDD bit also */
320	ori r0, r0, HID1_MBDD@l
3211:
322	mtspr	HID1,r0
323#endif
324
325#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
326	mfspr	r3,SPRN_HDBCR1
327	oris	r3,r3,0x0100
328	mtspr	SPRN_HDBCR1,r3
329#endif
330
331	/* Enable Branch Prediction */
332#if defined(CONFIG_BTB)
333	lis	r0,BUCSR_ENABLE@h
334	ori	r0,r0,BUCSR_ENABLE@l
335	mtspr	SPRN_BUCSR,r0
336#endif
337
338#if defined(CONFIG_SYS_INIT_DBCR)
339	lis	r1,0xffff
340	ori	r1,r1,0xffff
341	mtspr	DBSR,r1			/* Clear all status bits */
342	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
343	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
344	mtspr	DBCR0,r0
345#endif
346
347#ifdef CONFIG_ARCH_MPC8569
348#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
349#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)
350
351	/* MPC8569 Rev.0 silicon needs bit 13 of LBCR set to allow the eLBC to
352	 * use an address space wider than 12 bits, and this must be done in
353	 * the 4K boot page. So we set this bit here.
354	 */
355
356	/* create a temp mapping TLB0[0] for LBCR  */
357	create_tlb0_entry 0, \
358		0, BOOKE_PAGESZ_4K, \
359		CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G, \
360		CONFIG_SYS_LBC_ADDR, MAS3_SW|MAS3_SR, \
361		0, r6
362
363	/* Set LBCR register */
364	lis     r4,CONFIG_SYS_LBCR_ADDR@h
365	ori     r4,r4,CONFIG_SYS_LBCR_ADDR@l
366
367	lis     r5,CONFIG_SYS_LBC_LBCR@h
368	ori     r5,r5,CONFIG_SYS_LBC_LBCR@l
369	stw     r5,0(r4)
370	isync
371
372	/* invalidate this temp TLB */
373	lis	r4,CONFIG_SYS_LBC_ADDR@h
374	ori	r4,r4,CONFIG_SYS_LBC_ADDR@l
375	tlbivax	0,r4
376	isync
377
378#endif /* CONFIG_ARCH_MPC8569 */
379
380/*
381 * Search for the TLB that covers the code we're executing, and shrink it
382 * so that it covers only this 4K page.  That will ensure that any other
383 * TLB we create won't interfere with it.  We assume that the TLB exists,
384 * which is why we don't check the Valid bit of MAS1.  We also assume
385 * it is in TLB1.
386 *
387 * This is necessary, for example, when booting from the on-chip ROM,
388 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
389 */
390	bl	nexti		/* Find our address */
391nexti:	mflr	r1		/* R1 = our PC */
392	li	r2, 0
393	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
394	isync
395	msync
396	tlbsx	0, r1		/* This must succeed */
397
398	mfspr	r14, MAS0	/* Save ESEL for later */
399	rlwinm	r14, r14, 16, 0xfff
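	/* The rlwinm rotates MAS0 left by 16 and masks it, leaving just the
	 * ESEL field in r14: the index of the TLB1 entry we are running from.
	 */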
400
401	/* Set the size of the TLB to 4KB */
402	mfspr	r3, MAS1
403	li	r2, 0xF80
404	andc	r3, r3, r2	/* Clear the TSIZE bits */
405	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
406	oris	r3, r3, MAS1_IPROT@h
407	mtspr	MAS1, r3
408
409	/*
410	 * Set the base address of the TLB to our PC.  We assume that
411	 * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
412	 */
413	lis	r3, MAS2_EPN@h
414	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */
415
416	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */
417
418	mfspr	r2, MAS2
419	andc	r2, r2, r3
420	or	r2, r2, r1
421#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
422	cmpwi	r27,0
423	beq	1f
424	andi.	r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */
425	rlwinm	r2, r2, 0, ~MAS2_I
426	ori	r2, r2, MAS2_G
4271:
428#endif
429	mtspr	MAS2, r2	/* Set the EPN to our PC base address */
430
431	mfspr	r2, MAS3
432	andc	r2, r2, r3
433	or	r2, r2, r1
434	mtspr	MAS3, r2	/* Set the RPN to our PC base address */
435
436	isync
437	msync
438	tlbwe
439
440/*
441 * Clear out any other TLB entries that may exist, to avoid conflicts.
442 * Our TLB entry is in r14.
443 */
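/*
 * First invalidate all of TLB0 with a single tlbivax.  Then walk every
 * TLB1 entry: MAS1 stays 0 (invalid), so each tlbwe below wipes the entry
 * selected by MAS0[ESEL], skipping only the entry in r14 that we are
 * executing from.
 */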
444	li	r0, TLBIVAX_ALL | TLBIVAX_TLB0
445	tlbivax 0, r0
446	tlbsync
447
448	mfspr	r4, SPRN_TLB1CFG
449	rlwinm	r4, r4, 0, TLBnCFG_NENTRY_MASK
450
451	li	r3, 0
452	mtspr	MAS1, r3
4531:	cmpw	r3, r14
454	rlwinm	r5, r3, 16, MAS0_ESEL_MSK
455	addi	r3, r3, 1
456	beq	2f		/* skip the entry we're executing from */
457
458	oris	r5, r5, MAS0_TLBSEL(1)@h
459	mtspr	MAS0, r5
460
461	isync
462	tlbwe
463	isync
464	msync
465
4662:	cmpw	r3, r4
467	blt	1b
468
469#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) && \
470	!defined(CONFIG_SECURE_BOOT)
471/*
472 * TLB entry for debugging in AS1.
473 * Create a temporary TLB entry in AS0 to handle debug exceptions:
474 * on a debug exception the MSR is cleared, i.e. the address space is
475 * changed to 0, so a TLB entry (in AS0) is required to handle debug
476 * exceptions generated in AS1.
477 */
478
479#ifdef NOR_BOOT
480/*
481 * A TLB entry is created so that IVPR + IVOR15 maps to a valid opcode
482 * address, because the flash's virtual address range is
483 * 0xff800000 - 0xffffffff and this window is outside the 4K boot window.
484 */
485	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
486		0, BOOKE_PAGESZ_4M, \
487		CONFIG_SYS_MONITOR_BASE & 0xffc00000,  MAS2_I|MAS2_G, \
488		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
489		0, r6
490
491#else
492/*
493 * A TLB entry is created so that IVPR + IVOR15 maps to a valid opcode
494 * address, because "nexti" resizes the boot TLB to 4K.
495 */
496	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
497		0, BOOKE_PAGESZ_256K, \
498		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \
499		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
500		0, r6
501#endif
502#endif
503
504/*
505 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
506 * location is not where we want it.  This typically happens on a 36-bit
507 * system, where we want to move CCSR to near the top of 36-bit address space.
508 *
509 * To move CCSR, we create two temporary TLBs, one for the old location, and
510 * another for the new location.  On CoreNet systems, we also need to create
511 * a special, temporary LAW.
512 *
513 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
514 * long-term TLBs, so we use TLB0 here.
515 */
516#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
517
518#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
519#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
520#endif
521
522create_ccsr_new_tlb:
523	/*
524	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
525	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
526	 */
527	lis	r8, CONFIG_SYS_CCSRBAR@h
528	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
529	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
530	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
531	create_tlb0_entry 0, \
532		0, BOOKE_PAGESZ_4K, \
533		CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
534		CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
535		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
536	/*
537	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
538	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
539	 */
540create_ccsr_old_tlb:
541	create_tlb0_entry 1, \
542		0, BOOKE_PAGESZ_4K, \
543		CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
544		CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
545		0, r3 /* The default CCSR address is always a 32-bit number */
546
547
548	/*
549	 * We have a TLB for what we think is the current (old) CCSR.  Let's
550	 * verify that, otherwise we won't be able to move it.
551	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
552	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
553	 */
554verify_old_ccsr:
555	lis     r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
556	ori     r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
557#ifdef CONFIG_FSL_CORENET
558	lwz	r1, 4(r9)		/* CCSRBARL */
559#else
560	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
561	slwi	r1, r1, 12
562#endif
563
564	cmpl	0, r0, r1
565
566	/*
567	 * If the value we read from CCSRBARL is not what we expect, then
568	 * enter an infinite loop.  This will at least allow a debugger to
569	 * halt execution and examine TLBs, etc.  There's no point in going
570	 * on.
571	 */
572infinite_debug_loop:
573	bne	infinite_debug_loop
574
575#ifdef CONFIG_FSL_CORENET
576
577#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
578#define LAW_SIZE_4K	0xb
579#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
580#define CCSRAR_C	0x80000000	/* Commit */
581
582create_temp_law:
583	/*
584	 * On CoreNet systems, we create the temporary LAW using a special LAW
585	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
586	 */
587	lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
588	ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
589	lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
590	ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
591	lis     r2, CCSRBAR_LAWAR@h
592	ori     r2, r2, CCSRBAR_LAWAR@l
593
594	stw     r0, 0xc00(r9)	/* LAWBARH0 */
595	stw     r1, 0xc04(r9)	/* LAWBARL0 */
596	sync
597	stw     r2, 0xc08(r9)	/* LAWAR0 */
598
599	/*
600	 * Read back from LAWAR to ensure the update is complete.  e500mc
601	 * cores also require an isync.
602	 */
603	lwz	r0, 0xc08(r9)	/* LAWAR0 */
604	isync
605
606	/*
607	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
608	 * Follow this with an isync instruction. This forces any outstanding
609	 * accesses to configuration space to completion.
610	 */
611read_old_ccsrbar:
612	lwz	r0, 0(r9)	/* CCSRBARH */
613	lwz	r0, 4(r9)	/* CCSRBARL */
614	isync
615
616	/*
617	 * Write the new values for CCSRBARH and CCSRBARL to their old
618	 * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
619	 * has a new value written it loads a CCSRBARH shadow register. When
620	 * the CCSRBARL is written, the CCSRBARH shadow register contents
621	 * along with the CCSRBARL value are loaded into the CCSRBARH and
622	 * CCSRBARL registers, respectively.  Follow this with a sync
623	 * instruction.
624	 */
625write_new_ccsrbar:
626	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
627	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
628	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
629	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
630	lis	r2, CCSRAR_C@h
631	ori	r2, r2, CCSRAR_C@l
632
633	stw	r0, 0(r9)	/* Write to CCSRBARH */
634	sync			/* Make sure we write to CCSRBARH first */
635	stw	r1, 4(r9)	/* Write to CCSRBARL */
636	sync
637
638	/*
639	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
640	 * Follow this with a sync instruction.
641	 */
642	stw	r2, 8(r9)
643	sync
644
645	/* Delete the temporary LAW */
646delete_temp_law:
647	li	r1, 0
648	stw	r1, 0xc08(r8)
649	sync
650	stw	r1, 0xc00(r8)
651	stw	r1, 0xc04(r8)
652	sync
653
654#else /* #ifdef CONFIG_FSL_CORENET */
655
656write_new_ccsrbar:
657	/*
658	 * Read the current value of CCSRBAR using a load word instruction
659	 * followed by an isync. This forces all accesses to configuration
660	 * space to complete.
661	 */
662	sync
663	lwz	r0, 0(r9)
664	isync
665
666/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
667#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
668			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
669
670	/* Write the new value to CCSRBAR. */
671	lis	r0, CCSRBAR_PHYS_RS12@h
672	ori	r0, r0, CCSRBAR_PHYS_RS12@l
673	stw	r0, 0(r9)
674	sync
675
676	/*
677	 * The manual says to perform a load of an address that does not
678	 * access configuration space or the on-chip SRAM using an existing TLB,
679	 * but that doesn't appear to be necessary.  We will do the isync,
680	 * though.
681	 */
682	isync
683
684	/*
685	 * Read the contents of CCSRBAR from its new location, followed by
686	 * another isync.
687	 */
688	lwz	r0, 0(r8)
689	isync
690
691#endif  /* #ifdef CONFIG_FSL_CORENET */
692
693	/* Delete the temporary TLBs */
694delete_temp_tlbs:
695	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
696	delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3
697
698#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
699
700#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
701create_ccsr_l2_tlb:
702	/*
703	 * Create a TLB for the MMR location of CCSR
704	 * to access L2CSR0 register
705	 */
706	create_tlb0_entry 0, \
707		0, BOOKE_PAGESZ_4K, \
708		CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
709		CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
710		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
711
712enable_l2_cluster_l2:
713	/* enable L2 cache */
714	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
715	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
716	li	r4, 33	/* stash id */
717	stw	r4, 4(r3)
718	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
719	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
720	sync
721	stw	r4, 0(r3)	/* invalidate L2 */
722	/* Poll till the bits are cleared */
7231:	sync
724	lwz	r0, 0(r3)
725	twi	0, r0, 0
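	/* The twi 0,r0,0 never traps (its TO field is zero); together with the
	 * isync it forces the lwz above to complete before the loop test, the
	 * usual PowerPC "load, twi, isync" I/O read-ordering idiom.
	 */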
726	isync
727	and.	r1, r0, r4
728	bne	1b
729
730	/* L2PE must be set before L2 cache is enabled */
731	lis	r4, (L2CSR0_L2PE)@h
732	ori	r4, r4, (L2CSR0_L2PE)@l
733	sync
734	stw	r4, 0(r3)	/* enable L2 parity/ECC error checking */
735	/* Poll till the bit is set */
7361:	sync
737	lwz	r0, 0(r3)
738	twi	0, r0, 0
739	isync
740	and.	r1, r0, r4
741	beq	1b
742
743	lis	r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
744	ori	r4, r4, (L2CSR0_L2REP_MODE)@l
745	sync
746	stw	r4, 0(r3)	/* enable L2 */
747	/* Poll till the bit is set */
7481:	sync
749	lwz	r0, 0(r3)
750	twi	0, r0, 0
751	isync
752	and.	r1, r0, r4
753	beq	1b
754
755delete_ccsr_l2_tlb:
756	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
757#endif
758
759	/*
760	 * Enable the L1. On e6500, this has to be done
761	 * after the L2 is up.
762	 */
763
764#ifdef CONFIG_SYS_CACHE_STASHING
765	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
766	li	r2,(32 + 0)
767	mtspr	L1CSR2,r2
768#endif
769
770	/* Enable/invalidate the I-Cache */
771	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
772	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
773	mtspr	SPRN_L1CSR1,r2
7741:
775	mfspr	r3,SPRN_L1CSR1
776	and.	r1,r3,r2
777	bne	1b
778
779	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
780	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
781	mtspr	SPRN_L1CSR1,r3
782	isync
7832:
784	mfspr	r3,SPRN_L1CSR1
785	andi.	r1,r3,L1CSR1_ICE@l
786	beq	2b
787
788	/* Enable/invalidate the D-Cache */
789	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
790	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
791	mtspr	SPRN_L1CSR0,r2
7921:
793	mfspr	r3,SPRN_L1CSR0
794	and.	r1,r3,r2
795	bne	1b
796
797	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
798	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
799	mtspr	SPRN_L1CSR0,r3
800	isync
8012:
802	mfspr	r3,SPRN_L1CSR0
803	andi.	r1,r3,L1CSR0_DCE@l
804	beq	2b
805#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
806#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
807#define LAW_SIZE_1M	0x13
808#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)
809
810	cmpwi	r27,0
811	beq	9f
812
813	/*
814	 * Create a TLB entry for CCSR
815	 *
816	 * We're executing out of TLB1 entry in r14, and that's the only
817	 * TLB entry that exists.  To allocate some TLB entries for our
818	 * own use, flip a bit high enough that we won't flip it again
819	 * via incrementing.
820	 */
821
822	xori	r8, r14, 32
823	lis	r0, MAS0_TLBSEL(1)@h
824	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
825	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
826	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
827	lis	r7, CONFIG_SYS_CCSRBAR@h
828	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
829	ori	r2, r7, MAS2_I|MAS2_G
830	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
831	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
832	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
833	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
834	mtspr	MAS0, r0
835	mtspr	MAS1, r1
836	mtspr	MAS2, r2
837	mtspr	MAS3, r3
838	mtspr	MAS7, r4
839	isync
840	tlbwe
841	isync
842	msync
843
844	/* Map DCSR temporarily to physical address zero */
845	li	r0, 0
846	lis	r3, DCSRBAR_LAWAR@h
847	ori	r3, r3, DCSRBAR_LAWAR@l
848
849	stw	r0, 0xc00(r7)	/* LAWBARH0 */
850	stw	r0, 0xc04(r7)	/* LAWBARL0 */
851	sync
852	stw	r3, 0xc08(r7)	/* LAWAR0 */
853
854	/* Read back from LAWAR to ensure the update is complete. */
855	lwz	r3, 0xc08(r7)	/* LAWAR0 */
856	isync
857
858	/* Create a TLB entry for DCSR at zero */
859
860	addi	r9, r8, 1
861	lis	r0, MAS0_TLBSEL(1)@h
862	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
863	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
864	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
865	li	r6, 0	/* DCSR effective address */
866	ori	r2, r6, MAS2_I|MAS2_G
867	li	r3, MAS3_SW|MAS3_SR
868	li	r4, 0
869	mtspr	MAS0, r0
870	mtspr	MAS1, r1
871	mtspr	MAS2, r2
872	mtspr	MAS3, r3
873	mtspr	MAS7, r4
874	isync
875	tlbwe
876	isync
877	msync
878
879	/* enable the timebase */
880#define CTBENR	0xe2084
881	li	r3, 1
882	addis	r4, r7, CTBENR@ha
883	stw	r3, CTBENR@l(r4)
884	lwz	r3, CTBENR@l(r4)
885	twi	0,r3,0
886	isync
887
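	/*
	 * Helper macros for this workaround: build the target register address
	 * in r3 from the CCSR base (r7) or the temporarily mapped DCSR base
	 * (r6) plus an offset, put the value in r4, and call erratum_set_value,
	 * which performs the store from within a locked I-cache line.
	 */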
888	.macro	erratum_set_ccsr offset value
889	addis	r3, r7, \offset@ha
890	lis	r4, \value@h
891	addi	r3, r3, \offset@l
892	ori	r4, r4, \value@l
893	bl	erratum_set_value
894	.endm
895
896	.macro	erratum_set_dcsr offset value
897	addis	r3, r6, \offset@ha
898	lis	r4, \value@h
899	addi	r3, r3, \offset@l
900	ori	r4, r4, \value@l
901	bl	erratum_set_value
902	.endm
903
904	erratum_set_dcsr 0xb0e08 0xe0201800
905	erratum_set_dcsr 0xb0e18 0xe0201800
906	erratum_set_dcsr 0xb0e38 0xe0400000
907	erratum_set_dcsr 0xb0008 0x00900000
908	erratum_set_dcsr 0xb0e40 0xe00a0000
909	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
910#ifdef  CONFIG_RAMBOOT_PBL
911	erratum_set_ccsr 0x10f00 0x495e5000
912#else
913	erratum_set_ccsr 0x10f00 0x415e5000
914#endif
915	erratum_set_ccsr 0x11f00 0x415e5000
916
917	/* Make temp mapping uncacheable again, if it was initially */
918	bl	2f
9192:	mflr	r3
920	tlbsx	0, r3
921	mfspr	r4, MAS2
922	rlwimi	r4, r15, 0, MAS2_I
923	rlwimi	r4, r15, 0, MAS2_G
924	mtspr	MAS2, r4
925	isync
926	tlbwe
927	isync
928	msync
929
930	/* Clear the cache */
931	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
932	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
933	sync
934	isync
935	mtspr	SPRN_L1CSR1,r3
936	isync
9372:	sync
938	mfspr	r4,SPRN_L1CSR1
939	and.	r4,r4,r3
940	bne	2b
941
942	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
943	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
944	sync
945	isync
946	mtspr	SPRN_L1CSR1,r3
947	isync
9482:	sync
949	mfspr	r4,SPRN_L1CSR1
950	and.	r4,r4,r3
951	beq	2b
952
953	/* Remove temporary mappings */
954	lis	r0, MAS0_TLBSEL(1)@h
955	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
956	li	r3, 0
957	mtspr	MAS0, r0
958	mtspr	MAS1, r3
959	isync
960	tlbwe
961	isync
962	msync
963
964	li	r3, 0
965	stw	r3, 0xc08(r7)	/* LAWAR0 */
966	lwz	r3, 0xc08(r7)
967	isync
968
969	lis	r0, MAS0_TLBSEL(1)@h
970	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
971	li	r3, 0
972	mtspr	MAS0, r0
973	mtspr	MAS1, r3
974	isync
975	tlbwe
976	isync
977	msync
978
979	b	9f
980
981	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
982erratum_set_value:
983	/* Lock two cache lines into I-Cache */
984	sync
985	mfspr	r11, SPRN_L1CSR1
986	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
987	sync
988	isync
989	mtspr	SPRN_L1CSR1, r11
990	isync
991
992	mflr	r12
993	bl	5f
9945:	mflr	r5
995	addi	r5, r5, 2f - 5b
996	icbtls	0, 0, r5
997	addi	r5, r5, 64
998
999	sync
1000	mfspr	r11, SPRN_L1CSR1
10013:	andi.	r11, r11, L1CSR1_ICUL
1002	bne	3b
1003
1004	icbtls	0, 0, r5
1005	addi	r5, r5, 64
1006
1007	sync
1008	mfspr	r11, SPRN_L1CSR1
10093:	andi.	r11, r11, L1CSR1_ICUL
1010	bne	3b
1011
1012	b	2f
1013	.align	6
1014	/* Inside a locked cacheline, wait a while, write, then wait a while */
10152:	sync
1016
1017	mfspr	r5, SPRN_TBRL
1018	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
10194:	mfspr	r5, SPRN_TBRL
1020	subf.	r5, r5, r11
1021	bgt	4b
1022
1023	stw	r4, 0(r3)
1024
1025	mfspr	r5, SPRN_TBRL
1026	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
10274:	mfspr	r5, SPRN_TBRL
1028	subf.	r5, r5, r11
1029	bgt	4b
1030
1031	sync
1032
1033	/*
1034	 * Fill out the rest of this cache line and the next with nops,
1035	 * to ensure that nothing outside the locked area will be
1036	 * fetched due to a branch.
1037	 */
1038	.rept 19
1039	nop
1040	.endr
1041
1042	sync
1043	mfspr	r11, SPRN_L1CSR1
1044	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
1045	sync
1046	isync
1047	mtspr	SPRN_L1CSR1, r11
1048	isync
1049
1050	mtlr	r12
1051	blr
1052
10539:
1054#endif
1055
1056create_init_ram_area:
1057	lis     r6,FSL_BOOKE_MAS0(1, 15, 0)@h
1058	ori     r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
1059
1060#ifdef NOR_BOOT
1061	/* create a temp mapping in AS=1 to the 4M boot window */
1062	create_tlb1_entry 15, \
1063		1, BOOKE_PAGESZ_4M, \
1064		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
1065		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1066		0, r6
1067
1068#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
1069	/* create a temp mapping in AS = 1 for Flash mapping
1070	 * created by PBL for ISBC code
1071	 */
1072	create_tlb1_entry 15, \
1073		1, BOOKE_PAGESZ_1M, \
1074		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1075		CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1076		0, r6
1077
1078/*
1079 * For targets without CONFIG_SPL (e.g. P3, P5), and for targets with
1080 * CONFIG_SPL (e.g. T1, T2, T4) only when building u-boot-spl,
1081 * i.e. CONFIG_SPL_BUILD.
1082 */
1083#elif defined(CONFIG_RAMBOOT_PBL) && defined(CONFIG_SECURE_BOOT) && \
1084	(!defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD))
1085	/* create a temp mapping in AS = 1 for mapping CONFIG_SYS_MONITOR_BASE
1086	 * to L3 Address configured by PBL for ISBC code
1087	 */
1088	create_tlb1_entry 15, \
1089		1, BOOKE_PAGESZ_1M, \
1090		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1091		CONFIG_SYS_INIT_L3_ADDR & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1092		0, r6
1093
1094#else
1095	/*
1096	 * Create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space;
1097	 * the main image has been relocated to CONFIG_SYS_MONITOR_BASE by the second stage.
1098	 */
1099	create_tlb1_entry 15, \
1100		1, BOOKE_PAGESZ_1M, \
1101		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1102		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1103		0, r6
1104#endif
1105
1106	/* create a temp mapping in AS=1 to the stack */
1107#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
1108    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
1109	create_tlb1_entry 14, \
1110		1, BOOKE_PAGESZ_16K, \
1111		CONFIG_SYS_INIT_RAM_ADDR, 0, \
1112		CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
1113		CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6
1114
1115#else
1116	create_tlb1_entry 14, \
1117		1, BOOKE_PAGESZ_16K, \
1118		CONFIG_SYS_INIT_RAM_ADDR, 0, \
1119		CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
1120		0, r6
1121#endif
1122
1123	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
1124	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
1125	lis	r7,switch_as@h
1126	ori	r7,r7,switch_as@l
1127
1128	mtspr	SPRN_SRR0,r7
1129	mtspr	SPRN_SRR1,r6
1130	rfi
1131
1132switch_as:
1133/* L1 DCache is used for initial RAM */
1134
1135	/* Allocate Initial RAM in data cache.
1136	 */
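	/* dcbz establishes and zero-fills each cache line of the init-RAM
	 * region without reading the (not yet initialised) backing memory, and
	 * dcbtls locks the line so it cannot be evicted: the L1 dcache is
	 * effectively used as SRAM until real RAM is set up.
	 */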
1137	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
1138	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1139	mfspr	r2, L1CFG0
1140	andi.	r2, r2, 0x1ff
1141	/* cache size * 1024 / (2 * L1 line size) */
1142	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
1143	mtctr	r2
1144	li	r0,0
11451:
1146	dcbz	r0,r3
1147#ifdef CONFIG_E6500	/* Lock/unlock L2 cache along with L1 */
1148	dcbtls	2, r0, r3
1149	dcbtls	0, r0, r3
1150#else
1151	dcbtls	0, r0, r3
1152#endif
1153	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
1154	bdnz	1b
1155
1156	/* Jump out of the last 4K page and continue to 'normal' start */
1157#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
1158	/* We assume that we're already running at the address we're linked at */
1159	b	_start_cont
1160#else
1161	/* Calculate absolute address in FLASH and jump there		*/
1162	/*--------------------------------------------------------------*/
1163	lis	r3,CONFIG_SYS_MONITOR_BASE@h
1164	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
1165	addi	r3,r3,_start_cont - _start
1166	mtlr	r3
1167	blr
1168#endif
1169
1170	.text
1171	.globl	_start
1172_start:
1173	.long	0x27051956		/* U-BOOT Magic Number */
1174	.globl	version_string
1175version_string:
1176	.ascii U_BOOT_VERSION_STRING, "\0"
1177
1178	.align	4
1179	.globl	_start_cont
1180_start_cont:
1181	/* Set up the stack in initial RAM; could be L2-as-SRAM or L1 dcache */
1182	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
1183	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
1184
1185#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1186#if CONFIG_VAL(SYS_MALLOC_F_LEN) + GENERATED_GBL_DATA_SIZE > CONFIG_SYS_INIT_RAM_SIZE
1187#error "SYS_MALLOC_F_LEN too large to fit into initial RAM."
1188#endif
1189
1190	/* Leave 16+ bytes for back chain termination and NULL return address */
1191	subi	r3,r3,((CONFIG_VAL(SYS_MALLOC_F_LEN)+16+15)&~0xf)
1192#endif
1193
1194	/* End of RAM */
1195	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1196	ori	r4,r4,(CONFIG_SYS_INIT_RAM_SIZE)@l
1197
1198	li	r0,0
1199
12001: 	subi 	r4,r4,4
1201	stw 	r0,0(r4)
1202	cmplw 	r4,r3
1203	bne	1b
1204
1205#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1206	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1207	ori	r4,r4,(CONFIG_SYS_GBL_DATA_OFFSET)@l
1208
1209	addi	r3,r3,16	/* Pre-relocation malloc area */
1210	stw	r3,GD_MALLOC_BASE(r4)
1211	subi	r3,r3,16
1212#endif
1213	li	r0,0
1214	stw	r0,0(r3)	/* Terminate Back Chain */
1215	stw	r0,+4(r3)	/* NULL return address. */
1216	mr	r1,r3		/* Transfer to SP(r1) */
1217
1218	GET_GOT
1219
1220	/* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
1221	mr	r3, r24
1222
1223	bl	cpu_init_early_f
1224
1225	/* switch back to AS = 0 */
1226	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
1227	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
1228	mtmsr	r3
1229	isync
1230
1231	bl	cpu_init_f	/* return boot_flag for calling board_init_f */
1232	bl	board_init_f
1233	isync
1234
1235	/* NOTREACHED - board_init_f() does not return */
1236
1237#ifndef MINIMAL_SPL
1238	.globl	_start_of_vectors
1239_start_of_vectors:
1240
1241/* Critical input. */
1242	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
1243
1244/* Machine check */
1245	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
1246
1247/* Data Storage exception. */
1248	STD_EXCEPTION(0x0300, DataStorage, UnknownException)
1249
1250/* Instruction Storage exception. */
1251	STD_EXCEPTION(0x0400, InstStorage, UnknownException)
1252
1253/* External Interrupt exception. */
1254	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)
1255
1256/* Alignment exception. */
1257Alignment:
1258	EXCEPTION_PROLOG(SRR0, SRR1)
1259	mfspr	r4,DAR
1260	stw	r4,_DAR(r21)
1261	mfspr	r5,DSISR
1262	stw	r5,_DSISR(r21)
1263	addi	r3,r1,STACK_FRAME_OVERHEAD
1264	EXC_XFER_TEMPLATE(0x600, Alignment, AlignmentException,
1265		MSR_KERNEL, COPY_EE)
1266
1267/* Program check exception */
1268ProgramCheck:
1269	EXCEPTION_PROLOG(SRR0, SRR1)
1270	addi	r3,r1,STACK_FRAME_OVERHEAD
1271	EXC_XFER_TEMPLATE(0x700, ProgramCheck, ProgramCheckException,
1272		MSR_KERNEL, COPY_EE)
1273
1274	/* No FPU on MPC85xx.  This exception is not supposed to happen.
1275	*/
1276	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
1277	STD_EXCEPTION(0x0900, SystemCall, UnknownException)
1278	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
1279	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
1280	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
1281
1282	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
1283	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
1284
1285	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )
1286
1287	.globl	_end_of_vectors
1288_end_of_vectors:
1289
1290
1291	. = . + (0x100 - ( . & 0xff ))	/* align for debug */
1292
1293/*
1294 * This code finishes saving the registers to the exception frame
1295 * and jumps to the appropriate handler for the exception.
1296 * Register r21 is pointer into trap frame, r1 has new stack pointer.
1297 * r23 is the address of the handler.
1298 */
1299	.globl	transfer_to_handler
1300transfer_to_handler:
1301	SAVE_GPR(7, r21)
1302	SAVE_4GPRS(8, r21)
1303	SAVE_8GPRS(12, r21)
1304	SAVE_8GPRS(24, r21)
1305
1306	li	r22,0
1307	stw	r22,RESULT(r21)
1308	mtspr	SPRG2,r22		/* r1 is now kernel sp */
1309
1310	mtctr	r23			/* virtual address of handler */
1311	mtmsr	r20
1312	bctrl
1313
1314int_return:
1315	mfmsr	r28		/* Disable interrupts */
1316	li	r4,0
1317	ori	r4,r4,MSR_EE
1318	andc	r28,r28,r4
1319	SYNC			/* Some chip revs need this... */
1320	mtmsr	r28
1321	SYNC
1322	lwz	r2,_CTR(r1)
1323	lwz	r0,_LINK(r1)
1324	mtctr	r2
1325	mtlr	r0
1326	lwz	r2,_XER(r1)
1327	lwz	r0,_CCR(r1)
1328	mtspr	XER,r2
1329	mtcrf	0xFF,r0
1330	REST_10GPRS(3, r1)
1331	REST_10GPRS(13, r1)
1332	REST_8GPRS(23, r1)
1333	REST_GPR(31, r1)
1334	lwz	r2,_NIP(r1)	/* Restore environment */
1335	lwz	r0,_MSR(r1)
1336	mtspr	SRR0,r2
1337	mtspr	SRR1,r0
1338	lwz	r0,GPR0(r1)
1339	lwz	r2,GPR2(r1)
1340	lwz	r1,GPR1(r1)
1341	SYNC
1342	rfi
1343
1344/* Cache functions.
1345*/
1346.globl flush_icache
1347flush_icache:
1348.globl invalidate_icache
1349invalidate_icache:
1350	mfspr	r0,L1CSR1
1351	ori	r0,r0,L1CSR1_ICFI
1352	msync
1353	isync
1354	mtspr	L1CSR1,r0
1355	isync
1356	blr				/* entire I cache */
1357
1358.globl invalidate_dcache
1359invalidate_dcache:
1360	mfspr	r0,L1CSR0
1361	ori	r0,r0,L1CSR0_DCFI
1362	msync
1363	isync
1364	mtspr	L1CSR0,r0
1365	isync
1366	blr
1367
1368	.globl	icache_enable
1369icache_enable:
1370	mflr	r8
1371	bl	invalidate_icache
1372	mtlr	r8
1373	isync
1374	mfspr	r4,L1CSR1
1375	ori	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@l
1376	oris	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@h
1377	mtspr	L1CSR1,r4
1378	isync
1379	blr
1380
1381	.globl	icache_disable
1382icache_disable:
1383	mfspr	r0,L1CSR1
1384	lis	r3,0
1385	ori	r3,r3,L1CSR1_ICE
1386	andc	r0,r0,r3
1387	mtspr	L1CSR1,r0
1388	isync
1389	blr
1390
1391	.globl	icache_status
1392icache_status:
1393	mfspr	r3,L1CSR1
1394	andi.	r3,r3,L1CSR1_ICE
1395	blr
1396
1397	.globl	dcache_enable
1398dcache_enable:
1399	mflr	r8
1400	bl	invalidate_dcache
1401	mtlr	r8
1402	isync
1403	mfspr	r0,L1CSR0
1404	ori	r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@l
1405	oris	r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@h
1406	msync
1407	isync
1408	mtspr	L1CSR0,r0
1409	isync
1410	blr
1411
1412	.globl	dcache_disable
1413dcache_disable:
1414	mfspr	r3,L1CSR0
1415	lis	r4,0
1416	ori	r4,r4,L1CSR0_DCE
1417	andc	r3,r3,r4
1418	mtspr	L1CSR0,r3
1419	isync
1420	blr
1421
1422	.globl	dcache_status
1423dcache_status:
1424	mfspr	r3,L1CSR0
1425	andi.	r3,r3,L1CSR0_DCE
1426	blr
1427
1428/*------------------------------------------------------------------------------- */
1429/* Function:	 in8 */
1430/* Description:	 Input 8 bits */
1431/*------------------------------------------------------------------------------- */
1432	.globl	in8
1433in8:
1434	lbz	r3,0x0000(r3)
1435	blr
1436
1437/*------------------------------------------------------------------------------- */
1438/* Function:	 out8 */
1439/* Description:	 Output 8 bits */
1440/*------------------------------------------------------------------------------- */
1441	.globl	out8
1442out8:
1443	stb	r4,0x0000(r3)
1444	sync
1445	blr
1446
1447/*------------------------------------------------------------------------------- */
1448/* Function:	 out16 */
1449/* Description:	 Output 16 bits */
1450/*------------------------------------------------------------------------------- */
1451	.globl	out16
1452out16:
1453	sth	r4,0x0000(r3)
1454	sync
1455	blr
1456
1457/*------------------------------------------------------------------------------- */
1458/* Function:	 out16r */
1459/* Description:	 Byte reverse and output 16 bits */
1460/*------------------------------------------------------------------------------- */
1461	.globl	out16r
1462out16r:
1463	sthbrx	r4,r0,r3
1464	sync
1465	blr
1466
1467/*------------------------------------------------------------------------------- */
1468/* Function:	 out32 */
1469/* Description:	 Output 32 bits */
1470/*------------------------------------------------------------------------------- */
1471	.globl	out32
1472out32:
1473	stw	r4,0x0000(r3)
1474	sync
1475	blr
1476
1477/*------------------------------------------------------------------------------- */
1478/* Function:	 out32r */
1479/* Description:	 Byte reverse and output 32 bits */
1480/*------------------------------------------------------------------------------- */
1481	.globl	out32r
1482out32r:
1483	stwbrx	r4,r0,r3
1484	sync
1485	blr
1486
1487/*------------------------------------------------------------------------------- */
1488/* Function:	 in16 */
1489/* Description:	 Input 16 bits */
1490/*------------------------------------------------------------------------------- */
1491	.globl	in16
1492in16:
1493	lhz	r3,0x0000(r3)
1494	blr
1495
1496/*------------------------------------------------------------------------------- */
1497/* Function:	 in16r */
1498/* Description:	 Input 16 bits and byte reverse */
1499/*------------------------------------------------------------------------------- */
1500	.globl	in16r
1501in16r:
1502	lhbrx	r3,r0,r3
1503	blr
1504
1505/*------------------------------------------------------------------------------- */
1506/* Function:	 in32 */
1507/* Description:	 Input 32 bits */
1508/*------------------------------------------------------------------------------- */
1509	.globl	in32
1510in32:
1511	lwz	r3,0x0000(r3)
1512	blr
1513
1514/*------------------------------------------------------------------------------- */
1515/* Function:	 in32r */
1516/* Description:	 Input 32 bits and byte reverse */
1517/*------------------------------------------------------------------------------- */
1518	.globl	in32r
1519in32r:
1520	lwbrx	r3,r0,r3
1521	blr
1522#endif  /* !MINIMAL_SPL */
1523
1524/*------------------------------------------------------------------------------*/
1525
1526/*
1527 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
1528 */
1529	.globl	write_tlb
1530write_tlb:
1531	mtspr	MAS0,r3
1532	mtspr	MAS1,r4
1533	mtspr	MAS2,r5
1534	mtspr	MAS3,r6
1535#ifdef CONFIG_ENABLE_36BIT_PHYS
1536	mtspr	MAS7,r7
1537#endif
1538	li	r3,0
1539#ifdef CONFIG_SYS_BOOK3E_HV
1540	mtspr	MAS8,r3
1541#endif
1542	isync
1543	tlbwe
1544	msync
1545	isync
1546	blr
1547
1548/*
1549 * void relocate_code (addr_sp, gd, addr_moni)
1550 *
1551 * This "function" does not return, instead it continues in RAM
1552 * after relocating the monitor code.
1553 *
1554 * r3 = dest
1555 * r4 = src
1556 * r5 = length in bytes
1557 * r6 = cachelinesize
1558 */
1559	.globl	relocate_code
1560relocate_code:
1561	mr	r1,r3		/* Set new stack pointer		*/
1562	mr	r9,r4		/* Save copy of Init Data pointer	*/
1563	mr	r10,r5		/* Save copy of Destination Address	*/
1564
1565	GET_GOT
1566#ifndef CONFIG_SPL_SKIP_RELOCATE
1567	mr	r3,r5				/* Destination Address	*/
1568	lis	r4,CONFIG_SYS_MONITOR_BASE@h		/* Source      Address	*/
1569	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
1570	lwz	r5,GOT(__init_end)
1571	sub	r5,r5,r4
1572	li	r6,CONFIG_SYS_CACHELINE_SIZE		/* Cache Line Size	*/
1573
1574	/*
1575	 * Fix GOT pointer:
1576	 *
1577	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
1578	 *
1579	 * Offset:
1580	 */
1581	sub	r15,r10,r4
1582
1583	/* First our own GOT */
1584	add	r12,r12,r15
1585	/* then the one used by the C code */
1586	add	r30,r30,r15
1587
1588	/*
1589	 * Now relocate code
1590	 */
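	/* Copy word by word.  If the destination is below the source, copy
	 * forwards; otherwise copy backwards so an overlapping region is
	 * handled correctly.  The copy is skipped when source == destination.
	 */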
1591
1592	cmplw	cr1,r3,r4
1593	addi	r0,r5,3
1594	srwi.	r0,r0,2
1595	beq	cr1,4f		/* In place copy is not necessary	*/
1596	beq	7f		/* Protect against 0 count		*/
1597	mtctr	r0
1598	bge	cr1,2f
1599
1600	la	r8,-4(r4)
1601	la	r7,-4(r3)
16021:	lwzu	r0,4(r8)
1603	stwu	r0,4(r7)
1604	bdnz	1b
1605	b	4f
1606
16072:	slwi	r0,r0,2
1608	add	r8,r4,r0
1609	add	r7,r3,r0
16103:	lwzu	r0,-4(r8)
1611	stwu	r0,-4(r7)
1612	bdnz	3b
1613
1614/*
1615 * Now flush the cache: note that we must start from a cache aligned
1616 * address. Otherwise we might miss one cache line.
1617 */
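/* dcbst writes the freshly copied image back to memory, then icbi
 * invalidates the corresponding instruction-cache lines so the relocated
 * code is fetched from memory rather than from stale cache contents.
 */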
16184:	cmpwi	r6,0
1619	add	r5,r3,r5
1620	beq	7f		/* Always flush prefetch queue in any case */
1621	subi	r0,r6,1
1622	andc	r3,r3,r0
1623	mr	r4,r3
16245:	dcbst	0,r4
1625	add	r4,r4,r6
1626	cmplw	r4,r5
1627	blt	5b
1628	sync			/* Wait for all dcbst to complete on bus */
1629	mr	r4,r3
16306:	icbi	0,r4
1631	add	r4,r4,r6
1632	cmplw	r4,r5
1633	blt	6b
16347:	sync			/* Wait for all icbi to complete on bus */
1635	isync
1636
1637/*
1638 * We are done. Do not return, instead branch to second part of board
1639 * initialization, now running from RAM.
1640 */
1641
1642	addi	r0,r10,in_ram - _start
1643
1644	/*
1645	 * As IVPR is going to point to a RAM address,
1646	 * make sure IVOR15 has a valid opcode to support the debugger
1647	 */
1648	mtspr	IVOR15,r0
1649
1650	/*
1651	 * Re-point the IVPR at RAM
1652	 */
1653	mtspr	IVPR,r10
1654
1655	mtlr	r0
1656	blr				/* NEVER RETURNS! */
1657#endif
1658	.globl	in_ram
1659in_ram:
1660
1661	/*
1662	 * Relocation function, r12 points to got2+0x8000
1663	 *
1664	 * Adjust got2 pointers; no need to check for 0, this code
1665	 * already puts a few entries in the table.
1666	 */
1667	li	r0,__got2_entries@sectoff@l
1668	la	r3,GOT(_GOT2_TABLE_)
1669	lwz	r11,GOT(_GOT2_TABLE_)
1670	mtctr	r0
1671	sub	r11,r3,r11
1672	addi	r3,r3,-4
16731:	lwzu	r0,4(r3)
1674	cmpwi	r0,0
1675	beq-	2f
1676	add	r0,r0,r11
1677	stw	r0,0(r3)
16782:	bdnz	1b
1679
1680	/*
1681	 * Now adjust the fixups and the pointers to the fixups
1682	 * in case we need to move ourselves again.
1683	 */
1684	li	r0,__fixup_entries@sectoff@l
1685	lwz	r3,GOT(_FIXUP_TABLE_)
1686	cmpwi	r0,0
1687	mtctr	r0
1688	addi	r3,r3,-4
1689	beq	4f
16903:	lwzu	r4,4(r3)
1691	lwzux	r0,r4,r11
1692	cmpwi	r0,0
1693	add	r0,r0,r11
1694	stw	r4,0(r3)
1695	beq-	5f
1696	stw	r0,0(r4)
16975:	bdnz	3b
16984:
1699clear_bss:
1700	/*
1701	 * Now clear BSS segment
1702	 */
1703	lwz	r3,GOT(__bss_start)
1704	lwz	r4,GOT(__bss_end)
1705
1706	cmplw	0,r3,r4
1707	beq	6f
1708
1709	li	r0,0
17105:
1711	stw	r0,0(r3)
1712	addi	r3,r3,4
1713	cmplw	0,r3,r4
1714	blt	5b
17156:
1716
1717	mr	r3,r9		/* Init Data pointer		*/
1718	mr	r4,r10		/* Destination Address		*/
1719	bl	board_init_r
1720
1721#ifndef MINIMAL_SPL
1722	/*
1723	 * Point the exception vectors at the relocated code
1724	 *
1725	 * r3: new IVPR base (address of the relocated vectors); the IVORs
1726	 * below are loaded from the handlers' GOT entries.
1727	 */
1728	.globl	trap_init
1729trap_init:
1730	mflr	r11
1731	bl	_GLOBAL_OFFSET_TABLE_-4
1732	mflr	r12
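	/* r12 now holds the GOT pointer, so the relocated addresses of the
	 * exception handlers below can be fetched via their @got entries.
	 */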
1733
1734	/* Update IVORs as per relocation */
1735	mtspr	IVPR,r3
1736
1737	lwz	r4,CriticalInput@got(r12)
1738	mtspr	IVOR0,r4	/* 0: Critical input */
1739	lwz	r4,MachineCheck@got(r12)
1740	mtspr	IVOR1,r4	/* 1: Machine check */
1741	lwz	r4,DataStorage@got(r12)
1742	mtspr	IVOR2,r4	/* 2: Data storage */
1743	lwz	r4,InstStorage@got(r12)
1744	mtspr	IVOR3,r4	/* 3: Instruction storage */
1745	lwz	r4,ExtInterrupt@got(r12)
1746	mtspr	IVOR4,r4	/* 4: External interrupt */
1747	lwz	r4,Alignment@got(r12)
1748	mtspr	IVOR5,r4	/* 5: Alignment */
1749	lwz	r4,ProgramCheck@got(r12)
1750	mtspr	IVOR6,r4	/* 6: Program check */
1751	lwz	r4,FPUnavailable@got(r12)
1752	mtspr	IVOR7,r4	/* 7: floating point unavailable */
1753	lwz	r4,SystemCall@got(r12)
1754	mtspr	IVOR8,r4	/* 8: System call */
1755	/* 9: Auxiliary processor unavailable(unsupported) */
1756	lwz	r4,Decrementer@got(r12)
1757	mtspr	IVOR10,r4	/* 10: Decrementer */
1758	lwz	r4,IntervalTimer@got(r12)
1759	mtspr	IVOR11,r4	/* 11: Interval timer */
1760	lwz	r4,WatchdogTimer@got(r12)
1761	mtspr	IVOR12,r4	/* 12: Watchdog timer */
1762	lwz	r4,DataTLBError@got(r12)
1763	mtspr	IVOR13,r4	/* 13: Data TLB error */
1764	lwz	r4,InstructionTLBError@got(r12)
1765	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
1766	lwz	r4,DebugBreakpoint@got(r12)
1767	mtspr	IVOR15,r4	/* 15: Debug */
1768
1769	mtlr	r11
1770	blr
1771
1772.globl unlock_ram_in_cache
1773unlock_ram_in_cache:
1774	/* invalidate the INIT_RAM section */
1775	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
1776	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
1777	mfspr	r4,L1CFG0
1778	andi.	r4,r4,0x1ff
1779	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
1780	mtctr	r4
17811:	dcbi	r0,r3
1782#ifdef CONFIG_E6500	/* lock/unlock L2 cache along with L1 */
1783	dcblc	2, r0, r3
1784	dcblc	0, r0, r3
1785#else
1786	dcblc	r0,r3
1787#endif
1788	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
1789	bdnz	1b
1790	sync
1791
1792	/* Invalidate the TLB entries for the cache */
1793	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
1794	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1795	tlbivax	0,r3
1796	addi	r3,r3,0x1000
1797	tlbivax	0,r3
1798	addi	r3,r3,0x1000
1799	tlbivax	0,r3
1800	addi	r3,r3,0x1000
1801	tlbivax	0,r3
1802	isync
1803	blr
1804
1805.globl flush_dcache
1806flush_dcache:
1807	mfspr	r3,SPRN_L1CFG0
1808
1809	rlwinm	r5,r3,9,3	/* Extract cache block size */
1810	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
1811				 * are currently defined.
1812				 */
1813	li	r4,32
1814	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
1815				 *      log2(number of ways)
1816				 */
1817	slw	r5,r4,r5	/* r5 = cache block size */
1818
1819	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
1820	mulli	r7,r7,13	/* An 8-way cache will require 13
1821				 * loads per set.
1822				 */
1823	slw	r7,r7,r6
1824
1825	/* save off HID0 and set DCFA */
1826	mfspr	r8,SPRN_HID0
1827	ori	r9,r8,HID0_DCFA@l
1828	mtspr	SPRN_HID0,r9
1829	isync
1830
1831	lis	r4,0
1832	mtctr	r7
1833
18341:	lwz	r3,0(r4)	/* Load... */
1835	add	r4,r4,r5
1836	bdnz	1b
1837
1838	msync
1839	lis	r4,0
1840	mtctr	r7
1841
18421:	dcbf	0,r4		/* ...and flush. */
1843	add	r4,r4,r5
1844	bdnz	1b
1845
1846	/* restore HID0 */
1847	mtspr	SPRN_HID0,r8
1848	isync
1849
1850	blr
1851#endif /* !MINIMAL_SPL */
1852