xref: /openbmc/u-boot/arch/powerpc/cpu/mpc85xx/start.S (revision bb737ced)
1/*
2 * Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
3 * Copyright (C) 2003  Motorola,Inc.
4 *
5 * SPDX-License-Identifier:	GPL-2.0+
6 */
7
8/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
9 *
10 * The processor starts at 0xfffffffc and the code is first executed in the
11 * last 4K page (0xfffff000-0xffffffff) in flash/ROM.
12 *
13 */
14
15#include <asm-offsets.h>
16#include <config.h>
17#include <mpc85xx.h>
18#include <version.h>
19
20#include <ppc_asm.tmpl>
21#include <ppc_defs.h>
22
23#include <asm/cache.h>
24#include <asm/mmu.h>
25
26#undef	MSR_KERNEL
27#define MSR_KERNEL ( MSR_ME )	/* Machine Check */
28
29#define LAW_EN		0x80000000
30
31#if defined(CONFIG_NAND_SPL) || \
32	(defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
33#define MINIMAL_SPL
34#endif
35
36#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
37	!defined(CONFIG_SECURE_BOOT) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
38#define NOR_BOOT
39#endif
40
41/*
42 * Set up GOT: Global Offset Table
43 *
44 * Use r12 to access the GOT
45 */
46	START_GOT
47	GOT_ENTRY(_GOT2_TABLE_)
48	GOT_ENTRY(_FIXUP_TABLE_)
49
50#ifndef MINIMAL_SPL
51	GOT_ENTRY(_start)
52	GOT_ENTRY(_start_of_vectors)
53	GOT_ENTRY(_end_of_vectors)
54	GOT_ENTRY(transfer_to_handler)
55#endif
56
57	GOT_ENTRY(__init_end)
58	GOT_ENTRY(__bss_end)
59	GOT_ENTRY(__bss_start)
60	END_GOT
61
62/*
63 * e500 Startup -- after reset only the last 4KB of the effective
64 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
65 * section is located at THIS LAST page and basically does three
66 * things: clear some registers, set up exception tables and
67 * add more TLB entries for 'larger spaces' (e.g. the boot ROM) to
68 * continue the boot procedure.
69 *
70 * Once the boot ROM is mapped by TLB entries we can proceed
71 * with normal startup.
72 *
73 */
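/*
 * As laid out below, the .bootpg code clears/initializes core registers,
 * sets up IVPR/IVORs, shrinks the boot TLB entry to this 4K page,
 * relocates CCSR if required, creates AS=1 mappings for the image and
 * the L1-locked init RAM, and finally rfi's into AS=1 at switch_as.
 */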
74
75	.section .bootpg,"ax"
76	.globl _start_e500
77
78_start_e500:
79/* Enable debug exception */
80	li	r1,MSR_DE
81	mtmsr 	r1
82
83	/*
84	 * If we got an ePAPR device tree pointer passed in as r3, we need that
85	 * later in cpu_init_early_f(). Save it to a safe register before we
86	 * clobber it so that we can fetch it from there later.
87	 */
88	mr	r24, r3
89
90#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
91	mfspr	r3,SPRN_SVR
92	rlwinm	r3,r3,0,0xff
93	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
94	cmpw	r3,r4
95	beq	1f
96
97#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
98	li	r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
99	cmpw	r3,r4
100	beq	1f
101#endif
102
103	/* This revision is not affected by the erratum */
104	li	r27,0
105	b	2f
106
1071:	li	r27,1	/* Remember for later that we have the erratum */
108	/* Erratum says set bits 55:60 to 001001 */
109	msync
110	isync
111	mfspr	r3,SPRN_HDBCR0
112	li	r4,0x48
113	rlwimi	r3,r4,0,0x1f8
114	mtspr	SPRN_HDBCR0,r3
115	isync
1162:
117#endif
118#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
119	msync
120	isync
121	mfspr	r3, SPRN_HDBCR0
122	oris	r3, r3, 0x0080
123	mtspr	SPRN_HDBCR0, r3
124#endif
125
126
127#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC) && \
128	!defined(CONFIG_E6500)
129	/* ISBC uses L2 as stack.
130	 * Disable the L2 cache here so that U-Boot can enable it later
131	 * as part of its normal flow.
132	 */
133
134	/* Check if L2 is enabled */
135	mfspr	r3, SPRN_L2CSR0
136	lis	r2, L2CSR0_L2E@h
137	ori	r2, r2, L2CSR0_L2E@l
138	and.	r4, r3, r2
139	beq	l2_disabled
140
141	mfspr r3, SPRN_L2CSR0
142	/* Flush L2 cache */
143	lis     r2,(L2CSR0_L2FL)@h
144	ori     r2, r2, (L2CSR0_L2FL)@l
145	or      r3, r2, r3
146	sync
147	isync
148	mtspr   SPRN_L2CSR0,r3
149	isync
1501:
151	mfspr r3, SPRN_L2CSR0
152	and. r1, r3, r2
153	bne 1b
154
155	mfspr r3, SPRN_L2CSR0
156	lis r2, L2CSR0_L2E@h
157	ori r2, r2, L2CSR0_L2E@l
158	andc r4, r3, r2
159	sync
160	isync
161	mtspr SPRN_L2CSR0,r4
162	isync
163
164l2_disabled:
165#endif
166
167/* clear registers/arrays not reset by hardware */
168
169	/* L1 */
170	li	r0,2
171	mtspr	L1CSR0,r0	/* invalidate d-cache */
172	mtspr	L1CSR1,r0	/* invalidate i-cache */
173
174	mfspr	r1,DBSR
175	mtspr	DBSR,r1		/* Clear all valid bits */
176
177
178	.macro	create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
179	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
180	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
181	mtspr	MAS0, \scratch
182	lis	\scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
183	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
184	mtspr	MAS1, \scratch
185	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
186	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
187	mtspr	MAS2, \scratch
188	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
189	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
190	mtspr	MAS3, \scratch
191	lis	\scratch, \phy_high@h
192	ori	\scratch, \scratch, \phy_high@l
193	mtspr	MAS7, \scratch
194	isync
195	msync
196	tlbwe
197	isync
198	.endm
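	/*
	 * Usage sketch (illustrative values; the real invocations appear
	 * further down, e.g. under create_init_ram_area): map 1M at
	 * 0xfff00000 one-to-one, cache-inhibited and guarded, into TLB1
	 * entry 15 for AS=1, using r6 as scratch:
	 *
	 *	create_tlb1_entry 15, \
	 *		1, BOOKE_PAGESZ_1M, \
	 *		0xfff00000, MAS2_I|MAS2_G, \
	 *		0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
	 *		0, r6
	 */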
199
200	.macro	create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
201	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
202	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
203	mtspr	MAS0, \scratch
204	lis	\scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
205	ori	\scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
206	mtspr	MAS1, \scratch
207	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
208	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
209	mtspr	MAS2, \scratch
210	lis	\scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
211	ori	\scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
212	mtspr	MAS3, \scratch
213	lis	\scratch, \phy_high@h
214	ori	\scratch, \scratch, \phy_high@l
215	mtspr	MAS7, \scratch
216	isync
217	msync
218	tlbwe
219	isync
220	.endm
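	/*
	 * Unlike create_tlb1_entry, entries written by this macro land in
	 * TLB0 and get no IPROT protection, which is why TLB0 is only used
	 * for short-lived mappings here (see the CCSR relocation below).
	 */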
221
222	.macro	delete_tlb1_entry esel scratch
223	lis	\scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
224	ori	\scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
225	mtspr	MAS0, \scratch
226	li	\scratch, 0
227	mtspr	MAS1, \scratch
228	isync
229	msync
230	tlbwe
231	isync
232	.endm
233
234	.macro	delete_tlb0_entry esel epn wimg scratch
235	lis	\scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
236	ori	\scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
237	mtspr	MAS0, \scratch
238	li	\scratch, 0
239	mtspr	MAS1, \scratch
240	lis	\scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
241	ori	\scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
242	mtspr	MAS2, \scratch
243	isync
244	msync
245	tlbwe
246	isync
247	.endm
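	/*
	 * Note: TLB0 is set-associative and indexed by the EPN, so the EPN
	 * written via MAS2 takes part in selecting the entry to invalidate;
	 * MAS0[ESEL] only picks the way. That is presumably why this macro,
	 * unlike delete_tlb1_entry, also needs the epn/wimg arguments.
	 */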
248
249/* Interrupt vectors do not fit in minimal SPL. */
250#if !defined(MINIMAL_SPL)
251	/* Setup interrupt vectors */
252	lis	r1,CONFIG_SYS_MONITOR_BASE@h
253	mtspr	IVPR,r1
254
255	li	r4,CriticalInput@l
256	mtspr	IVOR0,r4	/* 0: Critical input */
257	li	r4,MachineCheck@l
258	mtspr	IVOR1,r4	/* 1: Machine check */
259	li	r4,DataStorage@l
260	mtspr	IVOR2,r4	/* 2: Data storage */
261	li	r4,InstStorage@l
262	mtspr	IVOR3,r4	/* 3: Instruction storage */
263	li	r4,ExtInterrupt@l
264	mtspr	IVOR4,r4	/* 4: External interrupt */
265	li	r4,Alignment@l
266	mtspr	IVOR5,r4	/* 5: Alignment */
267	li	r4,ProgramCheck@l
268	mtspr	IVOR6,r4	/* 6: Program check */
269	li	r4,FPUnavailable@l
270	mtspr	IVOR7,r4	/* 7: floating point unavailable */
271	li	r4,SystemCall@l
272	mtspr	IVOR8,r4	/* 8: System call */
273	/* 9: Auxiliary processor unavailable (unsupported) */
274	li	r4,Decrementer@l
275	mtspr	IVOR10,r4	/* 10: Decrementer */
276	li	r4,IntervalTimer@l
277	mtspr	IVOR11,r4	/* 11: Interval timer */
278	li	r4,WatchdogTimer@l
279	mtspr	IVOR12,r4	/* 12: Watchdog timer */
280	li	r4,DataTLBError@l
281	mtspr	IVOR13,r4	/* 13: Data TLB error */
282	li	r4,InstructionTLBError@l
283	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
284	li	r4,DebugBreakpoint@l
285	mtspr	IVOR15,r4	/* 15: Debug */
286#endif
287
288	/* Clear and set up some registers. */
289	li      r0,0x0000
290	lis	r1,0xffff
291	mtspr	DEC,r0			/* prevent dec exceptions */
292	mttbl	r0			/* prevent fit & wdt exceptions */
293	mttbu	r0
294	mtspr	TSR,r1			/* clear all timer exception status */
295	mtspr	TCR,r0			/* disable all */
296	mtspr	ESR,r0			/* clear exception syndrome register */
297	mtspr	MCSR,r0			/* machine check syndrome register */
298	mtxer	r0			/* clear integer exception register */
299
300#ifdef CONFIG_SYS_BOOK3E_HV
301	mtspr	MAS8,r0			/* make sure MAS8 is clear */
302#endif
303
304	/* Enable Time Base and Select Time Base Clock */
305	lis	r0,HID0_EMCP@h		/* Enable machine check */
306#if defined(CONFIG_ENABLE_36BIT_PHYS)
307	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
308#endif
309#ifndef CONFIG_E500MC
310	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
311#endif
312	mtspr	HID0,r0
313
314#if !defined(CONFIG_E500MC) && !defined(CONFIG_ARCH_QEMU_E500)
315	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
316	mfspr	r3,PVR
317	andi.	r3,r3, 0xff
318	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
319	blt 1f
320	/* Set MBDD bit also */
321	ori r0, r0, HID1_MBDD@l
3221:
323	mtspr	HID1,r0
324#endif
325
326#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
327	mfspr	r3,SPRN_HDBCR1
328	oris	r3,r3,0x0100
329	mtspr	SPRN_HDBCR1,r3
330#endif
331
332	/* Enable Branch Prediction */
333#if defined(CONFIG_BTB)
334	lis	r0,BUCSR_ENABLE@h
335	ori	r0,r0,BUCSR_ENABLE@l
336	mtspr	SPRN_BUCSR,r0
337#endif
338
339#if defined(CONFIG_SYS_INIT_DBCR)
340	lis	r1,0xffff
341	ori	r1,r1,0xffff
342	mtspr	DBSR,r1			/* Clear all status bits */
343	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
344	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
345	mtspr	DBCR0,r0
346#endif
347
348#ifdef CONFIG_ARCH_MPC8569
349#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
350#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)
351
352	/* MPC8569 Rev.0 silicon needs bit 13 of LBCR set to allow the eLBC to
353	 * use an address space wider than 12 bits, and it must be done in
354	 * the 4K boot page. So we set this bit here.
355	 */
356
357	/* create a temp mapping in TLB0[0] for LBCR */
358	create_tlb0_entry 0, \
359		0, BOOKE_PAGESZ_4K, \
360		CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G, \
361		CONFIG_SYS_LBC_ADDR, MAS3_SW|MAS3_SR, \
362		0, r6
363
364	/* Set LBCR register */
365	lis     r4,CONFIG_SYS_LBCR_ADDR@h
366	ori     r4,r4,CONFIG_SYS_LBCR_ADDR@l
367
368	lis     r5,CONFIG_SYS_LBC_LBCR@h
369	ori     r5,r5,CONFIG_SYS_LBC_LBCR@l
370	stw     r5,0(r4)
371	isync
372
373	/* invalidate this temp TLB */
374	lis	r4,CONFIG_SYS_LBC_ADDR@h
375	ori	r4,r4,CONFIG_SYS_LBC_ADDR@l
376	tlbivax	0,r4
377	isync
378
379#endif /* CONFIG_ARCH_MPC8569 */
380
381/*
382 * Search for the TLB that covers the code we're executing, and shrink it
383 * so that it covers only this 4K page.  That will ensure that any other
384 * TLB we create won't interfere with it.  We assume that the TLB exists,
385 * which is why we don't check the Valid bit of MAS1.  We also assume
386 * it is in TLB1.
387 *
388 * This is necessary, for example, when booting from the on-chip ROM,
389 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
390 */
391	bl	nexti		/* Find our address */
392nexti:	mflr	r1		/* R1 = our PC */
393	li	r2, 0
394	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
395	isync
396	msync
397	tlbsx	0, r1		/* This must succeed */
398
399	mfspr	r14, MAS0	/* Save ESEL for later */
400	rlwinm	r14, r14, 16, 0xfff
401
402	/* Set the size of the TLB to 4KB */
403	mfspr	r3, MAS1
404	li	r2, 0xF80	/* MAS1 TSIZE field mask */
405	andc	r3, r3, r2	/* Clear the TSIZE bits */
406	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
407	oris	r3, r3, MAS1_IPROT@h
408	mtspr	MAS1, r3
409
410	/*
411	 * Set the base address of the TLB to our PC.  We assume that
412	 * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
413	 */
414	lis	r3, MAS2_EPN@h
415	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */
416
417	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */
418
419	mfspr	r2, MAS2
420	andc	r2, r2, r3
421	or	r2, r2, r1
422#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
423	cmpwi	r27,0
424	beq	1f
425	andi.	r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */
426	rlwinm	r2, r2, 0, ~MAS2_I
427	ori	r2, r2, MAS2_G
4281:
429#endif
430	mtspr	MAS2, r2	/* Set the EPN to our PC base address */
431
432	mfspr	r2, MAS3
433	andc	r2, r2, r3
434	or	r2, r2, r1
435	mtspr	MAS3, r2	/* Set the RPN to our PC base address */
436
437	isync
438	msync
439	tlbwe
440
441/*
442 * Clear out any other TLB entries that may exist, to avoid conflicts.
443 * Our TLB entry is in r14.
444 */
445	li	r0, TLBIVAX_ALL | TLBIVAX_TLB0
446	tlbivax 0, r0
447	tlbsync
448
449	mfspr	r4, SPRN_TLB1CFG
450	rlwinm	r4, r4, 0, TLBnCFG_NENTRY_MASK
451
452	li	r3, 0
453	mtspr	MAS1, r3
4541:	cmpw	r3, r14
455	rlwinm	r5, r3, 16, MAS0_ESEL_MSK
456	addi	r3, r3, 1
457	beq	2f		/* skip the entry we're executing from */
458
459	oris	r5, r5, MAS0_TLBSEL(1)@h
460	mtspr	MAS0, r5
461
462	isync
463	tlbwe
464	isync
465	msync
466
4672:	cmpw	r3, r4
468	blt	1b
469
470#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) && \
471	!defined(CONFIG_SECURE_BOOT)
472/*
473 * TLB entry for debugging in AS1
474 * Create a temporary TLB entry in AS0 to handle debug exceptions.
475 * On a debug exception the MSR is cleared, i.e. the address space is
476 * changed to 0, so a TLB entry in AS0 is required to handle a debug
477 * exception generated while running in AS1.
478 */
479
480#ifdef NOR_BOOT
481/*
482 * A TLB entry is created for IVPR + IVOR15 to map it to a valid opcode
483 * address, because the flash's virtual addresses map to
484 * 0xff800000 - 0xffffffff and this window is outside of the 4K boot window.
485 */
486	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
487		0, BOOKE_PAGESZ_4M, \
488		CONFIG_SYS_MONITOR_BASE & 0xffc00000,  MAS2_I|MAS2_G, \
489		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
490		0, r6
491
492#else
493/*
494 * A TLB entry is created for IVPR + IVOR15 to map it to a valid opcode
495 * address, because "nexti" will resize the boot TLB to 4K.
496 */
497	create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
498		0, BOOKE_PAGESZ_256K, \
499		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS2_I, \
500		CONFIG_SYS_MONITOR_BASE & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
501		0, r6
502#endif
503#endif
504
505/*
506 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
507 * location is not where we want it.  This typically happens on a 36-bit
508 * system, where we want to move CCSR to near the top of 36-bit address space.
509 *
510 * To move CCSR, we create two temporary TLBs, one for the old location, and
511 * another for the new location.  On CoreNet systems, we also need to create
512 * a special, temporary LAW.
513 *
514 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
515 * long-term TLBs, so we use TLB0 here.
516 */
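/*
 * In outline (see the labelled steps below for the exact ordering and
 * the required sync/isync barriers):
 *
 *	create_ccsr_new_tlb:  TLB0[0] maps CONFIG_SYS_CCSRBAR -> new phys
 *	create_ccsr_old_tlb:  TLB0[1] maps CONFIG_SYS_CCSRBAR+0x1000 -> old phys
 *	verify_old_ccsr:      sanity-check the current CCSRBAR value
 *	create_temp_law:      (CoreNet only) temporary LAW for the new window
 *	write_new_ccsrbar:    program the new base and commit it
 *	delete_temp_law:      (CoreNet only) remove the temporary LAW
 *	delete_temp_tlbs:     remove both temporary TLB0 entries
 */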
517#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
518
519#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
520#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
521#endif
522
523create_ccsr_new_tlb:
524	/*
525	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
526	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
527	 */
528	lis	r8, CONFIG_SYS_CCSRBAR@h
529	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
530	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
531	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
532	create_tlb0_entry 0, \
533		0, BOOKE_PAGESZ_4K, \
534		CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
535		CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
536		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
537	/*
538	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
539	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
540	 */
541create_ccsr_old_tlb:
542	create_tlb0_entry 1, \
543		0, BOOKE_PAGESZ_4K, \
544		CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
545		CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
546		0, r3 /* The default CCSR address is always a 32-bit number */
547
548
549	/*
550	 * We have a TLB for what we think is the current (old) CCSR.  Let's
551	 * verify that, otherwise we won't be able to move it.
552	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
553	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
554	 */
555verify_old_ccsr:
556	lis     r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
557	ori     r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
558#ifdef CONFIG_FSL_CORENET
559	lwz	r1, 4(r9)		/* CCSRBARL */
560#else
561	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
562	slwi	r1, r1, 12
563#endif
564
565	cmpl	0, r0, r1
566
567	/*
568	 * If the value we read from CCSRBARL is not what we expect, then
569	 * enter an infinite loop.  This will at least allow a debugger to
570	 * halt execution and examine TLBs, etc.  There's no point in going
571	 * on.
572	 */
573infinite_debug_loop:
574	bne	infinite_debug_loop
575
576#ifdef CONFIG_FSL_CORENET
577
578#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
579#define LAW_SIZE_4K	0xb
580#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
581#define CCSRAR_C	0x80000000	/* Commit */
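/*
 * LAWAR[SIZE] encodes a window of 2^(SIZE+1) bytes, so LAW_SIZE_4K (0xb)
 * selects 4 KiB (and LAW_SIZE_1M, 0x13, used for DCSR below, selects 1 MiB);
 * 0x1e is the LAW target ID for this temporary window.
 */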
582
583create_temp_law:
584	/*
585	 * On CoreNet systems, we create the temporary LAW using a special LAW
586	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
587	 */
588	lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
589	ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
590	lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
591	ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
592	lis     r2, CCSRBAR_LAWAR@h
593	ori     r2, r2, CCSRBAR_LAWAR@l
594
595	stw     r0, 0xc00(r9)	/* LAWBARH0 */
596	stw     r1, 0xc04(r9)	/* LAWBARL0 */
597	sync
598	stw     r2, 0xc08(r9)	/* LAWAR0 */
599
600	/*
601	 * Read back from LAWAR to ensure the update is complete.  e500mc
602	 * cores also require an isync.
603	 */
604	lwz	r0, 0xc08(r9)	/* LAWAR0 */
605	isync
606
607	/*
608	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
609	 * Follow this with an isync instruction. This forces any outstanding
610	 * accesses to configuration space to completion.
611	 */
612read_old_ccsrbar:
613	lwz	r0, 0(r9)	/* CCSRBARH */
614	lwz	r0, 4(r9)	/* CCSRBARL */
615	isync
616
617	/*
618	 * Write the new values for CCSRBARH and CCSRBARL to their old
619	 * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
620	 * has a new value written it loads a CCSRBARH shadow register. When
621	 * the CCSRBARL is written, the CCSRBARH shadow register contents
622	 * along with the CCSRBARL value are loaded into the CCSRBARH and
623	 * CCSRBARL registers, respectively.  Follow this with a sync
624	 * instruction.
625	 */
626write_new_ccsrbar:
627	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
628	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
629	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
630	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
631	lis	r2, CCSRAR_C@h
632	ori	r2, r2, CCSRAR_C@l
633
634	stw	r0, 0(r9)	/* Write to CCSRBARH */
635	sync			/* Make sure we write to CCSRBARH first */
636	stw	r1, 4(r9)	/* Write to CCSRBARL */
637	sync
638
639	/*
640	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
641	 * Follow this with a sync instruction.
642	 */
643	stw	r2, 8(r9)
644	sync
645
646	/* Delete the temporary LAW */
647delete_temp_law:
648	li	r1, 0
649	stw	r1, 0xc08(r8)
650	sync
651	stw	r1, 0xc00(r8)
652	stw	r1, 0xc04(r8)
653	sync
654
655#else /* #ifdef CONFIG_FSL_CORENET */
656
657write_new_ccsrbar:
658	/*
659	 * Read the current value of CCSRBAR using a load word instruction
660	 * followed by an isync. This forces all accesses to configuration
661	 * space to complete.
662	 */
663	sync
664	lwz	r0, 0(r9)
665	isync
666
667/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
668#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
669			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
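/*
 * For example (hypothetical values): CONFIG_SYS_CCSRBAR_PHYS_HIGH = 0xf
 * and CONFIG_SYS_CCSRBAR_PHYS_LOW = 0xfe000000 give
 * (0xf << 20) | (0xfe000000 >> 12) = 0x00ffe000, i.e. the 36-bit physical
 * base 0xf_fe000000 shifted right by 12.
 */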
670
671	/* Write the new value to CCSRBAR. */
672	lis	r0, CCSRBAR_PHYS_RS12@h
673	ori	r0, r0, CCSRBAR_PHYS_RS12@l
674	stw	r0, 0(r9)
675	sync
676
677	/*
678	 * The manual says to perform a load of an address that does not
679	 * access configuration space or the on-chip SRAM using an existing TLB,
680	 * but that doesn't appear to be necessary.  We will do the isync,
681	 * though.
682	 */
683	isync
684
685	/*
686	 * Read the contents of CCSRBAR from its new location, followed by
687	 * another isync.
688	 */
689	lwz	r0, 0(r8)
690	isync
691
692#endif  /* #ifdef CONFIG_FSL_CORENET */
693
694	/* Delete the temporary TLBs */
695delete_temp_tlbs:
696	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
697	delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3
698
699#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
700
701#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
702create_ccsr_l2_tlb:
703	/*
704	 * Create a TLB for the MMR location of CCSR
705	 * to access L2CSR0 register
706	 */
707	create_tlb0_entry 0, \
708		0, BOOKE_PAGESZ_4K, \
709		CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
710		CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
711		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
712
713enable_l2_cluster_l2:
714	/* enable L2 cache */
715	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
716	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
717	li	r4, 33	/* stash id */
718	stw	r4, 4(r3)
719	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
720	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
721	sync
722	stw	r4, 0(r3)	/* invalidate L2 */
723	/* Poll till the bits are cleared */
7241:	sync
725	lwz	r0, 0(r3)
726	twi	0, r0, 0
727	isync
728	and.	r1, r0, r4
729	bne	1b
730
731	/* L2PE must be set before L2 cache is enabled */
732	lis	r4, (L2CSR0_L2PE)@h
733	ori	r4, r4, (L2CSR0_L2PE)@l
734	sync
735	stw	r4, 0(r3)	/* enable L2 parity/ECC error checking */
736	/* Poll till the bit is set */
7371:	sync
738	lwz	r0, 0(r3)
739	twi	0, r0, 0
740	isync
741	and.	r1, r0, r4
742	beq	1b
743
744	lis	r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
745	ori	r4, r4, (L2CSR0_L2REP_MODE)@l
746	sync
747	stw	r4, 0(r3)	/* enable L2 */
748	/* Poll till the bit is set */
7491:	sync
750	lwz	r0, 0(r3)
751	twi	0, r0, 0
752	isync
753	and.	r1, r0, r4
754	beq	1b
755
756delete_ccsr_l2_tlb:
757	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
758#endif
759
760	/*
761	 * Enable the L1. On e6500, this has to be done
762	 * after the L2 is up.
763	 */
764
765#ifdef CONFIG_SYS_CACHE_STASHING
766	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
767	li	r2,(32 + 0)
768	mtspr	L1CSR2,r2
769#endif
770
771	/* Enable/invalidate the I-Cache */
772	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
773	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
774	mtspr	SPRN_L1CSR1,r2
7751:
776	mfspr	r3,SPRN_L1CSR1
777	and.	r1,r3,r2
778	bne	1b
779
780	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
781	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
782	mtspr	SPRN_L1CSR1,r3
783	isync
7842:
785	mfspr	r3,SPRN_L1CSR1
786	andi.	r1,r3,L1CSR1_ICE@l
787	beq	2b
788
789	/* Enable/invalidate the D-Cache */
790	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
791	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
792	mtspr	SPRN_L1CSR0,r2
7931:
794	mfspr	r3,SPRN_L1CSR0
795	and.	r1,r3,r2
796	bne	1b
797
798	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
799	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
800	mtspr	SPRN_L1CSR0,r3
801	isync
8022:
803	mfspr	r3,SPRN_L1CSR0
804	andi.	r1,r3,L1CSR0_DCE@l
805	beq	2b
806#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
807#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
808#define LAW_SIZE_1M	0x13
809#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)
810
811	cmpwi	r27,0
812	beq	9f
813
814	/*
815	 * Create a TLB entry for CCSR
816	 *
817	 * We're executing out of the TLB1 entry whose index is in r14, and
818	 * that's the only TLB entry that exists.  To allocate TLB entries for our
819	 * own use, flip a bit high enough that we won't flip it again
820	 * via incrementing.
821	 */
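	/*
	 * For example, if the entry index in r14 is 1, then r8 = 1 ^ 32 = 33
	 * and r9 = r8 + 1 = 34 (set below for DCSR); neither can collide
	 * with entry 1.
	 */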
822
823	xori	r8, r14, 32
824	lis	r0, MAS0_TLBSEL(1)@h
825	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
826	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
827	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
828	lis	r7, CONFIG_SYS_CCSRBAR@h
829	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
830	ori	r2, r7, MAS2_I|MAS2_G
831	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
832	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
833	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
834	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
835	mtspr	MAS0, r0
836	mtspr	MAS1, r1
837	mtspr	MAS2, r2
838	mtspr	MAS3, r3
839	mtspr	MAS7, r4
840	isync
841	tlbwe
842	isync
843	msync
844
845	/* Map DCSR temporarily to physical address zero */
846	li	r0, 0
847	lis	r3, DCSRBAR_LAWAR@h
848	ori	r3, r3, DCSRBAR_LAWAR@l
849
850	stw	r0, 0xc00(r7)	/* LAWBARH0 */
851	stw	r0, 0xc04(r7)	/* LAWBARL0 */
852	sync
853	stw	r3, 0xc08(r7)	/* LAWAR0 */
854
855	/* Read back from LAWAR to ensure the update is complete. */
856	lwz	r3, 0xc08(r7)	/* LAWAR0 */
857	isync
858
859	/* Create a TLB entry for DCSR at zero */
860
861	addi	r9, r8, 1
862	lis	r0, MAS0_TLBSEL(1)@h
863	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
864	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
865	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
866	li	r6, 0	/* DCSR effective address */
867	ori	r2, r6, MAS2_I|MAS2_G
868	li	r3, MAS3_SW|MAS3_SR
869	li	r4, 0
870	mtspr	MAS0, r0
871	mtspr	MAS1, r1
872	mtspr	MAS2, r2
873	mtspr	MAS3, r3
874	mtspr	MAS7, r4
875	isync
876	tlbwe
877	isync
878	msync
879
880	/* enable the timebase */
881#define CTBENR	0xe2084
882	li	r3, 1
883	addis	r4, r7, CTBENR@ha
884	stw	r3, CTBENR@l(r4)
885	lwz	r3, CTBENR@l(r4)
886	twi	0,r3,0
887	isync
888
889	.macro	erratum_set_ccsr offset value
890	addis	r3, r7, \offset@ha
891	lis	r4, \value@h
892	addi	r3, r3, \offset@l
893	ori	r4, r4, \value@l
894	bl	erratum_set_value
895	.endm
896
897	.macro	erratum_set_dcsr offset value
898	addis	r3, r6, \offset@ha
899	lis	r4, \value@h
900	addi	r3, r3, \offset@l
901	ori	r4, r4, \value@l
902	bl	erratum_set_value
903	.endm
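	/*
	 * The @ha/@l pair is used (rather than @h/@l) because addi
	 * sign-extends its 16-bit operand; @ha pre-compensates the upper
	 * half so that base + offset is correct for any offset.
	 */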
904
905	erratum_set_dcsr 0xb0e08 0xe0201800
906	erratum_set_dcsr 0xb0e18 0xe0201800
907	erratum_set_dcsr 0xb0e38 0xe0400000
908	erratum_set_dcsr 0xb0008 0x00900000
909	erratum_set_dcsr 0xb0e40 0xe00a0000
910	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
911#ifdef  CONFIG_RAMBOOT_PBL
912	erratum_set_ccsr 0x10f00 0x495e5000
913#else
914	erratum_set_ccsr 0x10f00 0x415e5000
915#endif
916	erratum_set_ccsr 0x11f00 0x415e5000
917
918	/* Make temp mapping uncacheable again, if it was initially */
919	bl	2f
9202:	mflr	r3
921	tlbsx	0, r3
922	mfspr	r4, MAS2
923	rlwimi	r4, r15, 0, MAS2_I
924	rlwimi	r4, r15, 0, MAS2_G
925	mtspr	MAS2, r4
926	isync
927	tlbwe
928	isync
929	msync
930
931	/* Clear the cache */
932	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
933	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
934	sync
935	isync
936	mtspr	SPRN_L1CSR1,r3
937	isync
9382:	sync
939	mfspr	r4,SPRN_L1CSR1
940	and.	r4,r4,r3
941	bne	2b
942
943	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
944	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
945	sync
946	isync
947	mtspr	SPRN_L1CSR1,r3
948	isync
9492:	sync
950	mfspr	r4,SPRN_L1CSR1
951	and.	r4,r4,r3
952	beq	2b
953
954	/* Remove temporary mappings */
955	lis	r0, MAS0_TLBSEL(1)@h
956	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
957	li	r3, 0
958	mtspr	MAS0, r0
959	mtspr	MAS1, r3
960	isync
961	tlbwe
962	isync
963	msync
964
965	li	r3, 0
966	stw	r3, 0xc08(r7)	/* LAWAR0 */
967	lwz	r3, 0xc08(r7)
968	isync
969
970	lis	r0, MAS0_TLBSEL(1)@h
971	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
972	li	r3, 0
973	mtspr	MAS0, r0
974	mtspr	MAS1, r3
975	isync
976	tlbwe
977	isync
978	msync
979
980	b	9f
981
982	/* r3 = addr, r4 = value, clobbers r5, r11, r12 */
983erratum_set_value:
984	/* Lock two cache lines into I-Cache */
985	sync
986	mfspr	r11, SPRN_L1CSR1
987	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
988	sync
989	isync
990	mtspr	SPRN_L1CSR1, r11
991	isync
992
993	mflr	r12
994	bl	5f
9955:	mflr	r5
996	addi	r5, r5, 2f - 5b
997	icbtls	0, 0, r5
998	addi	r5, r5, 64
999
1000	sync
1001	mfspr	r11, SPRN_L1CSR1
10023:	andi.	r11, r11, L1CSR1_ICUL
1003	bne	3b
1004
1005	icbtls	0, 0, r5
1006	addi	r5, r5, 64
1007
1008	sync
1009	mfspr	r11, SPRN_L1CSR1
10103:	andi.	r11, r11, L1CSR1_ICUL
1011	bne	3b
1012
1013	b	2f
1014	.align	6
1015	/* Inside a locked cacheline, wait a while, write, then wait a while */
10162:	sync
1017
1018	mfspr	r5, SPRN_TBRL
1019	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
10204:	mfspr	r5, SPRN_TBRL
1021	subf.	r5, r5, r11
1022	bgt	4b
1023
1024	stw	r4, 0(r3)
1025
1026	mfspr	r5, SPRN_TBRL
1027	addis	r11, r5, 0x10000@h /* wait 65536 timebase ticks */
10284:	mfspr	r5, SPRN_TBRL
1029	subf.	r5, r5, r11
1030	bgt	4b
1031
1032	sync
1033
1034	/*
1035	 * Fill out the rest of this cache line and the next with nops,
1036	 * to ensure that nothing outside the locked area will be
1037	 * fetched due to a branch.
1038	 */
1039	.rept 19
1040	nop
1041	.endr
1042
1043	sync
1044	mfspr	r11, SPRN_L1CSR1
1045	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
1046	sync
1047	isync
1048	mtspr	SPRN_L1CSR1, r11
1049	isync
1050
1051	mtlr	r12
1052	blr
1053
10549:
1055#endif
1056
1057create_init_ram_area:
1058	lis     r6,FSL_BOOKE_MAS0(1, 15, 0)@h
1059	ori     r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
1060
1061#ifdef NOR_BOOT
1062	/* create a temp mapping in AS=1 to the 4M boot window */
1063	create_tlb1_entry 15, \
1064		1, BOOKE_PAGESZ_4M, \
1065		CONFIG_SYS_MONITOR_BASE & 0xffc00000, MAS2_I|MAS2_G, \
1066		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1067		0, r6
1068
1069#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
1070	/* create a temp mapping in AS = 1 for Flash mapping
1071	 * created by PBL for ISBC code
1072	 */
1073	create_tlb1_entry 15, \
1074		1, BOOKE_PAGESZ_1M, \
1075		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1076		CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1077		0, r6
1078
1079/*
1080 * For targets without CONFIG_SPL (e.g. P3, P5), and for targets with
1081 * CONFIG_SPL (e.g. T1, T2, T4) only when building u-boot-spl,
1082 * i.e. CONFIG_SPL_BUILD.
1083 */
1084#elif defined(CONFIG_RAMBOOT_PBL) && defined(CONFIG_SECURE_BOOT) && \
1085	(!defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD))
1086	/* create a temp mapping in AS=1 from CONFIG_SYS_MONITOR_BASE
1087	 * to the L3 address configured by the PBL for the ISBC code
1088	 */
1089	create_tlb1_entry 15, \
1090		1, BOOKE_PAGESZ_1M, \
1091		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1092		CONFIG_SYS_INIT_L3_ADDR & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1093		0, r6
1094
1095#else
1096	/*
1097	 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space;
1098	 * the main image has been relocated there in the second stage.
1099	 */
1100	create_tlb1_entry 15, \
1101		1, BOOKE_PAGESZ_1M, \
1102		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS2_I|MAS2_G, \
1103		CONFIG_SYS_MONITOR_BASE & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
1104		0, r6
1105#endif
1106
1107	/* create a temp mapping in AS=1 to the stack */
1108#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
1109    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
1110	create_tlb1_entry 14, \
1111		1, BOOKE_PAGESZ_16K, \
1112		CONFIG_SYS_INIT_RAM_ADDR, 0, \
1113		CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
1114		CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6
1115
1116#else
1117	create_tlb1_entry 14, \
1118		1, BOOKE_PAGESZ_16K, \
1119		CONFIG_SYS_INIT_RAM_ADDR, 0, \
1120		CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
1121		0, r6
1122#endif
1123
1124	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
1125	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
1126	lis	r7,switch_as@h
1127	ori	r7,r7,switch_as@l
1128
1129	mtspr	SPRN_SRR0,r7
1130	mtspr	SPRN_SRR1,r6
1131	rfi
1132
1133switch_as:
1134/* L1 DCache is used for initial RAM */
1135
1136	/* Allocate Initial RAM in data cache.
1137	 */
1138	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
1139	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1140	mfspr	r2, L1CFG0
1141	andi.	r2, r2, 0x1ff
1142	/* cache size * 1024 / (2 * L1 line size) */
1143	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
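	/* e.g. (hypothetically) a 32 KiB L1 with 32-byte lines and
	 * L1_CACHE_SHIFT = 5 gives r2 = 32 << 4 = 512 lines, i.e. 16 KiB
	 * of locked init RAM (half of the data cache).
	 */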
1144	mtctr	r2
1145	li	r0,0
11461:
1147	dcbz	r0,r3
1148#ifdef CONFIG_E6500	/* Lock/unlock L2 cache along with L1 */
1149	dcbtls	2, r0, r3
1150	dcbtls	0, r0, r3
1151#else
1152	dcbtls	0, r0, r3
1153#endif
1154	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
1155	bdnz	1b
1156
1157	/* Jump out of the last 4K page and continue to the 'normal' start */
1158#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
1159	/* We assume that we're already running at the address we're linked at */
1160	b	_start_cont
1161#else
1162	/* Calculate absolute address in FLASH and jump there		*/
1163	/*--------------------------------------------------------------*/
1164	lis	r3,CONFIG_SYS_MONITOR_BASE@h
1165	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
1166	addi	r3,r3,_start_cont - _start
1167	mtlr	r3
1168	blr
1169#endif
1170
1171	.text
1172	.globl	_start
1173_start:
1174	.long	0x27051956		/* U-BOOT Magic Number */
1175	.globl	version_string
1176version_string:
1177	.ascii U_BOOT_VERSION_STRING, "\0"
1178
1179	.align	4
1180	.globl	_start_cont
1181_start_cont:
1182	/* Set up the stack in initial RAM, could be L2-as-SRAM or L1 dcache */
1183	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
1184	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l /* Align to 16 */
1185
1186#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1187#if CONFIG_VAL(SYS_MALLOC_F_LEN) + GENERATED_GBL_DATA_SIZE > CONFIG_SYS_INIT_RAM_SIZE
1188#error "SYS_MALLOC_F_LEN too large to fit into initial RAM."
1189#endif
1190
1191	/* Leave 16+ bytes for back chain termination and NULL return address */
1192	subi	r3,r3,((CONFIG_VAL(SYS_MALLOC_F_LEN)+16+15)&~0xf)
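	/* i.e. reserve SYS_MALLOC_F_LEN plus the 16 bytes used below for the
	 * back chain/return address, rounded up to a 16-byte multiple so the
	 * stack stays 16-byte aligned.
	 */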
1193#endif
1194
1195	/* End of RAM */
1196	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1197	ori	r4,r4,(CONFIG_SYS_INIT_RAM_SIZE)@l
1198
1199	li	r0,0
1200
12011: 	subi 	r4,r4,4
1202	stw 	r0,0(r4)
1203	cmplw 	r4,r3
1204	bne	1b
1205
1206#if CONFIG_VAL(SYS_MALLOC_F_LEN)
1207	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
1208	ori	r4,r4,(CONFIG_SYS_GBL_DATA_OFFSET)@l
1209
1210	addi	r3,r3,16	/* Pre-relocation malloc area */
1211	stw	r3,GD_MALLOC_BASE(r4)
1212	subi	r3,r3,16
1213#endif
1214	li	r0,0
1215	stw	r0,0(r3)	/* Terminate Back Chain */
1216	stw	r0,+4(r3)	/* NULL return address. */
1217	mr	r1,r3		/* Transfer to SP(r1) */
1218
1219	GET_GOT
1220
1221	/* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
1222	mr	r3, r24
1223
1224	bl	cpu_init_early_f
1225
1226	/* switch back to AS = 0 */
1227	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
1228	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
1229	mtmsr	r3
1230	isync
1231
1232	bl	cpu_init_f	/* return boot_flag for calling board_init_f */
1233	bl	board_init_f
1234	isync
1235
1236	/* NOTREACHED - board_init_f() does not return */
1237
1238#ifndef MINIMAL_SPL
1239	.globl	_start_of_vectors
1240_start_of_vectors:
1241
1242/* Critical input. */
1243	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
1244
1245/* Machine check */
1246	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
1247
1248/* Data Storage exception. */
1249	STD_EXCEPTION(0x0300, DataStorage, UnknownException)
1250
1251/* Instruction Storage exception. */
1252	STD_EXCEPTION(0x0400, InstStorage, UnknownException)
1253
1254/* External Interrupt exception. */
1255	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)
1256
1257/* Alignment exception. */
1258Alignment:
1259	EXCEPTION_PROLOG(SRR0, SRR1)
1260	mfspr	r4,DAR
1261	stw	r4,_DAR(r21)
1262	mfspr	r5,DSISR
1263	stw	r5,_DSISR(r21)
1264	addi	r3,r1,STACK_FRAME_OVERHEAD
1265	EXC_XFER_TEMPLATE(0x600, Alignment, AlignmentException,
1266		MSR_KERNEL, COPY_EE)
1267
1268/* Program check exception */
1269ProgramCheck:
1270	EXCEPTION_PROLOG(SRR0, SRR1)
1271	addi	r3,r1,STACK_FRAME_OVERHEAD
1272	EXC_XFER_TEMPLATE(0x700, ProgramCheck, ProgramCheckException,
1273		MSR_KERNEL, COPY_EE)
1274
1275	/* No FPU on MPC85xx.  This exception is not supposed to happen.
1276	 */
1277	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
1278	STD_EXCEPTION(0x0900, SystemCall, UnknownException)
1279	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
1280	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
1281	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
1282
1283	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
1284	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
1285
1286	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )
1287
1288	.globl	_end_of_vectors
1289_end_of_vectors:
1290
1291
1292	. = . + (0x100 - ( . & 0xff ))	/* align for debug */
1293
1294/*
1295 * This code finishes saving the registers to the exception frame
1296 * and jumps to the appropriate handler for the exception.
1297 * Register r21 is pointer into trap frame, r1 has new stack pointer.
1298 * r23 is the address of the handler.
1299 */
1300	.globl	transfer_to_handler
1301transfer_to_handler:
1302	SAVE_GPR(7, r21)
1303	SAVE_4GPRS(8, r21)
1304	SAVE_8GPRS(12, r21)
1305	SAVE_8GPRS(24, r21)
1306
1307	li	r22,0
1308	stw	r22,RESULT(r21)
1309	mtspr	SPRG2,r22		/* r1 is now kernel sp */
1310
1311	mtctr	r23			/* virtual address of handler */
1312	mtmsr	r20
1313	bctrl
1314
1315int_return:
1316	mfmsr	r28		/* Disable interrupts */
1317	li	r4,0
1318	ori	r4,r4,MSR_EE
1319	andc	r28,r28,r4
1320	SYNC			/* Some chip revs need this... */
1321	mtmsr	r28
1322	SYNC
1323	lwz	r2,_CTR(r1)
1324	lwz	r0,_LINK(r1)
1325	mtctr	r2
1326	mtlr	r0
1327	lwz	r2,_XER(r1)
1328	lwz	r0,_CCR(r1)
1329	mtspr	XER,r2
1330	mtcrf	0xFF,r0
1331	REST_10GPRS(3, r1)
1332	REST_10GPRS(13, r1)
1333	REST_8GPRS(23, r1)
1334	REST_GPR(31, r1)
1335	lwz	r2,_NIP(r1)	/* Restore environment */
1336	lwz	r0,_MSR(r1)
1337	mtspr	SRR0,r2
1338	mtspr	SRR1,r0
1339	lwz	r0,GPR0(r1)
1340	lwz	r2,GPR2(r1)
1341	lwz	r1,GPR1(r1)
1342	SYNC
1343	rfi
1344
1345/* Cache functions.
1346*/
1347.globl flush_icache
1348flush_icache:
1349.globl invalidate_icache
1350invalidate_icache:
1351	mfspr	r0,L1CSR1
1352	ori	r0,r0,L1CSR1_ICFI
1353	msync
1354	isync
1355	mtspr	L1CSR1,r0
1356	isync
1357	blr				/* entire I cache */
1358
1359.globl invalidate_dcache
1360invalidate_dcache:
1361	mfspr	r0,L1CSR0
1362	ori	r0,r0,L1CSR0_DCFI
1363	msync
1364	isync
1365	mtspr	L1CSR0,r0
1366	isync
1367	blr
1368
1369	.globl	icache_enable
1370icache_enable:
1371	mflr	r8
1372	bl	invalidate_icache
1373	mtlr	r8
1374	isync
1375	mfspr	r4,L1CSR1
1376	ori	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@l
1377	oris	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@h
1378	mtspr	L1CSR1,r4
1379	isync
1380	blr
1381
1382	.globl	icache_disable
1383icache_disable:
1384	mfspr	r0,L1CSR1
1385	lis	r3,0
1386	ori	r3,r3,L1CSR1_ICE
1387	andc	r0,r0,r3
1388	mtspr	L1CSR1,r0
1389	isync
1390	blr
1391
1392	.globl	icache_status
1393icache_status:
1394	mfspr	r3,L1CSR1
1395	andi.	r3,r3,L1CSR1_ICE
1396	blr
1397
1398	.globl	dcache_enable
1399dcache_enable:
1400	mflr	r8
1401	bl	invalidate_dcache
1402	mtlr	r8
1403	isync
1404	mfspr	r0,L1CSR0
1405	ori	r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@l
1406	oris	r0,r0,(L1CSR0_CPE |  L1CSR0_DCE)@h
1407	msync
1408	isync
1409	mtspr	L1CSR0,r0
1410	isync
1411	blr
1412
1413	.globl	dcache_disable
1414dcache_disable:
1415	mfspr	r3,L1CSR0
1416	lis	r4,0
1417	ori	r4,r4,L1CSR0_DCE
1418	andc	r3,r3,r4
1419	mtspr	L1CSR0,r3
1420	isync
1421	blr
1422
1423	.globl	dcache_status
1424dcache_status:
1425	mfspr	r3,L1CSR0
1426	andi.	r3,r3,L1CSR0_DCE
1427	blr
1428
1429/*------------------------------------------------------------------------------- */
1430/* Function:	 in8 */
1431/* Description:	 Input 8 bits */
1432/*------------------------------------------------------------------------------- */
1433	.globl	in8
1434in8:
1435	lbz	r3,0x0000(r3)
1436	blr
1437
1438/*------------------------------------------------------------------------------- */
1439/* Function:	 out8 */
1440/* Description:	 Output 8 bits */
1441/*------------------------------------------------------------------------------- */
1442	.globl	out8
1443out8:
1444	stb	r4,0x0000(r3)
1445	sync
1446	blr
1447
1448/*------------------------------------------------------------------------------- */
1449/* Function:	 out16 */
1450/* Description:	 Output 16 bits */
1451/*------------------------------------------------------------------------------- */
1452	.globl	out16
1453out16:
1454	sth	r4,0x0000(r3)
1455	sync
1456	blr
1457
1458/*------------------------------------------------------------------------------- */
1459/* Function:	 out16r */
1460/* Description:	 Byte reverse and output 16 bits */
1461/*------------------------------------------------------------------------------- */
1462	.globl	out16r
1463out16r:
1464	sthbrx	r4,r0,r3
1465	sync
1466	blr
1467
1468/*------------------------------------------------------------------------------- */
1469/* Function:	 out32 */
1470/* Description:	 Output 32 bits */
1471/*------------------------------------------------------------------------------- */
1472	.globl	out32
1473out32:
1474	stw	r4,0x0000(r3)
1475	sync
1476	blr
1477
1478/*------------------------------------------------------------------------------- */
1479/* Function:	 out32r */
1480/* Description:	 Byte reverse and output 32 bits */
1481/*------------------------------------------------------------------------------- */
1482	.globl	out32r
1483out32r:
1484	stwbrx	r4,r0,r3
1485	sync
1486	blr
1487
1488/*------------------------------------------------------------------------------- */
1489/* Function:	 in16 */
1490/* Description:	 Input 16 bits */
1491/*------------------------------------------------------------------------------- */
1492	.globl	in16
1493in16:
1494	lhz	r3,0x0000(r3)
1495	blr
1496
1497/*------------------------------------------------------------------------------- */
1498/* Function:	 in16r */
1499/* Description:	 Input 16 bits and byte reverse */
1500/*------------------------------------------------------------------------------- */
1501	.globl	in16r
1502in16r:
1503	lhbrx	r3,r0,r3
1504	blr
1505
1506/*------------------------------------------------------------------------------- */
1507/* Function:	 in32 */
1508/* Description:	 Input 32 bits */
1509/*------------------------------------------------------------------------------- */
1510	.globl	in32
1511in32:
1512	lwz	r3,0x0000(r3)
1513	blr
1514
1515/*------------------------------------------------------------------------------- */
1516/* Function:	 in32r */
1517/* Description:	 Input 32 bits and byte reverse */
1518/*------------------------------------------------------------------------------- */
1519	.globl	in32r
1520in32r:
1521	lwbrx	r3,r0,r3
1522	blr
1523#endif  /* !MINIMAL_SPL */
1524
1525/*------------------------------------------------------------------------------*/
1526
1527/*
1528 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
1529 */
1530	.globl	write_tlb
1531write_tlb:
1532	mtspr	MAS0,r3
1533	mtspr	MAS1,r4
1534	mtspr	MAS2,r5
1535	mtspr	MAS3,r6
1536#ifdef CONFIG_ENABLE_36BIT_PHYS
1537	mtspr	MAS7,r7
1538#endif
1539	li	r3,0
1540#ifdef CONFIG_SYS_BOOK3E_HV
1541	mtspr	MAS8,r3
1542#endif
1543	isync
1544	tlbwe
1545	msync
1546	isync
1547	blr
1548
1549/*
1550 * void relocate_code (addr_sp, gd, addr_moni)
1551 *
1552 * This "function" does not return, instead it continues in RAM
1553 * after relocating the monitor code.
1554 *
1555 * r3 = dest
1556 * r4 = src
1557 * r5 = length in bytes
1558 * r6 = cachelinesize
1559 */
1560	.globl	relocate_code
1561relocate_code:
1562	mr	r1,r3		/* Set new stack pointer		*/
1563	mr	r9,r4		/* Save copy of Init Data pointer	*/
1564	mr	r10,r5		/* Save copy of Destination Address	*/
1565
1566	GET_GOT
1567#ifndef CONFIG_SPL_SKIP_RELOCATE
1568	mr	r3,r5				/* Destination Address	*/
1569	lis	r4,CONFIG_SYS_MONITOR_BASE@h		/* Source      Address	*/
1570	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
1571	lwz	r5,GOT(__init_end)
1572	sub	r5,r5,r4
1573	li	r6,CONFIG_SYS_CACHELINE_SIZE		/* Cache Line Size	*/
1574
1575	/*
1576	 * Fix GOT pointer:
1577	 *
1578	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
1579	 *
1580	 * Offset:
1581	 */
1582	sub	r15,r10,r4
1583
1584	/* First our own GOT */
1585	add	r12,r12,r15
1586	/* then the one used by the C code */
1587	add	r30,r30,r15
1588
1589	/*
1590	 * Now relocate code
1591	 */
1592
1593	cmplw	cr1,r3,r4
1594	addi	r0,r5,3
1595	srwi.	r0,r0,2
1596	beq	cr1,4f		/* In place copy is not necessary	*/
1597	beq	7f		/* Protect against 0 count		*/
1598	mtctr	r0
1599	bge	cr1,2f
1600
1601	la	r8,-4(r4)
1602	la	r7,-4(r3)
16031:	lwzu	r0,4(r8)
1604	stwu	r0,4(r7)
1605	bdnz	1b
1606	b	4f
1607
16082:	slwi	r0,r0,2
1609	add	r8,r4,r0
1610	add	r7,r3,r0
16113:	lwzu	r0,-4(r8)
1612	stwu	r0,-4(r7)
1613	bdnz	3b
1614
1615/*
1616 * Now flush the cache: note that we must start from a cache aligned
1617 * address. Otherwise we might miss one cache line.
1618 */
16194:	cmpwi	r6,0
1620	add	r5,r3,r5
1621	beq	7f		/* Always flush prefetch queue in any case */
1622	subi	r0,r6,1
1623	andc	r3,r3,r0
1624	mr	r4,r3
16255:	dcbst	0,r4
1626	add	r4,r4,r6
1627	cmplw	r4,r5
1628	blt	5b
1629	sync			/* Wait for all dcbst to complete on bus */
1630	mr	r4,r3
16316:	icbi	0,r4
1632	add	r4,r4,r6
1633	cmplw	r4,r5
1634	blt	6b
16357:	sync			/* Wait for all icbi to complete on bus */
1636	isync
1637
1638/*
1639 * We are done. Do not return, instead branch to second part of board
1640 * initialization, now running from RAM.
1641 */
1642
1643	addi	r0,r10,in_ram - _start
1644
1645	/*
1646	 * As IVPR is going to point to a RAM address, make sure
1647	 * IVOR15 holds a valid opcode to support the debugger.
1648	 */
1649	mtspr	IVOR15,r0
1650
1651	/*
1652	 * Re-point the IVPR at RAM
1653	 */
1654	mtspr	IVPR,r10
1655
1656	mtlr	r0
1657	blr				/* NEVER RETURNS! */
1658#endif
1659	.globl	in_ram
1660in_ram:
1661
1662	/*
1663	 * Relocation function, r12 points to got2+0x8000
1664	 *
1665	 * Adjust got2 pointers; no need to check for 0, this code
1666	 * already puts a few entries in the table.
1667	 */
1668	li	r0,__got2_entries@sectoff@l
1669	la	r3,GOT(_GOT2_TABLE_)
1670	lwz	r11,GOT(_GOT2_TABLE_)
1671	mtctr	r0
1672	sub	r11,r3,r11
1673	addi	r3,r3,-4
16741:	lwzu	r0,4(r3)
1675	cmpwi	r0,0
1676	beq-	2f
1677	add	r0,r0,r11
1678	stw	r0,0(r3)
16792:	bdnz	1b
1680
1681	/*
1682	 * Now adjust the fixups and the pointers to the fixups
1683	 * in case we need to move ourselves again.
1684	 */
1685	li	r0,__fixup_entries@sectoff@l
1686	lwz	r3,GOT(_FIXUP_TABLE_)
1687	cmpwi	r0,0
1688	mtctr	r0
1689	addi	r3,r3,-4
1690	beq	4f
16913:	lwzu	r4,4(r3)
1692	lwzux	r0,r4,r11
1693	cmpwi	r0,0
1694	add	r0,r0,r11
1695	stw	r4,0(r3)
1696	beq-	5f
1697	stw	r0,0(r4)
16985:	bdnz	3b
16994:
1700clear_bss:
1701	/*
1702	 * Now clear BSS segment
1703	 */
1704	lwz	r3,GOT(__bss_start)
1705	lwz	r4,GOT(__bss_end)
1706
1707	cmplw	0,r3,r4
1708	beq	6f
1709
1710	li	r0,0
17115:
1712	stw	r0,0(r3)
1713	addi	r3,r3,4
1714	cmplw	0,r3,r4
1715	blt	5b
17166:
1717
1718	mr	r3,r9		/* Init Data pointer		*/
1719	mr	r4,r10		/* Destination Address		*/
1720	bl	board_init_r
1721
1722#ifndef MINIMAL_SPL
1723	/*
1724	 * Re-point the exception vectors after relocation
1725	 *
1726	 * r3: new IVPR base address; the individual IVOR values are
1727	 * loaded from the relocated GOT below.
1728	 */
1729	.globl	trap_init
1730trap_init:
1731	mflr	r11
1732	bl	_GLOBAL_OFFSET_TABLE_-4
1733	mflr	r12
1734
1735	/* Update IVORs as per relocation */
1736	mtspr	IVPR,r3
1737
1738	lwz	r4,CriticalInput@got(r12)
1739	mtspr	IVOR0,r4	/* 0: Critical input */
1740	lwz	r4,MachineCheck@got(r12)
1741	mtspr	IVOR1,r4	/* 1: Machine check */
1742	lwz	r4,DataStorage@got(r12)
1743	mtspr	IVOR2,r4	/* 2: Data storage */
1744	lwz	r4,InstStorage@got(r12)
1745	mtspr	IVOR3,r4	/* 3: Instruction storage */
1746	lwz	r4,ExtInterrupt@got(r12)
1747	mtspr	IVOR4,r4	/* 4: External interrupt */
1748	lwz	r4,Alignment@got(r12)
1749	mtspr	IVOR5,r4	/* 5: Alignment */
1750	lwz	r4,ProgramCheck@got(r12)
1751	mtspr	IVOR6,r4	/* 6: Program check */
1752	lwz	r4,FPUnavailable@got(r12)
1753	mtspr	IVOR7,r4	/* 7: floating point unavailable */
1754	lwz	r4,SystemCall@got(r12)
1755	mtspr	IVOR8,r4	/* 8: System call */
1756	/* 9: Auxiliary processor unavailable (unsupported) */
1757	lwz	r4,Decrementer@got(r12)
1758	mtspr	IVOR10,r4	/* 10: Decrementer */
1759	lwz	r4,IntervalTimer@got(r12)
1760	mtspr	IVOR11,r4	/* 11: Interval timer */
1761	lwz	r4,WatchdogTimer@got(r12)
1762	mtspr	IVOR12,r4	/* 12: Watchdog timer */
1763	lwz	r4,DataTLBError@got(r12)
1764	mtspr	IVOR13,r4	/* 13: Data TLB error */
1765	lwz	r4,InstructionTLBError@got(r12)
1766	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
1767	lwz	r4,DebugBreakpoint@got(r12)
1768	mtspr	IVOR15,r4	/* 15: Debug */
1769
1770	mtlr	r11
1771	blr
1772
1773.globl unlock_ram_in_cache
1774unlock_ram_in_cache:
1775	/* invalidate the INIT_RAM section */
1776	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
1777	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
1778	mfspr	r4,L1CFG0
1779	andi.	r4,r4,0x1ff
1780	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
1781	mtctr	r4
17821:	dcbi	r0,r3
1783#ifdef CONFIG_E6500	/* lock/unlock L2 cache along with L1 */
1784	dcblc	2, r0, r3
1785	dcblc	0, r0, r3
1786#else
1787	dcblc	r0,r3
1788#endif
1789	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
1790	bdnz	1b
1791	sync
1792
1793	/* Invalidate the TLB entries for the cache */
1794	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
1795	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
1796	tlbivax	0,r3
1797	addi	r3,r3,0x1000
1798	tlbivax	0,r3
1799	addi	r3,r3,0x1000
1800	tlbivax	0,r3
1801	addi	r3,r3,0x1000
1802	tlbivax	0,r3
1803	isync
1804	blr
1805
1806.globl flush_dcache
1807flush_dcache:
1808	mfspr	r3,SPRN_L1CFG0
1809
1810	rlwinm	r5,r3,9,3	/* Extract cache block size */
1811	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
1812				 * are currently defined.
1813				 */
1814	li	r4,32
1815	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
1816				 *      log2(number of ways)
1817				 */
1818	slw	r5,r4,r5	/* r5 = cache block size */
1819
1820	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
1821	mulli	r7,r7,13	/* An 8-way cache will require 13
1822				 * loads per set.
1823				 */
1824	slw	r7,r7,r6
1825
1826	/* save off HID0 and set DCFA */
1827	mfspr	r8,SPRN_HID0
1828	ori	r9,r8,HID0_DCFA@l
1829	mtspr	SPRN_HID0,r9
1830	isync
1831
1832	lis	r4,0
1833	mtctr	r7
1834
18351:	lwz	r3,0(r4)	/* Load... */
1836	add	r4,r4,r5
1837	bdnz	1b
1838
1839	msync
1840	lis	r4,0
1841	mtctr	r7
1842
18431:	dcbf	0,r4		/* ...and flush. */
1844	add	r4,r4,r5
1845	bdnz	1b
1846
1847	/* restore HID0 */
1848	mtspr	SPRN_HID0,r8
1849	isync
1850
1851	blr
1852#endif /* !MINIMAL_SPL */
1853