xref: /openbmc/u-boot/arch/powerpc/cpu/mpc85xx/start.S (revision c9e798d3)
1/*
2 * Copyright 2004, 2007-2011 Freescale Semiconductor, Inc.
3 * Copyright (C) 2003  Motorola,Inc.
4 *
5 * See file CREDITS for list of people who contributed to this
6 * project.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of
11 * the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
21 * MA 02111-1307 USA
22 */
23
24/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
25 *
26 * The processor starts at 0xfffffffc and the code is first executed in the
27 * last 4K page(0xfffff000-0xffffffff) in flash/rom.
28 *
29 */
30
31#include <asm-offsets.h>
32#include <config.h>
33#include <mpc85xx.h>
34#include <version.h>
35
36#define _LINUX_CONFIG_H 1	/* avoid reading Linux autoconf.h file	*/
37
38#include <ppc_asm.tmpl>
39#include <ppc_defs.h>
40
41#include <asm/cache.h>
42#include <asm/mmu.h>
43
44#undef	MSR_KERNEL
45#define MSR_KERNEL ( MSR_ME )	/* Machine Check */
46
47/*
48 * Set up GOT: Global Offset Table
49 *
50 * Use r12 to access the GOT
51 */
	/* GOT entries resolved at relocation time; r12 is the GOT base */
52	START_GOT
53	GOT_ENTRY(_GOT2_TABLE_)
54	GOT_ENTRY(_FIXUP_TABLE_)
55
	/* Only the full U-Boot image installs exception vectors; the NAND
	 * SPL omits these entries to save boot-page space. */
56#ifndef CONFIG_NAND_SPL
57	GOT_ENTRY(_start)
58	GOT_ENTRY(_start_of_vectors)
59	GOT_ENTRY(_end_of_vectors)
60	GOT_ENTRY(transfer_to_handler)
61#endif
62
	/* Linker-script section boundaries (used for relocation/BSS clear) */
63	GOT_ENTRY(__init_end)
64	GOT_ENTRY(__bss_end__)
65	GOT_ENTRY(__bss_start)
66	END_GOT
67
68/*
69 * e500 Startup -- after reset only the last 4KB of the effective
70 * address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
71 * section is located at THIS LAST page and basically does three
72 * things: clear some registers, set up exception tables and
73 * add more TLB entries for 'larger spaces'(e.g. the boot rom) to
74 * continue the boot procedure.
75
76 * Once the boot rom is mapped by TLB entries we can proceed
77 * with normal startup.
78 *
79 */
80
	/*
	 * Reset entry point.  Lives in .bootpg so the linker places it in the
	 * last 4KB page, which is the only page mapped by the reset-default
	 * TLB entry.
	 */
81	.section .bootpg,"ax"
82	.globl _start_e500
83
84_start_e500:
85
86#if defined(CONFIG_SECURE_BOOT) && defined(CONFIG_E500MC)
87	/* ISBC uses L2 as stack.
88	 * Disable L2 cache here so that u-boot can enable it later
89	 * as part of its normal flow
90	*/
91
92	/* Check if L2 is enabled */
93	mfspr	r3, SPRN_L2CSR0
94	lis	r2, L2CSR0_L2E@h
95	ori	r2, r2, L2CSR0_L2E@l
96	and.	r4, r3, r2
97	beq	l2_disabled
98
99	mfspr r3, SPRN_L2CSR0
100	/* Flush L2 cache */
101	lis     r2,(L2CSR0_L2FL)@h
102	ori     r2, r2, (L2CSR0_L2FL)@l
103	or      r3, r2, r3
104	sync
105	isync
106	mtspr   SPRN_L2CSR0,r3
107	isync
	/* Busy-wait: hardware clears L2FL when the flush has completed */
1081:
109	mfspr r3, SPRN_L2CSR0
110	and. r1, r3, r2
111	bne 1b
112
	/* Flush done -- now clear L2E to actually disable the L2 */
113	mfspr r3, SPRN_L2CSR0
114	lis r2, L2CSR0_L2E@h
115	ori r2, r2, L2CSR0_L2E@l
116	andc r4, r3, r2
117	sync
118	isync
119	mtspr SPRN_L2CSR0,r4
120	isync
121
122l2_disabled:
123#endif
124
125/* clear registers/arrays not reset by hardware */
126
127	/* L1 */
	/* 0x2 = flash-invalidate bit (DCFI/ICFI) in L1CSR0/L1CSR1 */
128	li	r0,2
129	mtspr	L1CSR0,r0	/* invalidate d-cache */
130	mtspr	L1CSR1,r0	/* invalidate i-cache */
131
	/* DBSR bits are write-1-to-clear: write back what we just read */
132	mfspr	r1,DBSR
133	mtspr	DBSR,r1		/* Clear all valid bits */
134
135	/*
136	 *	Enable L1 Caches early
137	 *
138	 */
139
140#if defined(CONFIG_E500MC) && defined(CONFIG_SYS_CACHE_STASHING)
141	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
142	li	r2,(32 + 0)
143	mtspr	L1CSR2,r2
144#endif
145
146	/* Enable/invalidate the I-Cache */
147	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
148	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
149	mtspr	SPRN_L1CSR1,r2
	/* ICFI/ICLFR self-clear when the invalidate operation completes */
1501:
151	mfspr	r3,SPRN_L1CSR1
152	and.	r1,r3,r2
153	bne	1b
154
155	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
156	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
157	mtspr	SPRN_L1CSR1,r3
158	isync
	/* Poll until the I-cache enable bit reads back as set */
1592:
160	mfspr	r3,SPRN_L1CSR1
161	andi.	r1,r3,L1CSR1_ICE@l
162	beq	2b
163
164	/* Enable/invalidate the D-Cache */
165	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
166	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
167	mtspr	SPRN_L1CSR0,r2
	/* DCFI/DCLFR self-clear when the invalidate operation completes */
1681:
169	mfspr	r3,SPRN_L1CSR0
170	and.	r1,r3,r2
171	bne	1b
172
173	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
174	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
175	mtspr	SPRN_L1CSR0,r3
176	isync
	/* Poll until the D-cache enable bit reads back as set */
1772:
178	mfspr	r3,SPRN_L1CSR0
179	andi.	r1,r3,L1CSR0_DCE@l
180	beq	2b
181
182	/* Setup interrupt vectors */
	/* IVPR provides the high half of the vector base address; each IVORn
	 * holds the offset of one vector within that region.  The offsets
	 * chosen here match the vector layout emitted after _start_of_vectors.
	 * IVOR9 (Auxiliary Processor Unavailable) is deliberately skipped. */
183	lis	r1,CONFIG_SYS_MONITOR_BASE@h
184	mtspr	IVPR,r1
185
186	li	r1,0x0100
187	mtspr	IVOR0,r1	/* 0: Critical input */
188	li	r1,0x0200
189	mtspr	IVOR1,r1	/* 1: Machine check */
190	li	r1,0x0300
191	mtspr	IVOR2,r1	/* 2: Data storage */
192	li	r1,0x0400
193	mtspr	IVOR3,r1	/* 3: Instruction storage */
194	li	r1,0x0500
195	mtspr	IVOR4,r1	/* 4: External interrupt */
196	li	r1,0x0600
197	mtspr	IVOR5,r1	/* 5: Alignment */
198	li	r1,0x0700
199	mtspr	IVOR6,r1	/* 6: Program check */
200	li	r1,0x0800
201	mtspr	IVOR7,r1	/* 7: floating point unavailable */
202	li	r1,0x0900
203	mtspr	IVOR8,r1	/* 8: System call */
204	/* 9: Auxiliary processor unavailable(unsupported) */
205	li	r1,0x0a00
206	mtspr	IVOR10,r1	/* 10: Decrementer */
207	li	r1,0x0b00
208	mtspr	IVOR11,r1	/* 11: Interval timer */
209	li	r1,0x0c00
210	mtspr	IVOR12,r1	/* 12: Watchdog timer */
211	li	r1,0x0d00
212	mtspr	IVOR13,r1	/* 13: Data TLB error */
213	li	r1,0x0e00
214	mtspr	IVOR14,r1	/* 14: Instruction TLB error */
215	li	r1,0x0f00
216	mtspr	IVOR15,r1	/* 15: Debug */
217
218	/* Clear and set up some registers. */
219	li      r0,0x0000
	/* r1 = 0xffff0000 -- TSR bits are write-1-to-clear */
220	lis	r1,0xffff
221	mtspr	DEC,r0			/* prevent dec exceptions */
222	mttbl	r0			/* prevent fit & wdt exceptions */
223	mttbu	r0
224	mtspr	TSR,r1			/* clear all timer exception status */
225	mtspr	TCR,r0			/* disable all */
226	mtspr	ESR,r0			/* clear exception syndrome register */
227	mtspr	MCSR,r0			/* machine check syndrome register */
228	mtxer	r0			/* clear integer exception register */
229
230#ifdef CONFIG_SYS_BOOK3E_HV
231	mtspr	MAS8,r0			/* make sure MAS8 is clear */
232#endif
233
234	/* Enable Time Base and Select Time Base Clock */
235	lis	r0,HID0_EMCP@h		/* Enable machine check */
236#if defined(CONFIG_ENABLE_36BIT_PHYS)
237	ori	r0,r0,HID0_ENMAS7@l	/* Enable MAS7 */
238#endif
239#ifndef CONFIG_E500MC
240	ori	r0,r0,HID0_TBEN@l	/* Enable Timebase */
241#endif
242	mtspr	HID0,r0
243
244#ifndef CONFIG_E500MC
245	li	r0,(HID1_ASTME|HID1_ABE)@l	/* Addr streaming & broadcast */
	/* Low byte of PVR encodes the core revision (e.g. 0x50 = rev 5.0) */
246	mfspr	r3,PVR
247	andi.	r3,r3, 0xff
248	cmpwi	r3,0x50@l	/* if we are rev 5.0 or greater set MBDD */
249	blt 1f
250	/* Set MBDD bit also */
251	ori r0, r0, HID1_MBDD@l
2521:
253	mtspr	HID1,r0
254#endif
255
256	/* Enable Branch Prediction */
257#if defined(CONFIG_BTB)
258	lis	r0,BUCSR_ENABLE@h
259	ori	r0,r0,BUCSR_ENABLE@l
260	mtspr	SPRN_BUCSR,r0
261#endif
262
	/* Optional early debug setup: clear pending debug status (DBSR is
	 * write-1-to-clear), then program DBCR0 with the board's value. */
263#if defined(CONFIG_SYS_INIT_DBCR)
264	lis	r1,0xffff
265	ori	r1,r1,0xffff
266	mtspr	DBSR,r1			/* Clear all status bits */
267	lis	r0,CONFIG_SYS_INIT_DBCR@h	/* DBCR0[IDM] must be set */
268	ori	r0,r0,CONFIG_SYS_INIT_DBCR@l
269	mtspr	DBCR0,r0
270#endif
271
272#ifdef CONFIG_MPC8569
273#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
274#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)
275
276	/* MPC8569 Rev.0 silicon needs to set bit 13 of LBCR to allow elBC to
277	 * use address space which is more than 12bits, and it must be done in
278	 * the 4K boot page. So we set this bit here.
279	 */
280
281	/* create a temp mapping TLB0[0] for LBCR  */
282	lis     r6,FSL_BOOKE_MAS0(0, 0, 0)@h
283	ori     r6,r6,FSL_BOOKE_MAS0(0, 0, 0)@l
284
285	lis     r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
286	ori     r7,r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l
287
	/* Identity-map the LBC register page, cache-inhibited + guarded */
288	lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@h
289	ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@l
290
291	lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
292						(MAS3_SX|MAS3_SW|MAS3_SR))@h
293	ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
294						(MAS3_SX|MAS3_SW|MAS3_SR))@l
295
296	mtspr   MAS0,r6
297	mtspr   MAS1,r7
298	mtspr   MAS2,r8
299	mtspr   MAS3,r9
300	isync
301	msync
302	tlbwe
303
304	/* Set LBCR register */
305	lis     r4,CONFIG_SYS_LBCR_ADDR@h
306	ori     r4,r4,CONFIG_SYS_LBCR_ADDR@l
307
308	lis     r5,CONFIG_SYS_LBC_LBCR@h
309	ori     r5,r5,CONFIG_SYS_LBC_LBCR@l
310	stw     r5,0(r4)
311	isync
312
313	/* invalidate this temp TLB */
314	lis	r4,CONFIG_SYS_LBC_ADDR@h
315	ori	r4,r4,CONFIG_SYS_LBC_ADDR@l
316	tlbivax	0,r4
317	isync
318
319#endif /* CONFIG_MPC8569 */
320
321/*
322 * Search for the TLB that covers the code we're executing, and shrink it
323 * so that it covers only this 4K page.  That will ensure that any other
324 * TLB we create won't interfere with it.  We assume that the TLB exists,
325 * which is why we don't check the Valid bit of MAS1.
326 *
327 * This is necessary, for example, when booting from the on-chip ROM,
328 * which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
329 * If we don't shrink this TLB now, then we'll accidentally delete it
330 * in "purge_old_ccsr_tlb" below.
331 */
	/* Classic bl/mflr trick: obtain the current PC in r1 */
332	bl	nexti		/* Find our address */
333nexti:	mflr	r1		/* R1 = our PC */
334	li	r2, 0
335	mtspr	MAS6, r2	/* Assume the current PID and AS are 0 */
336	isync
337	msync
338	tlbsx	0, r1		/* This must succeed */
	/* The MAS registers now describe the TLB entry that maps our PC */
339
340	/* Set the size of the TLB to 4KB */
341	mfspr	r3, MAS1
	/* 0xF00 = mask of the MAS1 TSIZE field */
342	li	r2, 0xF00
343	andc	r3, r3, r2	/* Clear the TSIZE bits */
344	ori	r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
345	mtspr	MAS1, r3
346
347	/*
348	 * Set the base address of the TLB to our PC.  We assume that
349	 * virtual == physical.  We also assume that MAS2_EPN == MAS3_RPN.
350	 */
351	lis	r3, MAS2_EPN@h
352	ori	r3, r3, MAS2_EPN@l	/* R3 = MAS2_EPN */
353
354	and	r1, r1, r3	/* Our PC, rounded down to the nearest page */
355
356	mfspr	r2, MAS2
357	andc	r2, r2, r3
358	or	r2, r2, r1
359	mtspr	MAS2, r2	/* Set the EPN to our PC base address */
360
361	mfspr	r2, MAS3
362	andc	r2, r2, r3
363	or	r2, r2, r1
364	mtspr	MAS3, r2	/* Set the RPN to our PC base address */
365
366	isync
367	msync
368	tlbwe
369
370/*
371 * Relocate CCSR, if necessary.  We relocate CCSR if (obviously) the default
372 * location is not where we want it.  This typically happens on a 36-bit
373 * system, where we want to move CCSR to near the top of 36-bit address space.
374 *
375 * To move CCSR, we create two temporary TLBs, one for the old location, and
376 * another for the new location.  On CoreNet systems, we also need to create
377 * a special, temporary LAW.
378 *
379 * As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
380 * long-term TLBs, so we use TLB0 here.
381 */
382#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)
383
384#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
385#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
386#endif
387
388purge_old_ccsr_tlb:
	/* R8 = new CCSR virtual address, R9 = old CCSR virtual address.
	 * Both registers stay reserved across the whole CCSR-relocation
	 * sequence that follows. */
389	lis	r8, CONFIG_SYS_CCSRBAR@h
390	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
391	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
392	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
393
394	/*
395	 * In a multi-stage boot (e.g. NAND boot), a previous stage may have
396	 * created a TLB for CCSR, which will interfere with our relocation
397	 * code.  Since we're going to create a new TLB for CCSR anyway,
398	 * it should be safe to delete this old TLB here.  We have to search
399	 * for it, though.
400	 */
401
402	li	r1, 0
403	mtspr	MAS6, r1	/* Search the current address space and PID */
404	isync
405	msync
406	tlbsx	0, r8
407	mfspr	r1, MAS1
408	andis.  r2, r1, MAS1_VALID@h	/* Check for the Valid bit */
409	beq     1f			/* Skip if no TLB found */
410
	/* Rewrite the matched entry with its Valid bit cleared */
411	rlwinm	r1, r1, 0, 1, 31	/* Clear Valid bit */
412	mtspr	MAS1, r1
413	isync
414	msync
415	tlbwe
4161:
417
418create_ccsr_new_tlb:
419	/*
420	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
421	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
422	 */
	/* TLB0 entry 0, 4K, cache-inhibited + guarded (device registers) */
423	lis     r0, FSL_BOOKE_MAS0(0, 0, 0)@h
424	ori     r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
425	lis     r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
426	ori     r1, r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l
427	lis     r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
428	ori     r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
429	lis     r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
430	ori     r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
	/* MAS7 carries the upper bits of the (36-bit) physical address */
431	lis	r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
432	ori	r7, r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
433	mtspr   MAS0, r0
434	mtspr   MAS1, r1
435	mtspr   MAS2, r2
436	mtspr   MAS3, r3
437	mtspr   MAS7, r7
438	isync
439	msync
440	tlbwe
441
442	/*
443	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
444	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
445	 */
446create_ccsr_old_tlb:
	/* TLB0 entry 1 this time (entry 0 maps the new CCSR location) */
447	lis     r0, FSL_BOOKE_MAS0(0, 1, 0)@h
448	ori     r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l
449	lis     r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h
450	ori     r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l
451	lis     r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@h
452	ori     r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_DEFAULT, 0, (MAS3_SW|MAS3_SR))@l
453	li	r7, 0	/* The default CCSR address is always a 32-bit number */
454	mtspr   MAS0, r0
455	/* MAS1 is the same as above */
456	mtspr   MAS2, r2
457	mtspr   MAS3, r3
458	mtspr   MAS7, r7
459	isync
460	msync
461	tlbwe
462
463	/*
464	 * We have a TLB for what we think is the current (old) CCSR.  Let's
465	 * verify that, otherwise we won't be able to move it.
466	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
467	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
468	 */
469verify_old_ccsr:
470	lis     r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
471	ori     r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
472#ifdef CONFIG_FSL_CORENET
473	lwz	r1, 4(r9)		/* CCSRBARL */
474#else
	/* Non-CoreNet CCSRBAR stores the base address >> 12; undo the shift */
475	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
476	slwi	r1, r1, 12
477#endif
478
479	cmpl	0, r0, r1
480
481	/*
482	 * If the value we read from CCSRBARL is not what we expect, then
483	 * enter an infinite loop.  This will at least allow a debugger to
484	 * halt execution and examine TLBs, etc.  There's no point in going
485	 * on.
486	 */
487infinite_debug_loop:
488	bne	infinite_debug_loop
	/* Falls through (equal) when CCSRBAR matched the expected default */
489
490#ifdef CONFIG_FSL_CORENET
491
	/* NOTE(review): CCSR_LAWBARH0 is defined but never referenced below;
	 * the code uses raw 0xc00/0xc04/0xc08 offsets instead. */
492#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
493#define LAW_EN		0x80000000
494#define LAW_SIZE_4K	0xb
495#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
496#define CCSRAR_C	0x80000000	/* Commit */
497
498create_temp_law:
499	/*
500	 * On CoreNet systems, we create the temporary LAW using a special LAW
501	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
502	 */
503	lis     r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
504	ori     r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
505	lis     r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
506	ori     r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
507	lis     r2, CCSRBAR_LAWAR@h
508	ori     r2, r2, CCSRBAR_LAWAR@l
509
	/* r9 still maps the OLD CCSR, so these stores hit the old LAW regs */
510	stw     r0, 0xc00(r9)	/* LAWBARH0 */
511	stw     r1, 0xc04(r9)	/* LAWBARL0 */
512	sync
513	stw     r2, 0xc08(r9)	/* LAWAR0 */
514
515	/*
516	 * Read back from LAWAR to ensure the update is complete.  e500mc
517	 * cores also require an isync.
518	 */
519	lwz	r0, 0xc08(r9)	/* LAWAR0 */
520	isync
521
522	/*
523	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
524	 * Follow this with an isync instruction. This forces any outstanding
525	 * accesses to configuration space to completion.
526	 */
527read_old_ccsrbar:
528	lwz	r0, 0(r9)	/* CCSRBARH */
529	lwz	r0, 4(r9)	/* CCSRBARL */
530	isync
531
532	/*
533	 * Write the new values for CCSRBARH and CCSRBARL to their old
534	 * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
535	 * has a new value written it loads a CCSRBARH shadow register. When
536	 * the CCSRBARL is written, the CCSRBARH shadow register contents
537	 * along with the CCSRBARL value are loaded into the CCSRBARH and
538	 * CCSRBARL registers, respectively.  Follow this with a sync
539	 * instruction.
540	 */
541write_new_ccsrbar:
542	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
543	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
544	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
545	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
546	lis	r2, CCSRAR_C@h
547	ori	r2, r2, CCSRAR_C@l
548
549	stw	r0, 0(r9)	/* Write to CCSRBARH */
550	sync			/* Make sure we write to CCSRBARH first */
551	stw	r1, 4(r9)	/* Write to CCSRBARL */
552	sync
553
554	/*
555	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
556	 * Follow this with a sync instruction.
557	 */
558	stw	r2, 8(r9)
559	sync
560
561	/* Delete the temporary LAW */
562delete_temp_law:
	/* CCSR has physically moved now, so go through r8 (the NEW mapping) */
563	li	r1, 0
564	stw	r1, 0xc08(r8)
565	sync
566	stw	r1, 0xc00(r8)
567	stw	r1, 0xc04(r8)
568	sync
569
570#else /* #ifdef CONFIG_FSL_CORENET */
571
572write_new_ccsrbar:
573	/*
574	 * Read the current value of CCSRBAR using a load word instruction
575	 * followed by an isync. This forces all accesses to configuration
576	 * space to complete.
577	 */
578	sync
579	lwz	r0, 0(r9)
580	isync
581
582/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
583#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
584			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))
585
	/* Non-CoreNet CCSRBAR holds the physical base address >> 12 */
586	/* Write the new value to CCSRBAR. */
587	lis	r0, CCSRBAR_PHYS_RS12@h
588	ori	r0, r0, CCSRBAR_PHYS_RS12@l
589	stw	r0, 0(r9)
590	sync
591
592	/*
593	 * The manual says to perform a load of an address that does not
594	 * access configuration space or the on-chip SRAM using an existing TLB,
595	 * but that doesn't appear to be necessary.  We will do the isync,
596	 * though.
597	 */
598	isync
599
600	/*
601	 * Read the contents of CCSRBAR from its new location, followed by
602	 * another isync.
603	 */
604	lwz	r0, 0(r8)
605	isync
606
607#endif  /* #ifdef CONFIG_FSL_CORENET */
608
609	/* Delete the temporary TLBs */
610delete_temp_tlbs:
	/* Invalidate TLB0 entry 0 (new-CCSR map) by rewriting it with
	 * MAS1 = 0, i.e. the Valid bit cleared */
611	lis     r0, FSL_BOOKE_MAS0(0, 0, 0)@h
612	ori     r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
613	li	r1, 0
614	lis     r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
615	ori     r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
616	mtspr   MAS0, r0
617	mtspr   MAS1, r1
618	mtspr   MAS2, r2
619	isync
620	msync
621	tlbwe
622
	/* Invalidate TLB0 entry 1 (old-CCSR map); MAS1 still holds 0 */
623	lis     r0, FSL_BOOKE_MAS0(0, 1, 0)@h
624	ori     r0, r0, FSL_BOOKE_MAS0(0, 1, 0)@l
625	lis     r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@h
626	ori     r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0x1000, (MAS2_I|MAS2_G))@l
627	mtspr   MAS0, r0
628	mtspr   MAS2, r2
629	isync
630	msync
631	tlbwe
632#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
633
634create_init_ram_area:
	/* TLB1 entry 15: an AS=1 mapping of the boot image, so execution
	 * can continue after the address-space switch below */
635	lis     r6,FSL_BOOKE_MAS0(1, 15, 0)@h
636	ori     r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
637
638#if !defined(CONFIG_SYS_RAMBOOT) && !defined(CONFIG_SECURE_BOOT)
639	/* create a temp mapping in AS=1 to the 4M boot window */
640	lis     r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@h
641	ori     r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_4M)@l
642
643	lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@h
644	ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE & 0xffc00000, (MAS2_I|MAS2_G))@l
645
646	/* The 85xx has the default boot window 0xff800000 - 0xffffffff */
647	lis     r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
648	ori     r9,r9,FSL_BOOKE_MAS3(0xffc00000, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
649#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_SECURE_BOOT)
650	/* create a temp mapping in AS = 1 for Flash mapping
651	 * created by PBL for ISBC code
652	*/
653	lis     r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h
654	ori     r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l
655
656	lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h
657	ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l
658
659	lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
660						(MAS3_SX|MAS3_SW|MAS3_SR))@h
661	ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_PBI_FLASH_WINDOW, 0,
662						(MAS3_SX|MAS3_SW|MAS3_SR))@l
663#else
664	/*
665	 * create a temp mapping in AS=1 to the 1M CONFIG_SYS_MONITOR_BASE space, the main
666	 * image has been relocated to CONFIG_SYS_MONITOR_BASE on the second stage.
667	 */
668	lis     r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@h
669	ori     r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_1M)@l
670
671	lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@h
672	ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_MONITOR_BASE, (MAS2_I|MAS2_G))@l
673
674	lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
675	ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_MONITOR_BASE, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
676#endif
677
678	mtspr   MAS0,r6
679	mtspr   MAS1,r7
680	mtspr   MAS2,r8
681	mtspr   MAS3,r9
682	isync
683	msync
684	tlbwe
685
686	/* create a temp mapping in AS=1 to the stack */
	/* TLB1 entry 14: 16K AS=1 mapping of the initial-RAM (stack) area */
687	lis     r6,FSL_BOOKE_MAS0(1, 14, 0)@h
688	ori     r6,r6,FSL_BOOKE_MAS0(1, 14, 0)@l
689
690	lis     r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_16K)@h
691	ori     r7,r7,FSL_BOOKE_MAS1(1, 1, 0, 1, BOOKE_PAGESZ_16K)@l
692
693	lis     r8,FSL_BOOKE_MAS2(CONFIG_SYS_INIT_RAM_ADDR, 0)@h
694	ori     r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_INIT_RAM_ADDR, 0)@l
695
696#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
697    defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
698	lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, 0,
699				(MAS3_SX|MAS3_SW|MAS3_SR))@h
700	ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, 0,
701				(MAS3_SX|MAS3_SW|MAS3_SR))@l
702	li      r10,CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH
703	mtspr	MAS7,r10
704#else
705	lis     r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@h
706	ori     r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_INIT_RAM_ADDR, 0, (MAS3_SX|MAS3_SW|MAS3_SR))@l
707#endif
708
709	mtspr   MAS0,r6
710	mtspr   MAS1,r7
711	mtspr   MAS2,r8
712	mtspr   MAS3,r9
713	isync
714	msync
715	tlbwe
716
	/* Switch to AS=1 via return-from-interrupt: SRR1 supplies the new
	 * MSR (IS|DS set) and SRR0 the resume address (switch_as) */
717	lis	r6,MSR_IS|MSR_DS@h
718	ori	r6,r6,MSR_IS|MSR_DS@l
719	lis	r7,switch_as@h
720	ori	r7,r7,switch_as@l
721
722	mtspr	SPRN_SRR0,r7
723	mtspr	SPRN_SRR1,r6
724	rfi
725
726switch_as:
727/* L1 DCache is used for initial RAM */
728
729	/* Allocate Initial RAM in data cache.
730	 */
731	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
732	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	/* Low bits of L1CFG0 (masked with 0x1ff) give the cache size in KB */
733	mfspr	r2, L1CFG0
734	andi.	r2, r2, 0x1ff
735	/* cache size * 1024 / (2 * L1 line size) */
	/* i.e. number of lines in HALF the D-cache -- only half is locked */
736	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
737	mtctr	r2
738	li	r0,0
7391:
	/* dcbz establishes each line without a backing read; dcbtls then
	 * locks it so it behaves as RAM before real memory is set up */
740	dcbz	r0,r3
741	dcbtls	0,r0,r3
742	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
743	bdnz	1b
744
745	/* Jump out the last 4K page and continue to 'normal' start */
746#ifdef CONFIG_SYS_RAMBOOT
747	b	_start_cont
748#else
749	/* Calculate absolute address in FLASH and jump there		*/
750	/*--------------------------------------------------------------*/
751	lis	r3,CONFIG_SYS_MONITOR_BASE@h
752	ori	r3,r3,CONFIG_SYS_MONITOR_BASE@l
753	addi	r3,r3,_start_cont - _start + _START_OFFSET
754	mtlr	r3
755	blr
756#endif
757
758	.text
759	.globl	_start
760_start:
761	.long	0x27051956		/* U-BOOT Magic Number */
762	.globl	version_string
763version_string:
764	.ascii U_BOOT_VERSION_STRING, "\0"
765
766	.align	4
767	.globl	_start_cont
768_start_cont:
769	/* Setup the stack in initial RAM,could be L2-as-SRAM or L1 dcache*/
	/* Assumes the low 16 bits of CONFIG_SYS_INIT_RAM_ADDR are zero so
	 * ori can merge in the SP offset -- TODO confirm per board config */
770	lis	r1,CONFIG_SYS_INIT_RAM_ADDR@h
771	ori	r1,r1,CONFIG_SYS_INIT_SP_OFFSET@l
772
	/* Build a terminated back-chain so stack walks stop here */
773	li	r0,0
774	stwu	r0,-4(r1)
775	stwu	r0,-4(r1)		/* Terminate call chain */
776
777	stwu	r1,-8(r1)		/* Save back chain and move SP */
778	lis	r0,RESET_VECTOR@h	/* Address of reset vector */
779	ori	r0,r0,RESET_VECTOR@l
780	stwu	r1,-8(r1)		/* Save back chain and move SP */
781	stw	r0,+12(r1)		/* Save return addr (underflow vect) */
782
783	GET_GOT
784	bl	cpu_init_early_f
785
786	/* switch back to AS = 0 */
	/* mtmsr with IS/DS clear returns to address space 0 */
787	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
788	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
789	mtmsr	r3
790	isync
791
792	bl	cpu_init_f
793	bl	board_init_f
794	isync
795
796	/* NOTREACHED - board_init_f() does not return */
797
798#ifndef CONFIG_NAND_SPL
	/* Exception vectors, at the offsets programmed into IVOR0..IVOR4
	 * earlier.  Each macro expands to a prologue plus a jump to the
	 * named C handler. */
799	. = EXC_OFF_SYS_RESET
800	.globl	_start_of_vectors
801_start_of_vectors:
802
803/* Critical input. */
	/* NOTE(review): "CritcalInputException" (sic) matches the C handler
	 * symbol elsewhere in U-Boot; renaming must be done in both places. */
804	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)
805
806/* Machine check */
807	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)
808
809/* Data Storage exception. */
810	STD_EXCEPTION(0x0300, DataStorage, UnknownException)
811
812/* Instruction Storage exception. */
813	STD_EXCEPTION(0x0400, InstStorage, UnknownException)
814
815/* External Interrupt exception. */
816	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)
817
818/* Alignment exception. */
819	. = 0x0600
820Alignment:
	/* r21 = trap-frame pointer established by EXCEPTION_PROLOG (see
	 * transfer_to_handler); capture DAR/DSISR into the frame too */
821	EXCEPTION_PROLOG(SRR0, SRR1)
822	mfspr	r4,DAR
823	stw	r4,_DAR(r21)
824	mfspr	r5,DSISR
825	stw	r5,_DSISR(r21)
826	addi	r3,r1,STACK_FRAME_OVERHEAD
827	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)
828
829/* Program check exception */
830	. = 0x0700
831ProgramCheck:
832	EXCEPTION_PROLOG(SRR0, SRR1)
	/* r3 = pointer to the saved register frame, for the C handler */
833	addi	r3,r1,STACK_FRAME_OVERHEAD
834	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
835		MSR_KERNEL, COPY_EE)
836
837	/* No FPU on MPC85xx.  This exception is not supposed to happen.
838	*/
839	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
840
841	. = 0x0900
842/*
843 * r0 - SYSCALL number
844 * r3-... arguments
845 */
846SystemCall:
	/* The addis/ori pairs below are zero placeholders that trap_init()
	 * patches at runtime with the function-table address and its size.
	 * A small private push-down stack pointer lives at vector offset
	 * 0xd00-4; all 0xc00+... constants are vector-space addresses. */
847	addis	r11,r0,0	/* get functions table addr */
848	ori	r11,r11,0	/* Note: this code is patched in trap_init */
849	addis	r12,r0,0	/* get number of functions */
850	ori	r12,r12,0
851
	/* Reject out-of-range syscall numbers */
852	cmplw	0,r0,r12
853	bge	1f
854
855	rlwinm	r0,r0,2,0,31	/* fn_addr = fn_tbl[r0] */
856	add	r11,r11,r0
857	lwz	r11,0(r11)
858
859	li	r20,0xd00-4	/* Get stack pointer */
860	lwz	r12,0(r20)
861	subi	r12,r12,12	/* Adjust stack pointer */
862	li	r0,0xc00+_end_back-SystemCall
863	cmplw	0,r0,r12	/* Check stack overflow */
864	bgt	1f
865	stw	r12,0(r20)
866
	/* Save LR, SRR0 and SRR1 in the private 3-word frame */
867	mflr	r0
868	stw	r0,0(r12)
869	mfspr	r0,SRR0
870	stw	r0,4(r12)
871	mfspr	r0,SRR1
872	stw	r0,8(r12)
873
	/* Arrange for the function to return to _back, then rfi into it */
874	li	r12,0xc00+_back-SystemCall
875	mtlr	r12
876	mtspr	SRR0,r11
877
8781:	SYNC
879	rfi
880_back:
881
882	mfmsr	r11			/* Disable interrupts */
883	li	r12,0
884	ori	r12,r12,MSR_EE
885	andc	r11,r11,r12
886	SYNC				/* Some chip revs need this... */
887	mtmsr	r11
888	SYNC
889
890	li	r12,0xd00-4		/* restore regs */
891	lwz	r12,0(r12)
892
	/* Restore LR, SRR0, SRR1 saved above, then pop the frame */
893	lwz	r11,0(r12)
894	mtlr	r11
895	lwz	r11,4(r12)
896	mtspr	SRR0,r11
897	lwz	r11,8(r12)
898	mtspr	SRR1,r11
899
900	addi	r12,r12,12		/* Adjust stack pointer */
901	li	r20,0xd00-4
902	stw	r12,0(r20)
903
904	SYNC
905	rfi
906_end_back:
907
	/* Remaining vectors, matching the offsets in IVOR10..IVOR15 above */
908	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
909	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
910	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)
911
912	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
913	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)
914
915	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )
916
917	.globl	_end_of_vectors
918_end_of_vectors:
919
920
921	. = . + (0x100 - ( . & 0xff ))	/* align for debug */
922
923/*
924 * This code finishes saving the registers to the exception frame
925 * and jumps to the appropriate handler for the exception.
926 * Register r21 is pointer into trap frame, r1 has new stack pointer.
927 */
928	.globl	transfer_to_handler
929transfer_to_handler:
	/* r22/r23 = saved SRR0/SRR1 (NIP/MSR), r20 = MSR for the rfi --
	 * presumably loaded by EXCEPTION_PROLOG / EXC_XFER_TEMPLATE;
	 * confirm in ppc_asm.tmpl */
930	stw	r22,_NIP(r21)
931	lis	r22,MSR_POW@h
932	andc	r23,r23,r22		/* never resume with POW set */
933	stw	r23,_MSR(r21)
934	SAVE_GPR(7, r21)
935	SAVE_4GPRS(8, r21)
936	SAVE_8GPRS(12, r21)
937	SAVE_8GPRS(24, r21)
938
	/* LR points into the vector area; its low bits give the trap number */
939	mflr	r23
940	andi.	r24,r23,0x3f00		/* get vector offset */
941	stw	r24,TRAP(r21)
942	li	r22,0
943	stw	r22,RESULT(r21)
944	mtspr	SPRG2,r22		/* r1 is now kernel sp */
945
	/* The word pair at LR holds the handler address and return address */
946	lwz	r24,0(r23)		/* virtual address of handler */
947	lwz	r23,4(r23)		/* where to go when done */
948	mtspr	SRR0,r24
949	mtspr	SRR1,r20
950	mtlr	r23
951	SYNC
952	rfi				/* jump to handler, enable MMU */
953
954int_return:
	/* Standard-exception return: unwind the trap frame built by
	 * transfer_to_handler and resume via SRR0/SRR1 + rfi */
955	mfmsr	r28		/* Disable interrupts */
956	li	r4,0
957	ori	r4,r4,MSR_EE
958	andc	r28,r28,r4
959	SYNC			/* Some chip revs need this... */
960	mtmsr	r28
961	SYNC
962	lwz	r2,_CTR(r1)
963	lwz	r0,_LINK(r1)
964	mtctr	r2
965	mtlr	r0
966	lwz	r2,_XER(r1)
967	lwz	r0,_CCR(r1)
968	mtspr	XER,r2
969	mtcrf	0xFF,r0
970	REST_10GPRS(3, r1)
971	REST_10GPRS(13, r1)
972	REST_8GPRS(23, r1)
973	REST_GPR(31, r1)
974	lwz	r2,_NIP(r1)	/* Restore environment */
975	lwz	r0,_MSR(r1)
976	mtspr	SRR0,r2
977	mtspr	SRR1,r0
	/* r0/r2/r1 restored last since they were used as scratch above */
978	lwz	r0,GPR0(r1)
979	lwz	r2,GPR2(r1)
980	lwz	r1,GPR1(r1)
981	SYNC
982	rfi
983
984crit_return:
	/* Critical-interrupt return: same unwind as int_return, but resumes
	 * via the critical save/restore pair CSRR0/CSRR1 and rfci */
985	mfmsr	r28		/* Disable interrupts */
986	li	r4,0
987	ori	r4,r4,MSR_EE
988	andc	r28,r28,r4
989	SYNC			/* Some chip revs need this... */
990	mtmsr	r28
991	SYNC
992	lwz	r2,_CTR(r1)
993	lwz	r0,_LINK(r1)
994	mtctr	r2
995	mtlr	r0
996	lwz	r2,_XER(r1)
997	lwz	r0,_CCR(r1)
998	mtspr	XER,r2
999	mtcrf	0xFF,r0
1000	REST_10GPRS(3, r1)
1001	REST_10GPRS(13, r1)
1002	REST_8GPRS(23, r1)
1003	REST_GPR(31, r1)
1004	lwz	r2,_NIP(r1)	/* Restore environment */
1005	lwz	r0,_MSR(r1)
1006	mtspr	SPRN_CSRR0,r2
1007	mtspr	SPRN_CSRR1,r0
1008	lwz	r0,GPR0(r1)
1009	lwz	r2,GPR2(r1)
1010	lwz	r1,GPR1(r1)
1011	SYNC
1012	rfci
1013
1014mck_return:
	/* Machine-check return: same unwind as int_return, but resumes via
	 * the machine-check save/restore pair MCSRR0/MCSRR1 and rfmci */
1015	mfmsr	r28		/* Disable interrupts */
1016	li	r4,0
1017	ori	r4,r4,MSR_EE
1018	andc	r28,r28,r4
1019	SYNC			/* Some chip revs need this... */
1020	mtmsr	r28
1021	SYNC
1022	lwz	r2,_CTR(r1)
1023	lwz	r0,_LINK(r1)
1024	mtctr	r2
1025	mtlr	r0
1026	lwz	r2,_XER(r1)
1027	lwz	r0,_CCR(r1)
1028	mtspr	XER,r2
1029	mtcrf	0xFF,r0
1030	REST_10GPRS(3, r1)
1031	REST_10GPRS(13, r1)
1032	REST_8GPRS(23, r1)
1033	REST_GPR(31, r1)
1034	lwz	r2,_NIP(r1)	/* Restore environment */
1035	lwz	r0,_MSR(r1)
1036	mtspr	SPRN_MCSRR0,r2
1037	mtspr	SPRN_MCSRR1,r0
1038	lwz	r0,GPR0(r1)
1039	lwz	r2,GPR2(r1)
1040	lwz	r1,GPR1(r1)
1041	SYNC
1042	rfmci
1043
1044/* Cache functions.
1045*/
1046.globl flush_icache
1047flush_icache:
1048.globl invalidate_icache
1049invalidate_icache:
	/* Set L1CSR1[ICFI] to flash-invalidate the entire I-cache.  flush
	 * and invalidate are the same operation here (I-cache is never
	 * dirty), hence the shared entry point. */
1050	mfspr	r0,L1CSR1
1051	ori	r0,r0,L1CSR1_ICFI
1052	msync
1053	isync
1054	mtspr	L1CSR1,r0
1055	isync
1056	blr				/* entire I cache */
1057
1058.globl invalidate_dcache
1059invalidate_dcache:
	/* Set L1CSR0[DCFI] to flash-invalidate the entire D-cache */
1060	mfspr	r0,L1CSR0
1061	ori	r0,r0,L1CSR0_DCFI
1062	msync
1063	isync
1064	mtspr	L1CSR0,r0
1065	isync
1066	blr
1067
1068	.globl	icache_enable
1069icache_enable:
	/* Invalidate first, then enable.  LR is preserved across the call. */
1070	mflr	r8
1071	bl	invalidate_icache
1072	mtlr	r8
1073	isync
	/* Sets bits 0x00010001 in L1CSR1 -- presumably CPE|ICE, matching
	 * the early cache init above; confirm bit values in the e500 RM */
1074	mfspr	r4,L1CSR1
1075	ori	r4,r4,0x0001
1076	oris	r4,r4,0x0001
1077	mtspr	L1CSR1,r4
1078	isync
1079	blr
1080
1081	.globl	icache_disable
1082icache_disable:
	/* Clear only L1CSR1[ICE]; all other control bits are preserved */
1083	mfspr	r0,L1CSR1
1084	lis	r3,0
1085	ori	r3,r3,L1CSR1_ICE
1086	andc	r0,r0,r3
1087	mtspr	L1CSR1,r0
1088	isync
1089	blr
1090
1091	.globl	icache_status
1092icache_status:
	/* Returns nonzero in r3 iff the I-cache is enabled */
1093	mfspr	r3,L1CSR1
1094	andi.	r3,r3,L1CSR1_ICE
1095	blr
1096
1097	.globl	dcache_enable
1098dcache_enable:
	/* Invalidate first, then enable.  LR is preserved across the call. */
1099	mflr	r8
1100	bl	invalidate_dcache
1101	mtlr	r8
1102	isync
	/* Sets bits 0x00010001 in L1CSR0 -- presumably CPE|DCE, matching
	 * the early cache init above; confirm bit values in the e500 RM */
1103	mfspr	r0,L1CSR0
1104	ori	r0,r0,0x0001
1105	oris	r0,r0,0x0001
1106	msync
1107	isync
1108	mtspr	L1CSR0,r0
1109	isync
1110	blr
1111
1112	.globl	dcache_disable
1113dcache_disable:
	/* Clear only L1CSR0[DCE]; all other control bits are preserved */
1114	mfspr	r3,L1CSR0
1115	lis	r4,0
1116	ori	r4,r4,L1CSR0_DCE
1117	andc	r3,r3,r4
1118	mtspr	L1CSR0,r3
1119	isync
1120	blr
1121
1122	.globl	dcache_status
1123dcache_status:
	/* Returns nonzero in r3 iff the D-cache is enabled */
1124	mfspr	r3,L1CSR0
1125	andi.	r3,r3,L1CSR0_DCE
1126	blr
1127
1128	.globl get_pir
1129get_pir:
	/* Return the Processor ID Register in r3 */
1130	mfspr	r3,PIR
1131	blr
1132
1133	.globl get_pvr
1134get_pvr:
	/* Return the Processor Version Register in r3 */
1135	mfspr	r3,PVR
1136	blr
1137
1138	.globl get_svr
1139get_svr:
	/* Return the System Version Register in r3 */
1140	mfspr	r3,SVR
1141	blr
1142
1143	.globl wr_tcr
1144wr_tcr:
	/* Write r3 (first C argument) to the Timer Control Register */
1145	mtspr	TCR,r3
1146	blr
1147
1148/*------------------------------------------------------------------------------- */
1149/* Function:	 in8 */
1150/* Description:	 Input 8 bits */
1151/*------------------------------------------------------------------------------- */
1152	.globl	in8
1153in8:
1154	lbz	r3,0x0000(r3)
1155	blr
1156
1157/*------------------------------------------------------------------------------- */
1158/* Function:	 out8 */
1159/* Description:	 Output 8 bits */
1160/*------------------------------------------------------------------------------- */
1161	.globl	out8
1162out8:
1163	stb	r4,0x0000(r3)
1164	sync
1165	blr
1166
1167/*------------------------------------------------------------------------------- */
1168/* Function:	 out16 */
1169/* Description:	 Output 16 bits */
1170/*------------------------------------------------------------------------------- */
1171	.globl	out16
1172out16:
1173	sth	r4,0x0000(r3)
1174	sync
1175	blr
1176
1177/*------------------------------------------------------------------------------- */
1178/* Function:	 out16r */
1179/* Description:	 Byte reverse and output 16 bits */
1180/*------------------------------------------------------------------------------- */
1181	.globl	out16r
1182out16r:
1183	sthbrx	r4,r0,r3
1184	sync
1185	blr
1186
1187/*------------------------------------------------------------------------------- */
1188/* Function:	 out32 */
1189/* Description:	 Output 32 bits */
1190/*------------------------------------------------------------------------------- */
1191	.globl	out32
1192out32:
1193	stw	r4,0x0000(r3)
1194	sync
1195	blr
1196
1197/*------------------------------------------------------------------------------- */
1198/* Function:	 out32r */
1199/* Description:	 Byte reverse and output 32 bits */
1200/*------------------------------------------------------------------------------- */
1201	.globl	out32r
1202out32r:
1203	stwbrx	r4,r0,r3
1204	sync
1205	blr
1206
1207/*------------------------------------------------------------------------------- */
1208/* Function:	 in16 */
1209/* Description:	 Input 16 bits */
1210/*------------------------------------------------------------------------------- */
1211	.globl	in16
1212in16:
1213	lhz	r3,0x0000(r3)
1214	blr
1215
1216/*------------------------------------------------------------------------------- */
1217/* Function:	 in16r */
1218/* Description:	 Input 16 bits and byte reverse */
1219/*------------------------------------------------------------------------------- */
1220	.globl	in16r
1221in16r:
1222	lhbrx	r3,r0,r3
1223	blr
1224
1225/*------------------------------------------------------------------------------- */
1226/* Function:	 in32 */
1227/* Description:	 Input 32 bits */
1228/*------------------------------------------------------------------------------- */
1229	.globl	in32
1230in32:
1231	lwz	3,0x0000(3)
1232	blr
1233
1234/*------------------------------------------------------------------------------- */
1235/* Function:	 in32r */
1236/* Description:	 Input 32 bits and byte reverse */
1237/*------------------------------------------------------------------------------- */
1238	.globl	in32r
1239in32r:
1240	lwbrx	r3,r0,r3
1241	blr
1242#endif  /* !CONFIG_NAND_SPL */
1243
1244/*------------------------------------------------------------------------------*/
1245
1246/*
1247 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
1248 */
1249	.globl	write_tlb
1250write_tlb:
1251	mtspr	MAS0,r3
1252	mtspr	MAS1,r4
1253	mtspr	MAS2,r5
1254	mtspr	MAS3,r6
1255#ifdef CONFIG_ENABLE_36BIT_PHYS
1256	mtspr	MAS7,r7
1257#endif
1258	li	r3,0
1259#ifdef CONFIG_SYS_BOOK3E_HV
1260	mtspr	MAS8,r3
1261#endif
1262	isync
1263	tlbwe
1264	msync
1265	isync
1266	blr
1267
1268/*
1269 * void relocate_code (addr_sp, gd, addr_moni)
1270 *
1271 * This "function" does not return, instead it continues in RAM
1272 * after relocating the monitor code.
1273 *
1274 * r3 = dest
1275 * r4 = src
1276 * r5 = length in bytes
1277 * r6 = cachelinesize
1278 */
1279	.globl	relocate_code
1280relocate_code:
1281	mr	r1,r3		/* Set new stack pointer		*/
1282	mr	r9,r4		/* Save copy of Init Data pointer	*/
1283	mr	r10,r5		/* Save copy of Destination Address	*/
1284
1285	GET_GOT
1286	mr	r3,r5				/* Destination Address	*/
1287	lis	r4,CONFIG_SYS_MONITOR_BASE@h		/* Source      Address	*/
1288	ori	r4,r4,CONFIG_SYS_MONITOR_BASE@l
1289	lwz	r5,GOT(__init_end)
1290	sub	r5,r5,r4
1291	li	r6,CONFIG_SYS_CACHELINE_SIZE		/* Cache Line Size	*/
1292
1293	/*
1294	 * Fix GOT pointer:
1295	 *
1296	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE) + Destination Address
1297	 *
1298	 * Offset:
1299	 */
1300	sub	r15,r10,r4
1301
1302	/* First our own GOT */
1303	add	r12,r12,r15
1304	/* the the one used by the C code */
1305	add	r30,r30,r15
1306
1307	/*
1308	 * Now relocate code
1309	 */
1310
1311	cmplw	cr1,r3,r4
1312	addi	r0,r5,3
1313	srwi.	r0,r0,2
1314	beq	cr1,4f		/* In place copy is not necessary	*/
1315	beq	7f		/* Protect against 0 count		*/
1316	mtctr	r0
1317	bge	cr1,2f
1318
1319	la	r8,-4(r4)
1320	la	r7,-4(r3)
13211:	lwzu	r0,4(r8)
1322	stwu	r0,4(r7)
1323	bdnz	1b
1324	b	4f
1325
13262:	slwi	r0,r0,2
1327	add	r8,r4,r0
1328	add	r7,r3,r0
13293:	lwzu	r0,-4(r8)
1330	stwu	r0,-4(r7)
1331	bdnz	3b
1332
1333/*
1334 * Now flush the cache: note that we must start from a cache aligned
1335 * address. Otherwise we might miss one cache line.
1336 */
13374:	cmpwi	r6,0
1338	add	r5,r3,r5
1339	beq	7f		/* Always flush prefetch queue in any case */
1340	subi	r0,r6,1
1341	andc	r3,r3,r0
1342	mr	r4,r3
13435:	dcbst	0,r4
1344	add	r4,r4,r6
1345	cmplw	r4,r5
1346	blt	5b
1347	sync			/* Wait for all dcbst to complete on bus */
1348	mr	r4,r3
13496:	icbi	0,r4
1350	add	r4,r4,r6
1351	cmplw	r4,r5
1352	blt	6b
13537:	sync			/* Wait for all icbi to complete on bus */
1354	isync
1355
1356	/*
1357	 * Re-point the IVPR at RAM
1358	 */
1359	mtspr	IVPR,r10
1360
1361/*
1362 * We are done. Do not return, instead branch to second part of board
1363 * initialization, now running from RAM.
1364 */
1365
1366	addi	r0,r10,in_ram - _start + _START_OFFSET
1367	mtlr	r0
1368	blr				/* NEVER RETURNS! */
	.globl	in_ram
in_ram:

	/*
	 * Relocation Function, r12 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 * r11 = relocation offset (new table address - old).
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0			/* skip NULL entries		*/
	beq-	2f
	add	r0,r0,r11		/* relocate the pointer		*/
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f			/* no fixup entries		*/
3:	lwzu	r4,4(r3)		/* r4 = address of a fixup	*/
	lwzux	r0,r4,r11		/* r0 = *(r4+r11); r4 += r11	*/
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)		/* store relocated fixup ptr	*/
	beq-	5f			/* don't relocate a NULL target	*/
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end__)

	cmplw	0,r3,r4
	beq	6f			/* empty BSS			*/

	li	r0,0
5:
	stw	r0,0(r3)
	addi	r3,r3,4
	cmplw	0,r3,r4
	bne	5b
6:

	/* Continue board init in C; board_init_r never returns here. */
	mr	r3,r9		/* Init Data pointer		*/
	mr	r4,r10		/* Destination Address		*/
	bl	board_init_r
1431
#ifndef CONFIG_NAND_SPL
	/*
	 * Copy exception vector code to low memory
	 *
	 * r4: saved link register
	 * r7: source address, r8: end address, r9: target address
	 *
	 * After the copy, each vector's `hdlr' and `int_return' words
	 * are relocated via trap_reloc, and IVPR is pointed at 0.
	 */
	.globl	trap_init
trap_init:
	mflr	r4			/* save link register		*/
	GET_GOT
	lwz	r7,GOT(_start_of_vectors)
	lwz	r8,GOT(_end_of_vectors)

	li	r9,0x100		/* reset vector always at 0x100 */

	cmplw	0,r7,r8
	bgelr				/* return if r7>=r8 - just in case */
1:
	lwz	r0,0(r7)
	stw	r0,0(r9)
	addi	r7,r7,4
	addi	r9,r9,4
	cmplw	0,r7,r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
	li	r7,.L_CriticalInput - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_MachineCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_DataStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_InstStorage - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ExtInterrupt - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Alignment - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_ProgramCheck - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_FPUnavailable - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_Decrementer - _start + _START_OFFSET
	bl	trap_reloc
	li	r7,.L_IntervalTimer - _start + _START_OFFSET
	li	r8,_end_of_vectors - _start + _START_OFFSET
2:
	bl	trap_reloc
	addi	r7,r7,0x100		/* next exception vector	*/
	cmplw	0,r7,r8
	blt	2b

	/* vectors now live at 0, so point IVPR there */
	lis	r7,0x0
	mtspr	IVPR,r7

	mtlr	r4			/* restore link register	*/
	blr
1492
/*
 * unlock_ram_in_cache:
 * Tear down the locked-cache INIT_RAM region: dcbi every cache line
 * covering it, then tlbivax the four 4 KiB pages that mapped it.
 * Clobbers: r3, r4, CTR.
 */
.globl unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
	mfspr	r4,L1CFG0		/* r4 = L1 cache geometry	*/
	andi.	r4,r4,0x1ff		/* extract cache-size field	*/
	/*
	 * Line count = size_field << (10 - 1 - L1_CACHE_SHIFT), i.e.
	 * size * 1024 / (2 * line size).  NOTE(review): the extra "-1"
	 * halves the count -- presumably INIT_RAM occupies half the L1;
	 * confirm against the e500 manual / board configuration.
	 */
	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
	mtctr	r4
1:	dcbi	r0,r3			/* r0 in RA slot reads as 0: EA = r3 */
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b
	sync

	/* Invalidate the TLB entries for the cache */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	tlbivax	0,r3
	addi	r3,r3,0x1000		/* next 4 KiB page		*/
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	isync
	blr
1519
/*
 * flush_dcache:
 * Flush the entire L1 data cache.  First load enough distinct
 * addresses (with HID0[DCFA] set so replacement ignores lock/flush
 * assists) to displace every resident line, then dcbf the same range
 * to write back and invalidate.  Geometry is read from L1CFG0.
 * Clobbers: r3-r9, CTR.
 */
.globl flush_dcache
flush_dcache:
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 *      log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6	/* r7 = total number of loads / flushes */

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	/* Pass 1: loads starting at address 0 displace every line */
	lis	r4,0
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	lis	r4,0
	mtctr	r7

	/* Pass 2: write back and invalidate the same range */
1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr
1566
/*
 * setup_ivors:
 * Program the fixed IVOR (interrupt vector offset) registers; the
 * mtspr sequence itself is generated by the included fixed_ivor.S.
 */
.globl setup_ivors
setup_ivors:

#include "fixed_ivor.S"
	blr
#endif /* !CONFIG_NAND_SPL */
1573