/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#include "head_32.h"

#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
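
/*
 * A sketch of the table layout LOAD_BAT() assumes, inferred from the
 * offsets above: four words per BAT number n,
 *
 *	reg + n*16 + 0:  IBATnU		reg + n*16 + 4:  IBATnL
 *	reg + n*16 + 8:  DBATnU		reg + n*16 + 12: DBATnL
 *
 * The upper (valid-bit-carrying) registers are zeroed first so a
 * half-loaded BAT pair is never momentarily valid.
 */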

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_book3s_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons: the kernel used to
	 * need to look like a COFF function entry for the pmac,
	 * but we're always started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start, or 0 if there is no initrd
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
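	/*
	 * bcl 20,31,$+4 is the usual "branch and link to the next
	 * instruction" trick for reading the current PC into LR; the
	 * 20,31 form is the variant defined not to disturb the link
	 * stack predictor on CPUs that have one.
	 */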
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return.  We also hit this trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected. */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
BEGIN_MMU_FTR_SECTION
	bl	early_hash_table
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	bl	reloc_offset
	bl	init_idle_6xx


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * we now have low RAM mapped with the bats (256M, per initial_bats below).
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
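/*
 * The mtspr SRR0/SRR1 + RFI sequence below is the standard way to
 * change MSR and jump at the same time: RFI reloads MSR from SRR1 and
 * resumes at the address in SRR0, so translation comes on exactly at
 * start_here.
 */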
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	RFI				/* enables MMU */

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */
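
/*
 * Release protocol for the loop above (CONFIG_SMP): each held cpu
 * spins reading the word at physical address 0 until the master
 * writes that cpu's number there, then takes its number to
 * __secondary_start.
 */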

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done.  */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	DO_KVM  0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
	mfspr	r11, SPRN_SPRG_THREAD
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mfspr	r4, SPRN_SPRG_THREAD
	tovirt(r4, r4)
	lwz	r4, RTAS_SP(r4)
	cmpwi	cr1, r4, 0
#endif
	beq	cr1, machine_check_tramp
	twi	31, 0, 0
#else
	b	machine_check_tramp
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM  0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mfspr	r10, SPRN_SPRG_THREAD
BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
#ifdef CONFIG_PPC_KUAP
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */

/* Instruction access exception. */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte (later becomes ppc hardware pte)
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
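/*
 * The lookup below is a two-level page table walk; in rough C (a
 * sketch, not the kernel's actual helpers):
 *
 *	pmd = *(u32 *)(pgdir + ((ea >> 22) << 2));	// top 10 bits
 *	if ((pmd & ~0xfff) == 0)
 *		goto InstructionAddressInvalid;		// no pte page
 *	pte = *(u32 *)((pmd & ~0xfff) + (((ea >> 12) & 0x3ff) << 2));
 *
 * followed by the permission check and conversion to the hardware
 * PTE format that SPRN_RPA expects.
 */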
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#else
	li	r1,_PAGE_PRESENT | _PAGE_EXEC
#endif
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte (later becomes ppc hardware pte)
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
#else
	li	r1, _PAGE_PRESENT
#endif
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
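	/*
	 * The feature section below implements software way selection
	 * for the 603's 2-way set-associative DTLB: SPRN_SPRG_603_LRU
	 * keeps one bit per TLB set, the bit for the set indexed by EA
	 * bits 15:19 is toggled on every miss, and its new value is
	 * copied into the SRR1 bit that tells tlbld which way to
	 * replace, giving per-set round-robin replacement.
	 */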
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte (later becomes ppc hardware pte)
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
#else
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
#endif
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
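	/* Same software way-selection update as in DataLoadTLBMiss. */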
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)

#ifdef CONFIG_VMAP_STACK
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	RFI

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	RFI

stack_overflow:
	vmap_stack_overflow_exception
#endif

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)


/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
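/*
 * Two-phase copy (as the code below works out): the first 0x4000
 * bytes, which contain this routine and copy_and_flush, are copied
 * first; we then branch to the destination copy of label 4 so the
 * rest of the copy runs from code that is not itself being
 * overwritten.
 */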
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
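
/*
 * Roughly, in C (a sketch of the loop structure above):
 *
 *	for (off = 0; off < limit; ) {
 *		for (i = 0; i < L1_CACHE_BYTES; i += 4, off += 4)
 *			*(u32 *)(dest + off) = *(u32 *)(src + off);
 *		dcbst(dest + off);	// push the copied line to memory
 *		icbi(dest + off);	// toss any stale icache copy of it
 *	}
 *
 * The dcbst/icbi pair per line is what makes the copied kernel safe
 * to execute.
 */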

#ifdef CONFIG_SMP
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* HTABMASK = 3 -> 256kB table */
	mtspr	SPRN_SDR1, r6
	lis	r6, early_hash@h
	lis	r3, Hash@ha
	stw	r6, Hash@l(r3)
	blr

load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

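/*
 * Each of the 16 segment registers covers one 256MB segment of the
 * 4GB address space.  User segments get VSIDs starting at 0, stepped
 * by 0x111 per segment (matching the per-segment step used in
 * switch_mmu_context() below); kernel segments additionally get
 * Kp = 1 so that user-mode accesses to them are key-protected.
 */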
_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
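/*
 * Roughly, the VSID loaded for segment s of context c is (a sketch of
 * the arithmetic below):
 *
 *	VSID(c, s) = ((c * 897) << 4) + s * 0x111
 *
 * with any carry out of the VSID field masked off on each iteration.
 * The odd multiplier and per-segment step just scatter successive
 * contexts across the hash space cheaply.
 */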
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

	lwz	r4, MM_PGD(r4)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
	tophys(r4, r4)
	mtspr	SPRN_SPRG_PGDIR, r4
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)

/*
 * An undocumented "feature" of the 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear BATs 3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)
	mfmsr	r6
	mflr	r7
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	RFI
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r6
	RFI

flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI

/* We use one BAT to map up to 256M of RAM at PAGE_OFFSET */
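/*
 * Upper BAT word: BEPI = PAGE_OFFSET, BL = 256M, Vs = 1 (the 0x2);
 * lower BAT word: BRPN = physical base, PP = 0b10 (read/write), plus
 * the M (memory coherence) bit on SMP.
 */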
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr

#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * Set up the display BAT prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8, 0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8