/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#include "head_32.h"

#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
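
/*
 * For reference, a rough C sketch of the 16-byte record LOAD_BAT reads
 * (field names here are illustrative; the actual array is the BATS data
 * set up by MMU_init):
 *
 *	struct { u32 ibat_u, ibat_l, dbat_u, dbat_l; } bats[8];
 *
 * The upper (valid) words are zeroed first so a BAT never becomes valid
 * while it still holds a stale or half-written translation.
 */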

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_book3s_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
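/*
 * 0x426f6f58 is simply the ASCII string "BooX" packed big-endian:
 * 'B' = 0x42, 'o' = 0x6f, 'o' = 0x6f, 'X' = 0x58.
 */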
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
BEGIN_MMU_FTR_SECTION
	bl	reloc_offset
	bl	early_hash_table
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	bl	reloc_offset
	bl	init_idle_6xx


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	rfi				/* enables MMU */
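
/*
 * The sequence above is the classic rfi trampoline used throughout this
 * file: rfi atomically copies SRR1 into the MSR and resumes at SRR0, so
 * loading the two SPRs and executing rfi changes translation state and
 * jumps in a single step.  Schematically:
 *
 *	SRR1 <- wanted MSR value      (here: MSR | MSR_DR|MSR_IR|MSR_RI)
 *	SRR0 <- wanted resume address (here: start_here)
 *	rfi			      =>  MSR = SRR1, jump to SRR0
 */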

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
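
/*
 * A hedged sketch of the master side of this handshake (the real code
 * lives in the C SMP bringup; names here are illustrative only):
 *
 *	while (__secondary_hold_acknowledge != cpu_nr)
 *		barrier();		wait for the cpu to park itself
 *	*(volatile u32 *)0 = cpu_nr;	then release it by writing its
 *					number at physical address 0
 */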

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done.  */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	DO_KVM  0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
	mfspr	r11, SPRN_SPRG_THREAD
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mfspr	r4, SPRN_SPRG_THREAD
	tovirt(r4, r4)
	lwz	r4, RTAS_SP(r4)
	cmpwi	cr1, r4, 0
#endif
	beq	cr1, machine_check_tramp
	twi	31, 0, 0
#else
	b	machine_check_tramp
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM  0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mfspr	r10, SPRN_SPRG_THREAD
BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
#ifdef CONFIG_PPC_KUAP
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */

/* Instruction access exception. */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
#ifdef CONFIG_PPC_FPU
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b 	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
#else
	b 	ProgramCheck
#endif

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
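/*
 * Worked example of the software walk below (illustrative): with 4kB
 * pages and 1024-entry tables, the faulting address in r3 splits as
 *
 *	top 10 bits  -> index into the pgdir (pmd entry)
 *	next 10 bits -> index into the pte page
 *	low 12 bits  -> offset within the page
 *
 * Each rlwimi below inserts one 10-bit index, pre-scaled by 4 for
 * word-sized entries, straight into the table base held in r2.
 */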
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 2: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess
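
/*
 * For reference, the PP protection encodings produced by the PTE
 * conversions in these TLB miss handlers (a summary per the 32-bit hash
 * MMU architecture; the access key is the Ks or Kp bit selected from
 * the segment register):
 *
 *	key = 0:  PP 0, 1, 2 = read/write;  PP 3 = read only
 *	key = 1:  PP 0 = no access;  PP 1, 3 = read only;  PP 2 = read/write
 *
 * User segments are set up with Ks = Kp = 0, kernel segments with
 * Kp = 1, which is how PP 0 (the kernel value) locks user mode out of
 * kernel pages.
 */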

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)

#ifdef CONFIG_VMAP_STACK
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	rfi

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	rfi

stack_overflow:
	vmap_stack_overflow_exception
#endif

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)


/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
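
/*
 * Roughly equivalent C, for reference (illustrative only):
 *
 *	void copy_and_flush(void *dst, void *src, long limit, long offset)
 *	{
 *		for (; offset < limit; offset += L1_CACHE_BYTES) {
 *			copy one cache line from src + offset to dst + offset;
 *			dcbst(dst + offset);	push the line to memory
 *			sync();
 *			icbi(dst + offset);	discard any stale icache copy
 *		}
 *	}
 *
 * so the relocated image is coherent in the instruction cache before
 * we branch into it.
 */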

#ifdef CONFIG_SMP
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	lis	r6, early_hash@h
	addis	r3, r3, Hash@ha
	stw	r6, Hash@l(r3)
	blr
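
/*
 * SDR1 layout on 32-bit hash MMUs, for reference: the upper bits hold
 * HTABORG (the physical base of the hash table, suitably aligned) and
 * the low bits hold HTABMASK, where a mask of N one-bits selects a
 * table of 64kB << N.  The "ori r6, r6, 3" above therefore sets
 * HTABMASK = 0b11, i.e. the 256kB table the comment names.
 */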

load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
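
/*
 * A compact sketch of the two loops above (illustrative):
 *
 *	for (seg = 0; seg < 16; seg++)
 *		SR[seg] = flags | (seg * 0x111);	VSID = seg * 0x111
 *
 * where the first NUM_USER_SEGMENTS registers get the user flags (plus
 * SR_NX/SR_KS when KUEP/KUAP is enabled) and the remainder get Kp = 1.
 * Stepping the VSID by 0x111 per segment gives each of the sixteen
 * segments a distinct VSID within context 0.
 */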

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

	lwz	r4, MM_PGD(r4)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
	tophys(r4, r4)
	mtspr	SPRN_SPRG_PGDIR, r4
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)
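
/*
 * The VSID generation above, written out as hedged C (context is the
 * MMU context id; seg is the segment number, stepped via 0x111; the
 * result is truncated to the VSID field):
 *
 *	vsid(context, seg) ~= ((context * 897) << 4) + seg * 0x111
 *
 * Multiplying by the skew factor 897 spreads consecutive context ids
 * across the VSID space so that their hash buckets don't all collide.
 */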

/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)
	mfmsr	r6
	mflr	r7
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	rfi
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r6
	rfi

flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfi

/* We use one BAT to map up to 256M of RAM at PAGE_OFFSET */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
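
/*
 * Decoding the constants above, for reference: the lower BAT word is
 * the physical base plus WIMG/PP bits, so 0x12 = memory coherence (M)
 * plus PP = 2 (read/write) and 0x2 = PP = 2 alone; the upper word is
 * the effective base plus the block length (BL_256M) and the Vs/Vp
 * valid bits, 0x2 meaning valid in supervisor mode only.
 */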

#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8,	0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
       .globl  m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8