/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#include "head_32.h"

#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
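
/*
 * Layout assumed above (matching the offsets used): each entry of the
 * BATS array is four words, { IBATU, IBATL, DBATU, DBATL }, hence the
 * n*16 stride.  The upper word of a BAT holds BEPI, the block-length
 * encoding and the Vs/Vp valid bits; the lower word holds BRPN plus
 * the WIMG and PP protection bits.
 */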

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_book3s_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
BEGIN_MMU_FTR_SECTION
	bl	early_hash_table
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	bl	reloc_offset
	bl	init_idle_6xx


/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * we now have the first 256M of ram mapped with the bats (see initial_bats).
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	RFI				/* enables MMU */
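
/*
 * Note: rfi loads the PC from SRR0 and the MSR from SRR1, so the
 * MSR_IR/MSR_DR bits set above take effect atomically with the
 * branch to start_here -- no instruction is fetched with translation
 * in a half-switched state.
 */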

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
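
/*
 * Handshake implemented above: each secondary writes its cpu # to
 * __secondary_hold_acknowledge, then spins reading the word at
 * physical address 0 until the boot cpu writes that same cpu #
 * there, at which point it branches to __secondary_start.
 */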

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done.  */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	DO_KVM  0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
	mfspr	r11, SPRN_SPRG_THREAD
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mfspr	r4, SPRN_SPRG_THREAD
	tovirt(r4, r4)
	lwz	r4, RTAS_SP(r4)
	cmpwi	cr1, r4, 0
#endif
	beq	cr1, machine_check_tramp
	twi	31, 0, 0
#else
	b	machine_check_tramp
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM  0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mfspr	r10, SPRN_SPRG_THREAD
BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
#ifdef CONFIG_PPC_KUAP
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */

/* Instruction access exception. */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte (later becomes ppc hardware pte)
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
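/*
 * Rough C sketch of the walk below (illustrative only, not kernel API):
 *
 *	pmd = *(u32 *)(pgdir + ((ea >> 22) << 2));	// top 10 bits of ea
 *	pte_page = pmd & 0xfffff000;
 *	if (!pte_page)
 *		goto InstructionAddressInvalid;
 *	pte = *(u32 *)(pte_page + (((ea >> 12) & 0x3ff) << 2)); // next 10 bits
 *
 * The rlwimi instructions below implement these shift/mask insertions
 * without needing extra scratch registers.
 */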
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte (later becomes ppc hardware pte)
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
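/*
 * Net protection encoding produced above (per the line comments):
 * kernel-only pte -> PP = 0, user r/w pte -> PP = 1, user read-only
 * pte -> PP = 3; combined with the segment Ks/Kp keys set up by
 * load_segment_registers this yields the intended access rights.
 */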
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
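/*
 * The feature section above is a software LRU for the 603's two-way
 * DTLB: one bit per TLB set (selected by address bits 15:19) lives in
 * SPRN_SPRG_603_LRU; it is toggled on each miss and the new value is
 * mirrored into the SRR1 way bit so that tlbld replaces the way that
 * was loaded less recently.
 */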
	tlbld	r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte (later becomes ppc hardware pte)
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)

#ifdef CONFIG_VMAP_STACK
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	RFI

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	RFI

stack_overflow:
	vmap_stack_overflow_exception
#endif

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)


/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to its final location at
 * PHYSICAL_START and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr
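
/*
 * copy_and_flush, as rough C (illustrative sketch only):
 *
 *	for (off = start; off < limit; off += L1_CACHE_BYTES) {
 *		memcpy(dst + off, src + off, L1_CACHE_BYTES);
 *		dcbst(dst + off);	// push the line to memory
 *		sync();
 *		icbi(dst + off);	// toss any stale icache line
 *	}
 *
 * One cache line at a time, keeping the icache coherent since the
 * copied kernel is about to be executed.
 */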

#ifdef CONFIG_SMP
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	lis	r6, early_hash@h
	lis	r3, Hash@ha
	stw	r6, Hash@l(r3)
	blr
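
/*
 * SDR1 layout on 32-bit hash MMUs: the high-order bits hold the
 * physical base of the hash table (HTABORG) and the low-order bits
 * hold HTABMASK, so the "ori r6, r6, 3" above selects a mask of 3,
 * i.e. a (3 + 1) * 64kB = 256kB table, matching the comment.
 */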

load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
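
/*
 * Net effect of the two loops above, as rough C (illustrative only;
 * flags_for() is shorthand, not a real helper):
 *
 *	for (i = 0; i < 16; i++)
 *		mtsrin(flags_for(i) | (i * 0x111), i << 28);
 *
 * where the first NUM_USER_SEGMENTS segments get the user flags
 * (Nx/Ks as configured) and the rest get Kp = 1; the 0x111 stride
 * just gives each segment register a distinct VSID for context 0.
 */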

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	RFI
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

	lwz	r4, MM_PGD(r4)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
	tophys(r4, r4)
	mtspr	SPRN_SPRG_PGDIR, r4
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)
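
/*
 * Rough C equivalent of the VSID setup above (illustrative only):
 *
 *	vsid = ((context * 897) << 4) & 0x00fffff0;
 *	for (i = 0; i < NUM_USER_SEGMENTS; i++) {
 *		mtsrin(flags | vsid, i << 28);
 *		vsid = (vsid + 0x111) & ~0x0f000000;	// drop overflow
 *	}
 *
 * The multiply by 897 skews consecutive context numbers so their
 * VSIDs spread across the hash table instead of clustering.
 */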

/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)
	mfmsr	r6
	mflr	r7
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	RFI
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r6
	RFI

flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI

/* We use one BAT to map up to 256M of RAM at PAGE_OFFSET */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
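
/*
 * Worked example (assuming PAGE_OFFSET = 0xc0000000): the code above
 * yields BAT0U = 0xc0000000 | (BL_256M << 2) | 0x2 = 0xc0001ffe
 * (256M block, Vs=1/Vp=0) and, on SMP, BAT0L = 0x00000012
 * (BRPN = 0, M = 1, PP = 0b10 i.e. read/write), mapping the first
 * 256M of RAM at PAGE_OFFSET.
 */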

#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8, 0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif


/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8