/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#include "head_32.h"

#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
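
/*
 * LOAD_BAT zeroes the upper BAT registers first, clearing their valid
 * bits before the new values are written -- see the 604e erratum
 * described in the comment above clear_bats().
 */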

	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_book3s_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 *  -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop

/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */

/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
	bl	reloc_offset
	bl	early_hash_table
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	bl	reloc_offset
	bl	init_idle_6xx

/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * We now have the first 16MB of RAM mapped with the BATs.
 * prep needs the MMU to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	rfi				/* enables MMU */

/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

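/*
 * Shared words for the __secondary_hold handshake: the master releases
 * a held secondary by writing that cpu's number to physical address 0,
 * and each secondary posts its own number in
 * __secondary_hold_acknowledge (-1 means nobody has arrived yet).
 */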
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1

/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_async_exception) when done.  */
	EXCEPTION(0x100, Reset, unknown_async_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *	-- paulus.
 */
	. = 0x200
	DO_KVM  0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH2,r1
	mfspr	r1, SPRN_SPRG_THREAD
	lwz	r1, RTAS_SP(r1)
	cmpwi	cr1, r1, 0
	bne	cr1, 7f
	mfspr	r1, SPRN_SPRG_SCRATCH2
#else
	mfspr	r11, SPRN_SPRG_THREAD
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
	beq	cr1, machine_check_tramp
	twi	31, 0, 0
#else
	b	machine_check_tramp
#endif

/* Data access exception. */
	. = 0x300
	DO_KVM  0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	mtspr	SPRN_SPRG_SCRATCH2,r10
	mfspr	r10, SPRN_SPRG_THREAD
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH2
MMU_FTR_SECTION_ELSE
	b	1f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif
1:	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
MMU_FTR_SECTION_ELSE
#endif
	b	handle_page_fault_tramp_2
#ifdef CONFIG_PPC_BOOK3S_604
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif
#endif	/* CONFIG_VMAP_STACK */

/* Instruction access exception. */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
#endif	/* CONFIG_VMAP_STACK */
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r5, _DSISR(r11)
	stw	r12, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
#ifdef CONFIG_PPC_FPU
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
#else
	b	ProgramCheck
#endif

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable

/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#ifdef CONFIG_MODULES
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SDR1
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
	rlwinm	r2, r2, 28, 0xfffff000
#ifdef CONFIG_MODULES
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
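
/*
 * No usable translation was found: synthesize the SRR1 and DSISR bits
 * a "real" ISI would present, point DAR at the failing address
 * (adjusted for the little-endian quirk below), restore the normal
 * GPRs by clearing MSR[TGPR], and re-enter the ISI path at
 * InstructionAccess.
 */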
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess

/*
 * Handle TLB miss for DATA Load operation on 603/603e
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SDR1
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
	rlwinm	r2, r2, 28, 0xfffff000
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
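	/*
	 * Software TLB-way selection for cores with
	 * MMU_FTR_NEED_DTLB_SW_LRU: toggle the per-set bit (selected by
	 * EA bits 15:19) kept in SPRG_603_LRU, and feed the resulting
	 * bit back into SRR1 bit 14, which selects the way that tlbld
	 * replaces.
	 */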
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
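
/*
 * Fault path: synthesize the DSISR a "real" DSI would present, point
 * DAR at the failing address (with the little-endian fixup), restore
 * the normal GPRs by clearing MSR[TGPR], and re-enter the DSI path at
 * DataAccess.
 */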
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess

/*
 * Handle TLB miss for DATA Store on 603/603e
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SDR1
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
	rlwinm	r2, r2, 28, 0xfffff000
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
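	/* Same software-LRU way selection as in DataLoadTLBMiss above. */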
BEGIN_MMU_FTR_SECTION
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr   SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi

#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_async_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	. = 0x3000

machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	andis.	r0, r5, DSISR_DABRMATCH@h
	bne-	1f
	EXC_XFER_LITE(0x300, handle_page_fault)
1:	EXC_XFER_STD(0x300, do_break)

#ifdef CONFIG_VMAP_STACK
#ifdef CONFIG_PPC_BOOK3S_604
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

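/*
 * Wrappers that call hash_page from the VMAP_STACK prologs. As set up
 * here, hash_page takes r3 = access flags (0 for an exec fault,
 * DSISR_STORE folded into _PAGE_RW for a data fault), r4 = faulting
 * address, and r9 = saved SRR1; the DSI wrapper also passes the
 * interrupted PC (SRR0) in r5.
 */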
hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH2
	rfi

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	rfi
#endif /* CONFIG_PPC_BOOK3S_604 */

stack_overflow:
	vmap_stack_overflow_exception
#endif

AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)

/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4
	addi	r6,r6,4
	blr

#ifdef CONFIG_SMP
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi
#endif /* CONFIG_SMP */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* HTABMASK = 3 -> 256kB table */
	mtspr	SPRN_SDR1, r6
	blr

load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
BEGIN_MMU_FTR_SECTION
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

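/*
 * Load context 0's segment registers. A segment register holds the
 * Ks/Kp protection keys, the no-execute bit where used, and a 24-bit
 * VSID; the 0x111 stride below gives each of the sixteen 256MB
 * segments its own VSID. Kernel segments get Kp = 1 with Ks and Nx
 * cleared.
 */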
_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi

/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)
	cmpwi	cr0,r3,0
	blt-	4f
	mulli	r3,r3,897	/* multiply context by skew factor (spreads VSIDs) */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lwz	r4, MM_PGD(r4)
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
BEGIN_MMU_FTR_SECTION
#ifndef CONFIG_BDI_SWITCH
	lwz	r4, MM_PGD(r4)
#endif
	tophys(r4, r4)
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)

/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr

_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)
	mfmsr	r6
	mflr	r7
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	rfi
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r6
	rfi

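/*
 * Invalidate the TLB for the first 4MB of effective address space,
 * one 4kB page at a time. tlbie indexes the TLB by low-order EA bits,
 * so a 4MB sweep should be enough to hit every TLB set on these CPUs.
 */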
flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr

mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr
	andc	r3,r3,r0

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfi

/* We use one BAT to map up to 256M of RAM at PAGE_OFFSET */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* 256MB block, valid in supervisor mode */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr

#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)
	lwz	r8,4(r8)
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */

#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8, 0x002a
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2
	mtspr	SPRN_DBAT1U, r11

	blr
#endif

#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif

#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi
2:
	mtlr	r4
	blr
#endif

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8