/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications by Dan Malek
 *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains low-level support and setup for PowerPC 8xx
 *  embedded processors, including trap and interrupt dispatch.
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/pgtable.h>
#include <linux/sizes.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>

#include "head_32.h"

.macro compare_to_kernel_boundary scratch, addr
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
/* By simply checking Address >= 0x80000000, we know if it's a kernel address */
	not.	\scratch, \addr
#else
	rlwinm	\scratch, \addr, 16, 0xfff8
	cmpli	cr0, \scratch, PAGE_OFFSET@h
#endif
.endm
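
/* After this macro, CR0 "lt" means the address is a user address; any other
 * result means a kernel address, and the callers below then switch the table
 * walk to swapper_pg_dir.
 */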

/*
 * We need an ITLB miss handler for kernel addresses if:
 * - Either we have modules
 * - Or we have not pinned the first 8M
 * - Or we have CONFIG_DEBUG_PAGEALLOC
 */
#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT) || \
    defined(CONFIG_DEBUG_PAGEALLOC)
#define ITLB_MISS_KERNEL	1
#endif

/*
 * Value for the bits that have fixed value in RPN entries.
 * Also used for tagging DAR for DTLBerror.
 */
#define RPN_PATTERN	0x00f0

#define PAGE_SHIFT_512K		19
#define PAGE_SHIFT_8M		23

	__HEAD
_ENTRY(_stext);
_ENTRY(_start);

/* MPC8xx
 * This port was done on an MBX board with an 860.  Right now I only
 * support an ELF compressed (zImage) boot from EPPC-Bug because the
 * code there loads up some registers before calling us:
 *   r3: ptr to board info data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * I decided to use conditional compilation instead of checking PVR and
 * adding more processor specific branches around code I don't need.
 * Since this is an embedded processor, I also appreciate any memory
 * savings I can get.
 *
 * The MPC8xx does not have any BATs, but it supports large page sizes.
 * We first initialize the MMU to support 8M byte pages, then load one
 * entry into each of the instruction and data TLBs to map the first
 * 8M 1:1.  I also mapped an additional I/O space 1:1 so we can get to
 * the "internal" processor registers before MMU_init is called.
 *
 *	-- Dan
 */
	.globl	__start
__start:
	mr	r31,r3			/* save device tree ptr */

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 8 Meg mapped into TLB entries, and the caches
 * ready to work.
 */

turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	rfi				/* enables MMU */


#ifdef CONFIG_PERF_EVENTS
	.align	4

	.globl	itlb_miss_counter
itlb_miss_counter:
	.space	4

	.globl	dtlb_miss_counter
dtlb_miss_counter:
	.space	4

	.globl	instruction_counter
instruction_counter:
	.space	4
#endif
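
/* The counters above back the 8xx perf events support: when CONFIG_PERF_EVENTS
 * is enabled they are updated from the (patched) TLB miss exits and from the
 * 0x1d00 instruction breakpoint handler below, using their physical
 * (- PAGE_OFFSET) addresses since exceptions run with translation disabled.
 */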

/* System reset */
	EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)

/* Machine check */
	. = 0x200
MachineCheck:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	li	r6, RPN_PATTERN
	mtspr	SPRN_DAR, r6	/* Tag DAR, to be used in DTLB Error */
	addi r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x200, machine_check_exception)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	li	r6, RPN_PATTERN
	mtspr	SPRN_DAR, r6	/* Tag DAR, to be used in DTLB Error */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	.Lalignment_exception_ool

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	/* With VMAP_STACK there's not enough room for this at 0x600 */
	. = 0xa00
.Lalignment_exception_ool:
	EXC_XFER_STD(0x600, alignment_exception)

/* System call */
	. = 0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)

/* On the MPC8xx, this is a software emulation interrupt.  It occurs
 * for all unimplemented and illegal instructions.
 */
	EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)

	. = 0x1100
/*
 * For the MPC8xx, this is a software tablewalk to load the instruction
 * TLB.  The task switch loads the M_TWB register with the pointer to the first
 * level table.
 * If we discover there is no second level table (value is zero) or if there
 * is an invalid pte, we load that into the TLB, which causes another fault
 * into the TLB Error interrupt where we can handle such problems.
 * We have to use the MD_xxx registers for the tablewalk because the
 * equivalent MI_xxx registers only perform the attribute functions.
 */
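
/*
 * Rough C-level sketch of the walk performed below (illustrative only: the
 * real code lets M_TWB/MD_TWC compute the table offsets, and the PTE bits are
 * massaged into the layout the hardware expects before loading MI_RPN).
 * pgdir is the task's page directory, or swapper_pg_dir for kernel addresses:
 *
 *	pmd = ((u32 *)pgdir)[ea >> PGDIR_SHIFT];
 *	pte = ((u32 *)(pmd & PAGE_MASK))[(ea >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
 *	MI_RPN = fixup(pte);	 - fixup() stands for the bit twiddling below
 */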

#ifdef CONFIG_8xx_CPU15
#define INVALIDATE_ADJACENT_PAGES_CPU15(addr)	\
	addi	addr, addr, PAGE_SIZE;	\
	tlbie	addr;			\
	addi	addr, addr, -(PAGE_SIZE << 1);	\
	tlbie	addr;			\
	addi	addr, addr, PAGE_SIZE
#else
#define INVALIDATE_ADJACENT_PAGES_CPU15(addr)
#endif
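
/* The macro above is the CONFIG_8xx_CPU15 erratum workaround: it invalidates
 * any TLB entries for the two pages adjacent to the faulting address before
 * the new entry is loaded.
 */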

InstructionTLBMiss:
	mtspr	SPRN_SPRG_SCRATCH0, r10
#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
	mtspr	SPRN_SPRG_SCRATCH1, r11
#endif

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
	INVALIDATE_ADJACENT_PAGES_CPU15(r10)
	mtspr	SPRN_MD_EPN, r10
#ifdef ITLB_MISS_KERNEL
	mfcr	r11
	compare_to_kernel_boundary r10, r10
#endif
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
#ifdef ITLB_MISS_KERNEL
	blt+	3f
	rlwinm	r10, r10, 0, 20, 31
	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
	mtcr	r11
#endif
#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */
	mtspr	SPRN_MD_TWC, r11
#else
	lwz	r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */
	mtspr	SPRN_MI_TWC, r10	/* Set segment attributes */
	mtspr	SPRN_MD_TWC, r10
#endif
	mfspr	r10, SPRN_MD_TWC
	lwz	r10, 0(r10)	/* Get the pte */
#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT)
	rlwimi	r11, r10, 32 - 9, _PMD_PAGE_512K
	mtspr	SPRN_MI_TWC, r11
#endif
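	/* With CONFIG_SWAP, only keep _PAGE_PRESENT if _PAGE_ACCESSED is also
	 * set, so that a not-yet-accessed page faults into the TLB error
	 * handler (same trick as in the DTLB miss handler below).
	 */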
#ifdef CONFIG_SWAP
	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
	and	r11, r11, r10
	rlwimi	r10, r11, 0, _PAGE_PRESENT
#endif
	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 20 and 23 must be clear.
	 * Software indicator bits 22, 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	rlwinm	r10, r10, 0, ~0x0f00	/* Clear bits 20-23 */
	rlwimi	r10, r10, 4, 0x0400	/* Copy _PAGE_EXEC into bit 21 */
	ori	r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */

	/* Restore registers */
0:	mfspr	r10, SPRN_SPRG_SCRATCH0
#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS)
	mfspr	r11, SPRN_SPRG_SCRATCH1
#endif
	rfi
	patch_site	0b, patch__itlbmiss_exit_1
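	/* patch_site records the address of the rfi above as
	 * patch__itlbmiss_exit_1 so it can be modified at run time; with
	 * CONFIG_PERF_EVENTS the exit is expected to be redirected to the
	 * counting stub below.
	 */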

#ifdef CONFIG_PERF_EVENTS
	patch_site	0f, patch__itlbmiss_perf
0:	lwz	r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
	addi	r10, r10, 1
	stw	r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
	mfspr	r10, SPRN_SPRG_SCRATCH0
#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
	mfspr	r11, SPRN_SPRG_SCRATCH1
#endif
	rfi
#endif

	. = 0x1200
DataStoreTLBMiss:
	mtspr	SPRN_DAR, r10
	mtspr	SPRN_M_TW, r11
	mfcr	r11

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	mfspr	r10, SPRN_MD_EPN
	compare_to_kernel_boundary r10, r10
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
	blt+	3f
	rlwinm	r10, r10, 0, 20, 31
	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
	mtcr	r11
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */

	mtspr	SPRN_MD_TWC, r11
	mfspr	r10, SPRN_MD_TWC
	lwz	r10, 0(r10)	/* Get the pte */

	/* Insert the Guarded flag into the TWC from the Linux PTE.
	 * It is bit 27 of both the Linux PTE and the TWC (at least
	 * I got that right :-).  It will be better when we can put
	 * this into the Linux pgd/pmd and load it in the operation
	 * above.
	 */
	rlwimi	r11, r10, 0, _PAGE_GUARDED
	rlwimi	r11, r10, 32 - 9, _PMD_PAGE_512K
	mtspr	SPRN_MD_TWC, r11

	/* Both _PAGE_ACCESSED and _PAGE_PRESENT have to be set.
	 * We also need to know if the insn is a load/store, so:
	 * Clear _PAGE_PRESENT and load that, which will
	 * trap into DTLB Error with the store bit set accordingly.
	 */
	/* PRESENT=0x1, ACCESSED=0x20
	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
	 * r10 = (r10 & ~PRESENT) | r11;
	 */
#ifdef CONFIG_SWAP
	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
	and	r11, r11, r10
	rlwimi	r10, r11, 0, _PAGE_PRESENT
#endif
	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	li	r11, RPN_PATTERN
	rlwimi	r10, r11, 0, 24, 27	/* Set 24-27 */
	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */

	/* Restore registers */

0:	mfspr	r10, SPRN_DAR
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r11, SPRN_M_TW
	rfi
	patch_site	0b, patch__dtlbmiss_exit_1

#ifdef CONFIG_PERF_EVENTS
	patch_site	0f, patch__dtlbmiss_perf
0:	lwz	r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
	addi	r10, r10, 1
	stw	r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
	mfspr	r10, SPRN_DAR
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r11, SPRN_M_TW
	rfi
#endif

/* This is an instruction TLB error on the MPC8xx.  This could be due
 * to many reasons, such as executing guarded memory or illegal instruction
 * addresses.  There is nothing to do but handle a big time error fault.
 */
	. = 0x1300
InstructionTLBError:
	EXCEPTION_PROLOG
	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	andis.	r10,r9,SRR1_ISI_NOPT@h
	beq+	.Litlbie
	tlbie	r4
	/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
.Litlbie:
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)

/* This is the data TLB error on the MPC8xx.  This could be due to
 * many reasons, including a dirty update to a pte.  We bail out to
 * a higher level function that can handle it.
 */
	. = 0x1400
DataTLBError:
	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
	mfspr	r11, SPRN_DAR
	cmpwi	cr1, r11, RPN_PATTERN
	beq-	cr1, FixupDAR	/* must be a buggy dcbX, icbi insn. */
DARFixed:/* Return from dcbx instruction bug workaround */
#ifdef CONFIG_VMAP_STACK
	li	r11, RPN_PATTERN
	mtspr	SPRN_DAR, r11	/* Tag DAR, to be used in DTLB Error */
#endif
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack r4, r5, r11
	andis.	r10,r5,DSISR_NOHPTE@h
	beq+	.Ldtlbie
	tlbie	r4
.Ldtlbie:
#ifndef CONFIG_VMAP_STACK
	li	r10,RPN_PATTERN
	mtspr	SPRN_DAR,r10	/* Tag DAR, to be used in DTLB Error */
#endif
	/* 0x300 is DataAccess exception, needed by bad_page_fault() */
	EXC_XFER_LITE(0x300, handle_page_fault)

stack_overflow:
	vmap_stack_overflow_exception

/* On the MPC8xx, these next four traps are used for development
 * support of breakpoints and such.  Someday I will get around to
 * using them.
 */
do_databreakpoint:
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mfspr	r4,SPRN_BAR
	stw	r4,_DAR(r11)
#ifdef CONFIG_VMAP_STACK
	lwz	r5,_DSISR(r11)
#else
	mfspr	r5,SPRN_DSISR
#endif
	EXC_XFER_STD(0x1c00, do_break)

	. = 0x1c00
DataBreakpoint:
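	/* A breakpoint match whose SRR0 points right after the tlbie in the
	 * ITLB/DTLB error handlers is presumably a spurious hit caused by that
	 * tlbie: restore the scratch registers and return without reporting it.
	 */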
	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
	mfspr	r11, SPRN_SRR0
	cmplwi	cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l
	cmplwi	cr7, r11, (.Litlbie - PAGE_OFFSET)@l
	cror	4*cr1+eq, 4*cr1+eq, 4*cr7+eq
	bne	cr1, do_databreakpoint
	mtcr	r10
	mfspr	r10, SPRN_SPRG_SCRATCH0
	mfspr	r11, SPRN_SPRG_SCRATCH1
	rfi

#ifdef CONFIG_PERF_EVENTS
	. = 0x1d00
InstructionBreakpoint:
	mtspr	SPRN_SPRG_SCRATCH0, r10
	lwz	r10, (instruction_counter - PAGE_OFFSET)@l(0)
	addi	r10, r10, -1
	stw	r10, (instruction_counter - PAGE_OFFSET)@l(0)
	lis	r10, 0xffff
	ori	r10, r10, 0x01
	mtspr	SPRN_COUNTA, r10
	mfspr	r10, SPRN_SPRG_SCRATCH0
	rfi
#else
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
#endif
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)

	. = 0x2000

/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
 * by decoding the registers used by the dcbx instruction and adding them.
 * DAR is set to the calculated address.
 */
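
/*
 * In C terms, the fixup below recovers the RA/RB fields of the trapped
 * dcbx/icbi instruction and computes (illustrative sketch only):
 *
 *	ea = (ra ? GPR(ra) : 0) + GPR(rb);
 *	DAR = ea;
 *
 * using the jump table at 150: (once for RB, once for RA) because the
 * register numbers are only known at run time.
 */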
FixupDAR:/* Entry point for dcbx workaround. */
	mtspr	SPRN_M_TW, r10
	/* fetch instruction from memory. */
	mfspr	r10, SPRN_SRR0
	mtspr	SPRN_MD_EPN, r10
	rlwinm	r11, r10, 16, 0xfff8
	cmpli	cr1, r11, PAGE_OFFSET@h
	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
	blt+	cr1, 3f

	/* create physical page address from effective address */
	tophys(r11, r10)
	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
	rlwinm	r11, r11, 0, 20, 31
	oris	r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)	/* Get the level 1 entry */
	mtspr	SPRN_MD_TWC, r11
	mtcrf	0x01, r11
	mfspr	r11, SPRN_MD_TWC
	lwz	r11, 0(r11)	/* Get the pte */
	bt	28,200f		/* bit 28 = Large page (8M) */
	/* concat physical page address(r11) and page offset(r10) */
	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT, 31
201:	lwz	r11,0(r11)
/* Check if it really is a dcbx instruction. */
/* dcbt and dcbtst do not generate DTLB Misses/Errors,
 * so there is no need to include them here */
	xoris	r10, r11, 0x7c00	/* check if major OP code is 31 */
	rlwinm	r10, r10, 0, 21, 5
	cmpwi	cr1, r10, 2028	/* Is dcbz? */
	beq+	cr1, 142f
	cmpwi	cr1, r10, 940	/* Is dcbi? */
	beq+	cr1, 142f
	cmpwi	cr1, r10, 108	/* Is dcbst? */
	beq+	cr1, 144f		/* Fix up store bit! */
	cmpwi	cr1, r10, 172	/* Is dcbf? */
	beq+	cr1, 142f
	cmpwi	cr1, r10, 1964	/* Is icbi? */
	beq+	cr1, 142f
141:	mfspr	r10,SPRN_M_TW
	b	DARFixed	/* Nope, go back to normal TLB processing */

200:
	/* concat physical page address(r11) and page offset(r10) */
	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
	b	201b

144:	mfspr	r10, SPRN_DSISR
	rlwinm	r10, r10,0,7,5	/* Clear store bit for buggy dcbst insn */
	mtspr	SPRN_DSISR, r10
142:	/* continue, it was a dcbx, dcbi instruction. */
	mfctr	r10
	mtdar	r10			/* save ctr reg in DAR */
	rlwinm	r10, r11, 24, 24, 28	/* offset into jump table for reg RB */
	addi	r10, r10, 150f@l	/* add start of table */
	mtctr	r10			/* load ctr with jump address */
	xor	r10, r10, r10		/* sum starts at zero */
	bctr				/* jump into table */
150:
	add	r10, r10, r0	;b	151f
	add	r10, r10, r1	;b	151f
	add	r10, r10, r2	;b	151f
	add	r10, r10, r3	;b	151f
	add	r10, r10, r4	;b	151f
	add	r10, r10, r5	;b	151f
	add	r10, r10, r6	;b	151f
	add	r10, r10, r7	;b	151f
	add	r10, r10, r8	;b	151f
	add	r10, r10, r9	;b	151f
	mtctr	r11	;b	154f	/* r10 needs special handling */
	mtctr	r11	;b	153f	/* r11 needs special handling */
	add	r10, r10, r12	;b	151f
	add	r10, r10, r13	;b	151f
	add	r10, r10, r14	;b	151f
	add	r10, r10, r15	;b	151f
	add	r10, r10, r16	;b	151f
	add	r10, r10, r17	;b	151f
	add	r10, r10, r18	;b	151f
	add	r10, r10, r19	;b	151f
	add	r10, r10, r20	;b	151f
	add	r10, r10, r21	;b	151f
	add	r10, r10, r22	;b	151f
	add	r10, r10, r23	;b	151f
	add	r10, r10, r24	;b	151f
	add	r10, r10, r25	;b	151f
	add	r10, r10, r26	;b	151f
	add	r10, r10, r27	;b	151f
	add	r10, r10, r28	;b	151f
	add	r10, r10, r29	;b	151f
	add	r10, r10, r30	;b	151f
	add	r10, r10, r31
151:
	rlwinm	r11,r11,19,24,28	/* offset into jump table for reg RA */
	cmpwi	cr1, r11, 0
	beq	cr1, 152f		/* if reg RA is zero, don't add it */
	addi	r11, r11, 150b@l	/* add start of table */
	mtctr	r11			/* load ctr with jump address */
	rlwinm	r11,r11,0,16,10		/* make sure we don't execute this more than once */
	bctr				/* jump into table */
152:
	mfdar	r11
	mtctr	r11			/* restore ctr reg from DAR */
#ifdef CONFIG_VMAP_STACK
	mfspr	r11, SPRN_SPRG_THREAD
	stw	r10, DAR(r11)
	mfspr	r10, SPRN_DSISR
	stw	r10, DSISR(r11)
#else
	mtdar	r10			/* save fault EA to DAR */
#endif
	mfspr	r10,SPRN_M_TW
	b	DARFixed		/* Go back to normal TLB handling */

	/* special handling for r10,r11 since these are modified already */
153:	mfspr	r11, SPRN_SPRG_SCRATCH1	/* load r11 from SPRN_SPRG_SCRATCH1 */
	add	r10, r10, r11	/* add it */
	mfctr	r11		/* restore r11 */
	b	151b
154:	mfspr	r11, SPRN_SPRG_SCRATCH0	/* load r10 from SPRN_SPRG_SCRATCH0 */
	add	r10, r10, r11	/* add it */
	mfctr	r11		/* restore r11 */
	b	151b

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	lis	r0, STACK_END_MAGIC@h
	ori	r0, r0, STACK_END_MAGIC@l
	stw	r0, 0(r1)
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
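	/* The stores above put STACK_END_MAGIC at the low end of
	 * init_thread_union (used for stack overflow detection) and a zero
	 * back chain at the top of the initial stack frame.
	 */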

	lis	r6, swapper_pg_dir@ha
	tophys(r6,r6)
	mtspr	SPRN_M_TWB, r6

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 8xx, all we have to do is invalidate the TLB to clear
 * the old 8M byte TLB mappings and load the page table base register.
 */
	/* The right way to do this would be to track it down through
	 * init's THREAD like the context switch code does, but this is
	 * easier......until someone changes init's static structures.
	 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:
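	/* When CONFIG_PIN_TLB_IMMR is set, pin a 512k, guarded, cache-inhibited
	 * DTLB entry mapping the IMMR registers at VIRT_IMMR_BASE.
	 */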
#ifdef CONFIG_PIN_TLB_IMMR
	lis	r0, MD_TWAM@h
	oris	r0, r0, 0x1f00
	mtspr	SPRN_MD_CTR, r0
	LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
	tlbie	r0
	mtspr	SPRN_MD_EPN, r0
	LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED)
	mtspr	SPRN_MD_TWC, r0
	mfspr   r0, SPRN_IMMR
	rlwinm	r0, r0, 0, 0xfff80000
	ori	r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
			_PAGE_NO_CACHE | _PAGE_PRESENT
	mtspr	SPRN_MD_RPN, r0
	lis	r0, (MD_TWAM | MD_RSV4I)@h
	mtspr	SPRN_MD_CTR, r0
#endif
#ifndef CONFIG_PIN_TLB_TEXT
	li	r0, 0
	mtspr	SPRN_MI_CTR, r0
#endif
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
	lis	r0, MD_TWAM@h
	mtspr	SPRN_MD_CTR, r0
#endif
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */

	/* set up the PTE pointers for the Abatron bdiGDB.
	*/
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the lower RAM (up to 32 Mbytes,
 * using 8 Mbyte pages) 1:1 virtual to physical.  Also, set the cache
 * mode since that is defined by TLB entries and perform any additional
 * mapping (like of the IMMR).
 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 * 24 Mbytes of data, and the 512k IMMR space.  Anything not covered by
 * these mappings is mapped by page tables.
 */
initial_mmu:
	li	r8, 0
	mtspr	SPRN_MI_CTR, r8		/* remove PINNED ITLB entries */
	lis	r10, MD_TWAM@h
	mtspr	SPRN_MD_CTR, r10	/* remove PINNED DTLB entries */

	tlbia			/* Invalidate all TLB entries */

	lis	r8, MI_APG_INIT@h	/* Set protection modes */
	ori	r8, r8, MI_APG_INIT@l
	mtspr	SPRN_MI_AP, r8
	lis	r8, MD_APG_INIT@h
	ori	r8, r8, MD_APG_INIT@l
	mtspr	SPRN_MD_AP, r8

	/* Map the lower RAM (up to 32 Mbytes) into the ITLB and DTLB */
	lis	r8, MI_RSV4I@h
	ori	r8, r8, 0x1c00
	oris	r12, r10, MD_RSV4I@h
	ori	r12, r12, 0x1c00
	li	r9, 4				/* up to 4 pages of 8M */
	mtctr	r9
	lis	r9, KERNELBASE@h		/* Create vaddr for TLB */
	li	r10, MI_PS8MEG | MI_SVALID	/* Set 8M byte page */
	li	r11, MI_BOOTINIT		/* Create RPN for address 0 */
1:
	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */
	addi	r8, r8, 0x100
	ori	r0, r9, MI_EVALID		/* Mark it valid */
	mtspr	SPRN_MI_EPN, r0
	mtspr	SPRN_MI_TWC, r10
	mtspr	SPRN_MI_RPN, r11		/* Store TLB entry */
	mtspr	SPRN_MD_CTR, r12
	addi	r12, r12, 0x100
	mtspr	SPRN_MD_EPN, r0
	mtspr	SPRN_MD_TWC, r10
	mtspr	SPRN_MD_RPN, r11
	addis	r9, r9, 0x80
	addis	r11, r11, 0x80

	bdnz	1b

	/* Since the cache is enabled according to the information we
	 * just loaded into the TLB, invalidate and enable the caches here.
	 * We should probably check/set other modes....later.
	 */
	lis	r8, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	/* Disable debug mode entry on breakpoints */
	mfspr	r8, SPRN_DER
#ifdef CONFIG_PERF_EVENTS
	rlwinm	r8, r8, 0, ~0xc
#else
	rlwinm	r8, r8, 0, ~0x8
#endif
	mtspr	SPRN_DER, r8
	blr

#ifdef CONFIG_PIN_TLB
_GLOBAL(mmu_pin_tlb)
	lis	r9, (1f - PAGE_OFFSET)@h
	ori	r9, r9, (1f - PAGE_OFFSET)@l
	mfmsr	r10
	mflr	r11
	li	r12, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	rlwinm	r0, r10, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0
	isync
	.align	4
	mtspr	SPRN_SRR0, r9
	mtspr	SPRN_SRR1, r12
	rfi
1:
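	/* From here on we run with MSR[IR/DR] (and RI) cleared, i.e. with
	 * translation off, so the pinned TLB entries can be rewritten safely.
	 */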
	li	r5, 0
	lis	r6, MD_TWAM@h
	mtspr	SPRN_MI_CTR, r5
	mtspr	SPRN_MD_CTR, r6
	tlbia

#ifdef CONFIG_PIN_TLB_TEXT
	LOAD_REG_IMMEDIATE(r5, 28 << 8)
	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
	LOAD_REG_ADDR(r9, _sinittext)
	li	r0, 4
	mtctr	r0

2:	ori	r0, r6, MI_EVALID
	mtspr	SPRN_MI_CTR, r5
	mtspr	SPRN_MI_EPN, r0
	mtspr	SPRN_MI_TWC, r7
	mtspr	SPRN_MI_RPN, r8
	addi	r5, r5, 0x100
	addis	r6, r6, SZ_8M@h
	addis	r8, r8, SZ_8M@h
	cmplw	r6, r9
	bdnzt	lt, 2b
	lis	r0, MI_RSV4I@h
	mtspr	SPRN_MI_CTR, r0
#endif
	LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
#ifdef CONFIG_PIN_TLB_DATA
	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG)
#ifdef CONFIG_PIN_TLB_IMMR
	li	r0, 3
#else
	li	r0, 4
#endif
	mtctr	r0
	cmpwi	r4, 0
	beq	4f
	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
	LOAD_REG_ADDR(r9, _sinittext)

2:	ori	r0, r6, MD_EVALID
	mtspr	SPRN_MD_CTR, r5
	mtspr	SPRN_MD_EPN, r0
	mtspr	SPRN_MD_TWC, r7
	mtspr	SPRN_MD_RPN, r8
	addi	r5, r5, 0x100
	addis	r6, r6, SZ_8M@h
	addis	r8, r8, SZ_8M@h
	cmplw	r6, r9
	bdnzt	lt, 2b

4:	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
2:	ori	r0, r6, MD_EVALID
	mtspr	SPRN_MD_CTR, r5
	mtspr	SPRN_MD_EPN, r0
	mtspr	SPRN_MD_TWC, r7
	mtspr	SPRN_MD_RPN, r8
	addi	r5, r5, 0x100
	addis	r6, r6, SZ_8M@h
	addis	r8, r8, SZ_8M@h
	cmplw	r6, r3
	bdnzt	lt, 2b
#endif
#ifdef CONFIG_PIN_TLB_IMMR
	LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
	LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED)
	mfspr   r8, SPRN_IMMR
	rlwinm	r8, r8, 0, 0xfff80000
	ori	r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
			_PAGE_NO_CACHE | _PAGE_PRESENT
	mtspr	SPRN_MD_CTR, r5
	mtspr	SPRN_MD_EPN, r0
	mtspr	SPRN_MD_TWC, r7
	mtspr	SPRN_MD_RPN, r8
#endif
#if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
	lis	r0, (MD_RSV4I | MD_TWAM)@h
	mtspr	SPRN_MI_CTR, r0
#endif
	mtspr	SPRN_SRR1, r10
	mtspr	SPRN_SRR0, r11
	rfi
#endif /* CONFIG_PIN_TLB */

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
	.align	PAGE_SHIFT
empty_zero_page:
	.space	PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE table pointers, usually the kernel and current user
 * pointers to their respective root page tables (pgdir).
 */
	.globl	abatron_pteptrs
abatron_pteptrs:
	.space	8
880