/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications by Dan Malek
 *    Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains low-level support and setup for PowerPC 8xx
 *  embedded processors, including trap and interrupt dispatch.
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/pgtable.h>
#include <linux/sizes.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>

#include "head_32.h"

.macro compare_to_kernel_boundary scratch, addr
#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
/* By simply checking whether the address is >= 0x80000000, we know it is a kernel address */
	not.	\scratch, \addr
#else
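	/* Compare the top bits of the address with PAGE_OFFSET; callers branch
	 * with blt when the address is below PAGE_OFFSET, i.e. a user address.
	 */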
	rlwinm	\scratch, \addr, 16, 0xfff8
	cmpli	cr0, \scratch, PAGE_OFFSET@h
#endif
.endm

/*
 * We need an ITLB miss handler for kernel addresses if:
 * - Either we have modules
 * - Or we have not pinned the first 8M
 * - Or we have CONFIG_DEBUG_PAGEALLOC
 */
#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT) || \
    defined(CONFIG_DEBUG_PAGEALLOC)
#define ITLB_MISS_KERNEL	1
#endif

/*
 * Value for the bits that have fixed value in RPN entries.
 * Also used for tagging DAR for DTLB error.
 */
#define RPN_PATTERN	0x00f0

#define PAGE_SHIFT_512K		19
#define PAGE_SHIFT_8M		23

	__HEAD
_ENTRY(_stext);
_ENTRY(_start);

/* MPC8xx
 * This port was done on an MBX board with an 860.  Right now I only
 * support an ELF compressed (zImage) boot from EPPC-Bug because the
 * code there loads up some registers before calling us:
 *   r3: ptr to board info data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * I decided to use conditional compilation instead of checking PVR and
 * adding more processor specific branches around code I don't need.
 * Since this is an embedded processor, I also appreciate any memory
 * savings I can get.
 *
 * The MPC8xx does not have any BATs, but it supports large page sizes.
 * We first initialize the MMU to support 8M byte pages, then load one
 * entry into each of the instruction and data TLBs to map the first
 * 8M 1:1.  I also mapped an additional I/O space 1:1 so we can get to
 * the "internal" processor registers before MMU_init is called.
 *
 *	-- Dan
 */
	.globl	__start
__start:
	mr	r31,r3			/* save device tree ptr */

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 8 Meg mapped into TLB entries, and the caches
 * ready to work.
 */

turn_on_mmu:
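	/* rfi loads SRR1 into the MSR and jumps to SRR0, so execution resumes
	 * at start_here with instruction and data translation enabled.
	 */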
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	rfi				/* enables MMU */


#ifdef CONFIG_PERF_EVENTS
	.align	4

	.globl	itlb_miss_counter
itlb_miss_counter:
	.space	4

	.globl	dtlb_miss_counter
dtlb_miss_counter:
	.space	4

	.globl	instruction_counter
instruction_counter:
	.space	4
#endif

/* System reset */
	EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)

/* Machine check */
	. = 0x200
MachineCheck:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	li	r6, RPN_PATTERN
	mtspr	SPRN_DAR, r6	/* Tag DAR, to be used in DTLB Error */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x200, machine_check_exception)

/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	li	r6, RPN_PATTERN
	mtspr	SPRN_DAR, r6	/* Tag DAR, to be used in DTLB Error */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	.Lalignment_exception_ool

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	/* With VMAP_STACK there's not enough room for this at 0x600 */
	. = 0xa00
.Lalignment_exception_ool:
	EXC_XFER_STD(0x600, alignment_exception)

/* System call */
	. = 0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)

/* On the MPC8xx, this is a software emulation interrupt.  It occurs
 * for all unimplemented and illegal instructions.
 */
	EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)

	. = 0x1100
/*
 * For the MPC8xx, this is a software tablewalk to load the instruction
 * TLB.  The task switch loads the M_TWB register with the pointer to the first
 * level table.
 * If we discover there is no second level table (value is zero) or if there
 * is an invalid pte, we load that into the TLB, which causes another fault
 * into the TLB Error interrupt where we can handle such problems.
 * We have to use the MD_xxx registers for the tablewalk because the
 * equivalent MI_xxx registers only perform the attribute functions.
 */

#ifdef CONFIG_8xx_CPU15
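/*
 * CPU15 erratum workaround: make sure the two pages adjacent to the one
 * being loaded are not present in the ITLB.
 */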
#define INVALIDATE_ADJACENT_PAGES_CPU15(addr)	\
	addi	addr, addr, PAGE_SIZE;	\
	tlbie	addr;			\
	addi	addr, addr, -(PAGE_SIZE << 1);	\
	tlbie	addr;			\
	addi	addr, addr, PAGE_SIZE
#else
#define INVALIDATE_ADJACENT_PAGES_CPU15(addr)
#endif

InstructionTLBMiss:
	mtspr	SPRN_SPRG_SCRATCH0, r10
	mtspr	SPRN_SPRG_SCRATCH1, r11

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
	INVALIDATE_ADJACENT_PAGES_CPU15(r10)
	mtspr	SPRN_MD_EPN, r10
#ifdef ITLB_MISS_KERNEL
	mfcr	r11
	compare_to_kernel_boundary r10, r10
#endif
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
#ifdef ITLB_MISS_KERNEL
	blt+	3f
	rlwinm	r10, r10, 0, 20, 31
	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
	mtcr	r11
#endif
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */
	mtspr	SPRN_MD_TWC, r11
	mfspr	r10, SPRN_MD_TWC
	lwz	r10, 0(r10)	/* Get the pte */
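	/* Insert the Guarded and Accessed bits, and the 512k page size
	 * indication, from the PTE into the TWC value.
	 */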
	rlwimi	r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
	rlwimi	r11, r10, 32 - 9, _PMD_PAGE_512K
	mtspr	SPRN_MI_TWC, r11
	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 20 and 23 must be clear.
	 * Software indicator bits 22, 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	rlwinm	r10, r10, 0, ~0x0f00	/* Clear bits 20-23 */
	rlwimi	r10, r10, 4, 0x0400	/* Copy _PAGE_EXEC into bit 21 */
	ori	r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */

	/* Restore registers */
0:	mfspr	r10, SPRN_SPRG_SCRATCH0
	mfspr	r11, SPRN_SPRG_SCRATCH1
	rfi
	patch_site	0b, patch__itlbmiss_exit_1

#ifdef CONFIG_PERF_EVENTS
	patch_site	0f, patch__itlbmiss_perf
0:	lwz	r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
	addi	r10, r10, 1
	stw	r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	mfspr	r11, SPRN_SPRG_SCRATCH1
	rfi
#endif

	. = 0x1200
DataStoreTLBMiss:
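	/* DAR and M_TW are used as scratch to hold r10 and r11; they are
	 * restored on the exit path below, where DAR is re-tagged with
	 * RPN_PATTERN.
	 */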
	mtspr	SPRN_DAR, r10
	mtspr	SPRN_M_TW, r11
	mfcr	r11

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	mfspr	r10, SPRN_MD_EPN
	compare_to_kernel_boundary r10, r10
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table */
	blt+	3f
	rlwinm	r10, r10, 0, 20, 31
	oris	r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
	mtcr	r11
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10)	/* Get level 1 entry */

	mtspr	SPRN_MD_TWC, r11
	mfspr	r10, SPRN_MD_TWC
	lwz	r10, 0(r10)	/* Get the pte */

	/* Insert Guarded and Accessed flags into the TWC from the Linux PTE.
	 * It is bit 27 of both the Linux PTE and the TWC (at least
	 * I got that right :-).  It will be better when we can put
	 * this into the Linux pgd/pmd and load it in the operation
	 * above.
	 */
	rlwimi	r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
	rlwimi	r11, r10, 32 - 9, _PMD_PAGE_512K
	mtspr	SPRN_MD_TWC, r11

	/* The Linux PTE won't go exactly into the MMU TLB.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	li	r11, RPN_PATTERN
	rlwimi	r10, r11, 0, 24, 27	/* Set 24-27 */
	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */

	/* Restore registers */

0:	mfspr	r10, SPRN_DAR
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r11, SPRN_M_TW
	rfi
	patch_site	0b, patch__dtlbmiss_exit_1

#ifdef CONFIG_PERF_EVENTS
	patch_site	0f, patch__dtlbmiss_perf
0:	lwz	r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
	addi	r10, r10, 1
	stw	r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
	mfspr	r10, SPRN_DAR
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r11, SPRN_M_TW
	rfi
#endif

/* This is an instruction TLB error on the MPC8xx.  This could be due
 * to many reasons, such as executing guarded memory or illegal instruction
 * addresses.  There is nothing to do but handle a big time error fault.
 */
	. = 0x1300
InstructionTLBError:
	EXCEPTION_PROLOG
	mr	r4,r12
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	andis.	r10,r9,SRR1_ISI_NOPT@h
	beq+	.Litlbie
	tlbie	r4
	/* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
.Litlbie:
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)

/* This is the data TLB error on the MPC8xx.  This could be due to
 * many reasons, including a dirty update to a pte.  We bail out to
 * a higher level function that can handle it.
 */
	. = 0x1400
DataTLBError:
	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
	mfspr	r11, SPRN_DAR
	cmpwi	cr1, r11, RPN_PATTERN
	beq-	cr1, FixupDAR	/* must be a buggy dcbX, icbi insn. */
DARFixed:/* Return from dcbx instruction bug workaround */
#ifdef CONFIG_VMAP_STACK
	li	r11, RPN_PATTERN
	mtspr	SPRN_DAR, r11	/* Tag DAR, to be used in DTLB Error */
#endif
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack r4, r5, r11
	andis.	r10,r5,DSISR_NOHPTE@h
	beq+	.Ldtlbie
	tlbie	r4
.Ldtlbie:
#ifndef CONFIG_VMAP_STACK
	li	r10,RPN_PATTERN
	mtspr	SPRN_DAR,r10	/* Tag DAR, to be used in DTLB Error */
#endif
	/* 0x300 is DataAccess exception, needed by bad_page_fault() */
	EXC_XFER_LITE(0x300, handle_page_fault)

stack_overflow:
	vmap_stack_overflow_exception

/* On the MPC8xx, these next four traps are used for development
 * support of breakpoints and such.  Someday I will get around to
 * using them.
 */
do_databreakpoint:
	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
	addi	r3,r1,STACK_FRAME_OVERHEAD
	mfspr	r4,SPRN_BAR
	stw	r4,_DAR(r11)
#ifdef CONFIG_VMAP_STACK
	lwz	r5,_DSISR(r11)
#else
	mfspr	r5,SPRN_DSISR
#endif
	EXC_XFER_STD(0x1c00, do_break)

	. = 0x1c00
DataBreakpoint:
	EXCEPTION_PROLOG_0 handle_dar_dsisr=1
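	/* If the breakpoint was triggered by the tlbie in the DTLB or ITLB
	 * error handlers, ignore it and return without calling do_break().
	 */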
	mfspr	r11, SPRN_SRR0
	cmplwi	cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l
	cmplwi	cr7, r11, (.Litlbie - PAGE_OFFSET)@l
	cror	4*cr1+eq, 4*cr1+eq, 4*cr7+eq
	bne	cr1, do_databreakpoint
	mtcr	r10
	mfspr	r10, SPRN_SPRG_SCRATCH0
	mfspr	r11, SPRN_SPRG_SCRATCH1
	rfi

#ifdef CONFIG_PERF_EVENTS
	. = 0x1d00
InstructionBreakpoint:
	mtspr	SPRN_SPRG_SCRATCH0, r10
	lwz	r10, (instruction_counter - PAGE_OFFSET)@l(0)
	addi	r10, r10, -1
	stw	r10, (instruction_counter - PAGE_OFFSET)@l(0)
	lis	r10, 0xffff
	ori	r10, r10, 0x01
	mtspr	SPRN_COUNTA, r10
	mfspr	r10, SPRN_SPRG_SCRATCH0
	rfi
#else
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
#endif
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)

	. = 0x2000

/* This is the procedure to calculate the data EA for buggy dcbx/dcbi instructions
 * by decoding the registers used by the dcbx instruction and adding them.
 * DAR is set to the calculated address.
 */
FixupDAR:/* Entry point for dcbx workaround. */
	mtspr	SPRN_M_TW, r10
	/* fetch instruction from memory. */
	mfspr	r10, SPRN_SRR0
	mtspr	SPRN_MD_EPN, r10
	rlwinm	r11, r10, 16, 0xfff8
	cmpli	cr1, r11, PAGE_OFFSET@h
	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
	blt+	cr1, 3f

	/* create physical page address from effective address */
	tophys(r11, r10)
	mfspr	r11, SPRN_M_TWB	/* Get level 1 table */
	rlwinm	r11, r11, 0, 20, 31
	oris	r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
	lwz	r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11)	/* Get the level 1 entry */
	mtspr	SPRN_MD_TWC, r11
	mtcrf	0x01, r11
	mfspr	r11, SPRN_MD_TWC
	lwz	r11, 0(r11)	/* Get the pte */
	bt	28,200f		/* bit 28 = Large page (8M) */
	/* concat physical page address(r11) and page offset(r10) */
	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT, 31
201:	lwz	r11,0(r11)
/* Check if it really is a dcbx instruction. */
/* dcbt and dcbtst do not generate DTLB misses/errors,
 * so there is no need to include them here */
	xoris	r10, r11, 0x7c00	/* check if major OP code is 31 */
	rlwinm	r10, r10, 0, 21, 5
	cmpwi	cr1, r10, 2028	/* Is dcbz? */
	beq+	cr1, 142f
	cmpwi	cr1, r10, 940	/* Is dcbi? */
	beq+	cr1, 142f
	cmpwi	cr1, r10, 108	/* Is dcbst? */
	beq+	cr1, 144f		/* Fix up store bit! */
	cmpwi	cr1, r10, 172	/* Is dcbf? */
	beq+	cr1, 142f
	cmpwi	cr1, r10, 1964	/* Is icbi? */
	beq+	cr1, 142f
141:	mfspr	r10,SPRN_M_TW
	b	DARFixed	/* Nope, go back to normal TLB processing */

200:
	/* concat physical page address(r11) and page offset(r10) */
	rlwimi	r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
	b	201b

144:	mfspr	r10, SPRN_DSISR
	rlwinm	r10, r10,0,7,5	/* Clear store bit for buggy dcbst insn */
	mtspr	SPRN_DSISR, r10
142:	/* continue, it was a dcbx or dcbi instruction. */
	mfctr	r10
	mtdar	r10			/* save ctr reg in DAR */
	rlwinm	r10, r11, 24, 24, 28	/* offset into jump table for reg RB */
	addi	r10, r10, 150f@l	/* add start of table */
	mtctr	r10			/* load ctr with jump address */
	xor	r10, r10, r10		/* sum starts at zero */
	bctr				/* jump into table */
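	/* One entry per possible RB register: add its value into r10 and
	 * branch to 151f.  r10 and r11 need special handling (154f/153f)
	 * because they were saved in the scratch SPRGs by the prologue.
	 */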
150:
	add	r10, r10, r0	;b	151f
	add	r10, r10, r1	;b	151f
	add	r10, r10, r2	;b	151f
	add	r10, r10, r3	;b	151f
	add	r10, r10, r4	;b	151f
	add	r10, r10, r5	;b	151f
	add	r10, r10, r6	;b	151f
	add	r10, r10, r7	;b	151f
	add	r10, r10, r8	;b	151f
	add	r10, r10, r9	;b	151f
	mtctr	r11	;b	154f	/* r10 needs special handling */
	mtctr	r11	;b	153f	/* r11 needs special handling */
	add	r10, r10, r12	;b	151f
	add	r10, r10, r13	;b	151f
	add	r10, r10, r14	;b	151f
	add	r10, r10, r15	;b	151f
	add	r10, r10, r16	;b	151f
	add	r10, r10, r17	;b	151f
	add	r10, r10, r18	;b	151f
	add	r10, r10, r19	;b	151f
	add	r10, r10, r20	;b	151f
	add	r10, r10, r21	;b	151f
	add	r10, r10, r22	;b	151f
	add	r10, r10, r23	;b	151f
	add	r10, r10, r24	;b	151f
	add	r10, r10, r25	;b	151f
	add	r10, r10, r26	;b	151f
	add	r10, r10, r27	;b	151f
	add	r10, r10, r28	;b	151f
	add	r10, r10, r29	;b	151f
	add	r10, r10, r30	;b	151f
	add	r10, r10, r31
151:
	rlwinm	r11,r11,19,24,28	/* offset into jump table for reg RA */
	cmpwi	cr1, r11, 0
	beq	cr1, 152f		/* if reg RA is zero, don't add it */
	addi	r11, r11, 150b@l	/* add start of table */
	mtctr	r11			/* load ctr with jump address */
	rlwinm	r11,r11,0,16,10		/* make sure we don't execute this more than once */
	bctr				/* jump into table */
152:
	mfdar	r11
	mtctr	r11			/* restore ctr reg from DAR */
#ifdef CONFIG_VMAP_STACK
	mfspr	r11, SPRN_SPRG_THREAD
	stw	r10, DAR(r11)
	mfspr	r10, SPRN_DSISR
	stw	r10, DSISR(r11)
#else
	mtdar	r10			/* save fault EA to DAR */
#endif
	mfspr	r10,SPRN_M_TW
	b	DARFixed		/* Go back to normal TLB handling */

	/* special handling for r10,r11 since these are modified already */
153:	mfspr	r11, SPRN_SPRG_SCRATCH1	/* load r11 from SPRN_SPRG_SCRATCH1 */
	add	r10, r10, r11	/* add it */
	mfctr	r11		/* restore r11 */
	b	151b
154:	mfspr	r11, SPRN_SPRG_SCRATCH0	/* load r10 from SPRN_SPRG_SCRATCH0 */
	add	r10, r10, r11	/* add it */
	mfctr	r11		/* restore r11 */
	b	151b

/*
 * This is where the main kernel code starts.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	lis	r0, STACK_END_MAGIC@h
	ori	r0, r0, STACK_END_MAGIC@l
	stw	r0, 0(r1)
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

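	/* Point M_TWB at the physical address of swapper_pg_dir so the TLB
	 * miss handlers can walk the kernel page tables.
	 */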
	lis	r6, swapper_pg_dir@ha
	tophys(r6,r6)
	mtspr	SPRN_M_TWB, r6

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

/*
 * Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 8xx, all we have to do is invalidate the TLB to clear
 * the old 8M byte TLB mappings and load the page table base register.
 */
	/* The right way to do this would be to track it down through
	 * init's THREAD like the context switch code does, but this is
	 * easier ... until someone changes init's static structures.
	 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:
#ifdef CONFIG_PIN_TLB_IMMR
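	/* Pin a 512k, guarded, cache-inhibited DTLB entry covering the IMMR
	 * area at VIRT_IMMR_BASE in the last DTLB slot (index 31), then
	 * reserve the upper four slots with MD_RSV4I.
	 */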
	lis	r0, MD_TWAM@h
	oris	r0, r0, 0x1f00
	mtspr	SPRN_MD_CTR, r0
	LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
	tlbie	r0
	mtspr	SPRN_MD_EPN, r0
	LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED)
	mtspr	SPRN_MD_TWC, r0
	mfspr   r0, SPRN_IMMR
	rlwinm	r0, r0, 0, 0xfff80000
	ori	r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
			_PAGE_NO_CACHE | _PAGE_PRESENT
	mtspr	SPRN_MD_RPN, r0
	lis	r0, (MD_TWAM | MD_RSV4I)@h
	mtspr	SPRN_MD_CTR, r0
#endif
#ifndef CONFIG_PIN_TLB_TEXT
	li	r0, 0
	mtspr	SPRN_MI_CTR, r0
#endif
#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
	lis	r0, MD_TWAM@h
	mtspr	SPRN_MD_CTR, r0
#endif
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */

	/* Set up the PTE pointers for the Abatron bdiGDB. */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 8 MBytes of memory 1:1
 * virtual to physical.  Also set the cache mode, since it is defined by
 * the TLB entries, and perform any additional mappings (such as the IMMR).
 * If configured to pin some TLBs, we pin the first 8 Mbytes of kernel,
 * 24 Mbytes of data, and the 512k IMMR space.  Anything not covered by
 * these mappings is mapped by page tables.
 */
initial_mmu:
	li	r8, 0
	mtspr	SPRN_MI_CTR, r8		/* remove PINNED ITLB entries */
	lis	r10, MD_TWAM@h
	mtspr	SPRN_MD_CTR, r10	/* remove PINNED DTLB entries */

	tlbia			/* Invalidate all TLB entries */

	lis	r8, MI_APG_INIT@h	/* Set protection modes */
	ori	r8, r8, MI_APG_INIT@l
	mtspr	SPRN_MI_AP, r8
	lis	r8, MD_APG_INIT@h
	ori	r8, r8, MD_APG_INIT@l
	mtspr	SPRN_MD_AP, r8

	/* Map the lower RAM (up to 32 Mbytes) into the ITLB and DTLB */
	lis	r8, MI_RSV4I@h
	ori	r8, r8, 0x1c00
	oris	r12, r10, MD_RSV4I@h
	ori	r12, r12, 0x1c00
	li	r9, 4				/* up to 4 pages of 8M */
	mtctr	r9
	lis	r9, KERNELBASE@h		/* Create vaddr for TLB */
	li	r10, MI_PS8MEG | _PMD_ACCESSED | MI_SVALID
	li	r11, MI_BOOTINIT		/* Create RPN for address 0 */
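	/* Each iteration loads one 8M ITLB and one 8M DTLB entry, then
	 * advances to the next TLB index and the next 8M of address space.
	 */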
1:
	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */
	addi	r8, r8, 0x100
	ori	r0, r9, MI_EVALID		/* Mark it valid */
	mtspr	SPRN_MI_EPN, r0
	mtspr	SPRN_MI_TWC, r10
	mtspr	SPRN_MI_RPN, r11		/* Store TLB entry */
	mtspr	SPRN_MD_CTR, r12
	addi	r12, r12, 0x100
	mtspr	SPRN_MD_EPN, r0
	mtspr	SPRN_MD_TWC, r10
	mtspr	SPRN_MD_RPN, r11
	addis	r9, r9, 0x80
	addis	r11, r11, 0x80

	bdnz	1b

	/* Since the cache is enabled according to the information we
	 * just loaded into the TLB, invalidate and enable the caches here.
	 * We should probably check/set other modes later.
	 */
	lis	r8, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	lis	r8, IDC_ENABLE@h
	mtspr	SPRN_IC_CST, r8
	mtspr	SPRN_DC_CST, r8
	/* Disable debug mode entry on breakpoints */
	mfspr	r8, SPRN_DER
#ifdef CONFIG_PERF_EVENTS
	rlwinm	r8, r8, 0, ~0xc
#else
	rlwinm	r8, r8, 0, ~0x8
#endif
	mtspr	SPRN_DER, r8
	blr

#ifdef CONFIG_PIN_TLB
_GLOBAL(mmu_pin_tlb)
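	/* Turn off translation (and RI) via rfi to the physical address of 1:,
	 * re-program the pinned ITLB/DTLB entries, then rfi back to the caller
	 * with the original MSR.
	 */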
	lis	r9, (1f - PAGE_OFFSET)@h
	ori	r9, r9, (1f - PAGE_OFFSET)@l
	mfmsr	r10
	mflr	r11
	li	r12, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	rlwinm	r0, r10, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0
	isync
	.align	4
	mtspr	SPRN_SRR0, r9
	mtspr	SPRN_SRR1, r12
	rfi
1:
	li	r5, 0
	lis	r6, MD_TWAM@h
	mtspr	SPRN_MI_CTR, r5
	mtspr	SPRN_MD_CTR, r6
	tlbia

#ifdef CONFIG_PIN_TLB_TEXT
	LOAD_REG_IMMEDIATE(r5, 28 << 8)
	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
	LOAD_REG_ADDR(r9, _sinittext)
	li	r0, 4
	mtctr	r0

2:	ori	r0, r6, MI_EVALID
	mtspr	SPRN_MI_CTR, r5
	mtspr	SPRN_MI_EPN, r0
	mtspr	SPRN_MI_TWC, r7
	mtspr	SPRN_MI_RPN, r8
	addi	r5, r5, 0x100
	addis	r6, r6, SZ_8M@h
	addis	r8, r8, SZ_8M@h
	cmplw	r6, r9
	bdnzt	lt, 2b
	lis	r0, MI_RSV4I@h
	mtspr	SPRN_MI_CTR, r0
#endif
	LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
#ifdef CONFIG_PIN_TLB_DATA
	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
	LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
#ifdef CONFIG_PIN_TLB_IMMR
	li	r0, 3
#else
	li	r0, 4
#endif
	mtctr	r0
	cmpwi	r4, 0
	beq	4f
	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
	LOAD_REG_ADDR(r9, _sinittext)

2:	ori	r0, r6, MD_EVALID
	mtspr	SPRN_MD_CTR, r5
	mtspr	SPRN_MD_EPN, r0
	mtspr	SPRN_MD_TWC, r7
	mtspr	SPRN_MD_RPN, r8
	addi	r5, r5, 0x100
	addis	r6, r6, SZ_8M@h
	addis	r8, r8, SZ_8M@h
	cmplw	r6, r9
	bdnzt	lt, 2b

4:	LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
2:	ori	r0, r6, MD_EVALID
	mtspr	SPRN_MD_CTR, r5
	mtspr	SPRN_MD_EPN, r0
	mtspr	SPRN_MD_TWC, r7
	mtspr	SPRN_MD_RPN, r8
	addi	r5, r5, 0x100
	addis	r6, r6, SZ_8M@h
	addis	r8, r8, SZ_8M@h
	cmplw	r6, r3
	bdnzt	lt, 2b
#endif
#ifdef CONFIG_PIN_TLB_IMMR
	LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
	LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED | _PMD_ACCESSED)
	mfspr   r8, SPRN_IMMR
	rlwinm	r8, r8, 0, 0xfff80000
	ori	r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
			_PAGE_NO_CACHE | _PAGE_PRESENT
	mtspr	SPRN_MD_CTR, r5
	mtspr	SPRN_MD_EPN, r0
	mtspr	SPRN_MD_TWC, r7
	mtspr	SPRN_MD_RPN, r8
#endif
#if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
	lis	r0, (MD_RSV4I | MD_TWAM)@h
	mtspr	SPRN_MD_CTR, r0
#endif
	mtspr	SPRN_SRR1, r10
	mtspr	SPRN_SRR0, r11
	rfi
#endif /* CONFIG_PIN_TLB */

/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
	.align	PAGE_SHIFT
empty_zero_page:
	.space	PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE table pointers, usually one for the kernel and one for
 * the current user process, each pointing to its root page table (pgdir).
 */
	.globl	abatron_pteptrs
abatron_pteptrs:
	.space	8
