/*
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 * 	Author: MontaVista Software, Inc.
 *         	frank_rowand@mvista.com or source@mvista.com
 * 	   	debbie_chu@mvista.com
 *
 *
 *    Module name: head_4xx.S
 *
 *    Description:
 *      Kernel execution entry point code.
 *
 *    This program is free software; you can redistribute it and/or
 *    modify it under the terms of the GNU General Public License
 *    as published by the Free Software Foundation; either version
 *    2 of the License, or (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=96m")
 *   r7 - End of kernel command line string
 *
 * This is all going to change RSN when we add bi_recs.......  -- Dan
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);

	/* Save parameters we are passed.
	*/
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 16 Meg mapped into TLB entries, and the caches
 * ready to work.
 */
turn_on_mmu:
	lis	r0,MSR_KERNEL@h
	ori	r0,r0,MSR_KERNEL@l
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	rfi				/* enables MMU */
	b	.			/* prevent prefetch past rfi */

/*
 * This area is used for temporarily saving registers during the
 * critical exception prolog.
 */
	. = 0xc0
crit_save:
_ENTRY(crit_r10)
	.space	4
_ENTRY(crit_r11)
	.space	4
_ENTRY(crit_srr0)
	.space	4
_ENTRY(crit_srr1)
	.space	4
_ENTRY(saved_ksp_limit)
	.space	4

/*
 * Exception vector entry code. This code runs with address translation
 * turned off (i.e. using physical addresses). We assume SPRG_THREAD has
 * the physical address of the current task thread_struct.
 * Note that we have to have decremented r1 before we write to any fields
 * of the exception frame, since a critical interrupt could occur at any
 * time, and it will write to the area immediately below the current r1.
 */
#define NORMAL_EXCEPTION_PROLOG						     \
	mtspr	SPRN_SPRG_SCRATCH0,r10;	/* save two registers to work with */\
	mtspr	SPRN_SPRG_SCRATCH1,r11;					     \
	mtspr	SPRN_SPRG_SCRATCH2,r1;					     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	beq	1f;							     \
	mfspr	r1,SPRN_SPRG_THREAD;	/* if from user, start at top of   */\
	lwz	r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack   */\
	addi	r1,r1,THREAD_SIZE;					     \
1:	subi	r1,r1,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
	tophys(r11,r1);							     \
	stw	r10,_CCR(r11);          /* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mfspr	r10,SPRN_SPRG_SCRATCH0;					     \
	stw	r10,GPR10(r11);						     \
	mfspr	r12,SPRN_SPRG_SCRATCH1;					     \
	stw	r12,GPR11(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r10,SPRN_SPRG_SCRATCH2;					     \
	mfspr	r12,SPRN_SRR0;						     \
	stw	r10,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR1;						     \
	stw	r10,0(r11);						     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)
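
	/*
	 * Note (derived from the prolog above, for symmetry with the
	 * critical-prolog state comment further down):
	 * on exit from NORMAL_EXCEPTION_PROLOG,
	 * r9 holds SRR1 with MSR_WE cleared,
	 * r10 holds the interrupted r1,
	 * r11 is the physical address of the exception frame,
	 * r12 holds SRR0,
	 * r1 is the (virtual) kernel stack pointer for that frame,
	 * CR0.EQ is set iff the exception came from kernel mode,
	 * and r0, r3-r12, LR and CR have been saved into the frame.
	 */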

/*
 * Exception prolog for critical exceptions.  This is a little different
 * from the normal exception prolog above since a critical exception
 * can potentially occur at any point during normal exception processing.
 * Thus we cannot use the same SPRG registers as the normal prolog above.
 * Instead we use a couple of words of memory at low physical addresses.
 * This is OK since we don't support SMP on these processors.
 */
#define CRITICAL_EXCEPTION_PROLOG					     \
	stw	r10,crit_r10@l(0);	/* save two registers to work with */\
	stw	r11,crit_r11@l(0);					     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_SRR3;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	lis	r11,critirq_ctx@ha;					     \
	tophys(r11,r11);						     \
	lwz	r11,critirq_ctx@l(r11);					     \
	beq	1f;							     \
	/* COMING FROM USER MODE */					     \
	mfspr	r11,SPRN_SPRG_THREAD;	/* if from user, start at top of   */\
	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
1:	addi	r11,r11,THREAD_SIZE-INT_FRAME_SIZE; /* Alloc an excpt frm  */\
	tophys(r11,r11);						     \
	stw	r10,_CCR(r11);          /* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r12,SPRN_DEAR;		/* save DEAR and ESR in the frame  */\
	stw	r12,_DEAR(r11);		/* since they may have had stuff   */\
	mfspr	r9,SPRN_ESR;		/* in them at the point where the  */\
	stw	r9,_ESR(r11);		/* exception was taken		   */\
	mfspr	r12,SPRN_SRR2;						     \
	stw	r1,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR3;						     \
	stw	r1,0(r11);						     \
	tovirt(r1,r11);							     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)

	/*
	 * State at this point:
	 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
	 * r10 saved in crit_r10 and in stack frame, trashed
	 * r11 saved in crit_r11 and in stack frame,
	 *	now phys stack/exception frame pointer
	 * r12 saved in stack frame, now saved SRR2
	 * CR saved in stack frame, CR0.EQ = !SRR3.PR
	 * LR, DEAR, ESR in stack frame
	 * r1 saved in stack frame, now virt stack/excframe pointer
	 * r0, r3-r8 saved in stack frame
	 */

/*
 * Exception vectors.
 */
#define	START_EXCEPTION(n, label)					     \
	. = n;								     \
label:

#define EXCEPTION(n, label, hdlr, xfer)				\
	START_EXCEPTION(n, label);				\
	NORMAL_EXCEPTION_PROLOG;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	xfer(n, hdlr)

#define CRITICAL_EXCEPTION(n, label, hdlr)			\
	START_EXCEPTION(n, label);				\
	CRITICAL_EXCEPTION_PROLOG;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  NOCOPY, crit_transfer_to_handler,	\
			  ret_from_crit_exc)

#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	lis	r10,msr@h;					\
	ori	r10,r10,msr@l;					\
	copyee(r10, r9);					\
	bl	tfer;		 				\
	.long	hdlr;						\
	.long	ret
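
/*
 * Note: the two .long words that EXC_XFER_TEMPLATE emits after the
 * "bl tfer" are data, not instructions; the transfer routine is
 * expected to pick up the handler and return addresses through LR,
 * which points at the word immediately following the bl.  This is a
 * reading of the macro above, not a change to it.
 */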

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
			  ret_from_except)
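
/*
 * A short note on the variants above: COPY_EE copies the MSR_EE bit
 * from the saved SRR1 in r9 into the MSR the handler will run with,
 * so external interrupts stay enabled only if they were enabled when
 * the exception was taken; NOCOPY leaves EE off.  The _LITE forms use
 * the lightweight transfer/return paths, and the adjusted trap numbers
 * (n+1 here, n+2 for critical exceptions) appear to encode that in the
 * low bits of _TRAP for the return path.
 */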


/*
 * 0x0100 - Critical Interrupt Exception
 */
	CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)

/*
 * 0x0200 - Machine Check Exception
 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)

/*
 * 0x0300 - Data Storage Exception
 * This happens for just a few reasons.  U0 set (but we don't do that),
 * or zone protection fault (user violation, write to protected page).
 * If this is just an update of modified status, we do that quickly
 * and exit.  Otherwise, we call heavyweight functions to do the work.
 */
	START_EXCEPTION(0x0300,	DataStorage)
	mtspr	SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
	mtspr	SPRN_SPRG_SCRATCH1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG_SCRATCH3, r12
	mtspr	SPRN_SPRG_SCRATCH4, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG_SCRATCH6, r11
	mtspr	SPRN_SPRG_SCRATCH5, r12
#endif

	/* First, check if it was a zone fault (which means a user
	* tried to access a kernel or read-protected page - always
	* a SEGV).  All other faults here must be stores, so no
	* need to check ESR_DST as well. */
	mfspr	r10, SPRN_ESR
	andis.	r10, r10, ESR_DIZ@h
	bne	2f

	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
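	/* Note on the address arithmetic below: the first rlwimi inserts
	 * the top 10 bits of the faulting address (the pgd index, times 4)
	 * into bits 20-29 of the page-directory pointer, and the second
	 * does the same with EA bits 10-19 (the pte index) and the pte-page
	 * base extracted from the L1 entry.  Both rely on those tables
	 * being 4k aligned so the inserted field lands in otherwise-zero
	 * bits of the base address.
	 */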
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r11, 0(r11)		/* Get L1 entry */
	rlwinm.	r12, r11, 0, 0, 19	/* Extract L2 (pte) base address */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */

	andi.	r9, r11, _PAGE_RW	/* Is it writeable? */
	beq	2f			/* Bail if not */

	/* Update 'changed'.
	*/
	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	stw	r11, 0(r12)		/* Update Linux page table */

	/* Most of the Linux PTE is ready to load into the TLB LO.
	 * We set ZSEL, where only the LS-bit determines user access.
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * If shared is set, we cause a zero PID->TID load.
	 * Many of these bits are software only.  Bits we don't set
	 * here we (properly should) assume have the appropriate value.
	 */
	li	r12, 0x0ce2
	andc	r11, r11, r12		/* Make sure 20, 21 are zero */

	/* find the TLB index that caused the fault.  It has to be here.
	*/
	tlbsx	r9, 0, r10

	tlbwe	r11, r9, TLB_DATA		/* Load TLB LO */
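	/* Only the LO (data) word is rewritten here: tlbsx just located
	 * the entry that already matched the faulting address, so its
	 * TAG word is still valid and does not need to be reloaded.
	 */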

	/* Done...restore registers and get out of here.
	*/
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG_SCRATCH5
	mfspr	r11, SPRN_SPRG_SCRATCH6
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG_SCRATCH4
	mfspr	r12, SPRN_SPRG_SCRATCH3
#endif
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	PPC405_ERR77_SYNC
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG_SCRATCH5
	mfspr	r11, SPRN_SPRG_SCRATCH6
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG_SCRATCH4
	mfspr	r12, SPRN_SPRG_SCRATCH3
#endif
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	b	DataAccess

/*
 * 0x0400 - Instruction Storage Exception
 * This is caused by a fetch from non-execute or guarded pages.
 */
	START_EXCEPTION(0x0400, InstructionAccess)
	NORMAL_EXCEPTION_PROLOG
	mr	r4,r12			/* Pass SRR0 as arg2 */
	li	r5,0			/* Pass zero as arg3 */
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* 0x0500 - External Interrupt Exception */
	EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* 0x0600 - Alignment Exception */
	START_EXCEPTION(0x0600, Alignment)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR and save it */
	stw	r4,_DEAR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* 0x0700 - Program Exception */
	START_EXCEPTION(0x0700, ProgramCheck)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r4,SPRN_ESR		/* Grab the ESR and save it */
	stw	r4,_ESR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x700, program_check_exception)

	EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)

/* 0x0C00 - System Call Exception */
	START_EXCEPTION(0x0C00,	SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)

/* 0x1000 - Programmable Interval Timer (PIT) Exception */
	START_EXCEPTION(0x1000, Decrementer)
	NORMAL_EXCEPTION_PROLOG
	lis	r0,TSR_PIS@h
	mtspr	SPRN_TSR,r0		/* Clear the PIT exception */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x1000, timer_interrupt)

#if 0
/* NOTE:
 * FIT and WDT handlers are not implemented yet.
 */

/* 0x1010 - Fixed Interval Timer (FIT) Exception
*/
	STND_EXCEPTION(0x1010,	FITException,		unknown_exception)

/* 0x1020 - Watchdog Timer (WDT) Exception
*/
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
#endif
#endif

/* 0x1100 - Data TLB Miss Exception
 * As the name implies, translation is not in the MMU, so search the
 * page tables and fix it.  The only purpose of this function is to
 * load TLB entries from the page table if they exist.
 */
	START_EXCEPTION(0x1100,	DTLBMiss)
	mtspr	SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
	mtspr	SPRN_SPRG_SCRATCH1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG_SCRATCH3, r12
	mtspr	SPRN_SPRG_SCRATCH4, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG_SCRATCH6, r11
	mtspr	SPRN_SPRG_SCRATCH5, r12
#endif
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r12, 0(r11)		/* Get L1 entry */
	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */
	andi.	r9, r11, _PAGE_PRESENT
	beq	5f

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 0(r12)

	/* Create TLB tag.  This is the faulting address plus a static
	 * set of bits.  These are size, valid, E, U0.
	*/
	li	r12, 0x00c0
	rlwimi	r10, r12, 0, 20, 31
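	/* Note: assuming the usual 40x definitions (TLB_VALID = 0x40 and
	 * TLB_PAGESZ(PAGESZ_4K) = 0x80), the 0x00c0 merged into the low
	 * bits of the faulting address yields a valid 4k-page tag; the
	 * E and U0 bits stay clear.
	 */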

	b	finish_tlb_load

2:	/* Check for possible large-page pmd entry */
	rlwinm.	r9, r12, 2, 22, 24
	beq	5f

	/* Create TLB tag.  This is the faulting address, plus a static
	 * set of bits (valid, E, U0) plus the size from the PMD.
	 */
	ori	r9, r9, 0x40
	rlwimi	r10, r9, 0, 20, 31
	mr	r11, r12

	b	finish_tlb_load

5:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG_SCRATCH5
	mfspr	r11, SPRN_SPRG_SCRATCH6
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG_SCRATCH4
	mfspr	r12, SPRN_SPRG_SCRATCH3
#endif
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	b	DataAccess

/* 0x1200 - Instruction TLB Miss Exception
 * Nearly the same as above, except we get our information from different
 * registers and bailout to a different point.
 */
	START_EXCEPTION(0x1200,	ITLBMiss)
	mtspr	SPRN_SPRG_SCRATCH0, r10	 /* Save some working registers */
	mtspr	SPRN_SPRG_SCRATCH1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG_SCRATCH3, r12
	mtspr	SPRN_SPRG_SCRATCH4, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG_SCRATCH6, r11
	mtspr	SPRN_SPRG_SCRATCH5, r12
#endif
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r12, 0(r11)		/* Get L1 entry */
	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */
	andi.	r9, r11, _PAGE_PRESENT
	beq	5f

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 0(r12)

	/* Create TLB tag.  This is the faulting address plus a static
	 * set of bits.  These are size, valid, E, U0.
	*/
	li	r12, 0x00c0
	rlwimi	r10, r12, 0, 20, 31

	b	finish_tlb_load

2:	/* Check for possible large-page pmd entry */
	rlwinm.	r9, r12, 2, 22, 24
	beq	5f

	/* Create TLB tag.  This is the faulting address, plus a static
	 * set of bits (valid, E, U0) plus the size from the PMD.
	 */
	ori	r9, r9, 0x40
	rlwimi	r10, r9, 0, 20, 31
	mr	r11, r12

	b	finish_tlb_load

5:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG_SCRATCH5
	mfspr	r11, SPRN_SPRG_SCRATCH6
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG_SCRATCH4
	mfspr	r12, SPRN_SPRG_SCRATCH3
#endif
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	b	InstructionAccess

	EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
#ifdef CONFIG_IBM405_ERR51
	/* 405GP errata 51 */
	START_EXCEPTION(0x1700, Trap_17)
	b DTLBMiss
#else
	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
#endif
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)

/* Check for a single step debug exception while in an exception
 * handler before state has been saved.  This is to catch the case
 * where an instruction that we are trying to single step causes
 * an exception (eg ITLB/DTLB miss) and thus the first instruction of
 * the exception handler generates a single step debug exception.
 *
 * If we get a debug trap on the first instruction of an exception handler,
 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
 * a critical exception, so we are using SPRN_CSRR1 to manipulate the MSR).
 * The exception handler was handling a non-critical interrupt, so it will
 * save (and later restore) the MSR via SPRN_SRR1, which will still have
 * the MSR_DE bit set.
 */
	/* 0x2000 - Debug Exception */
	START_EXCEPTION(0x2000, DebugTrap)
	CRITICAL_EXCEPTION_PROLOG

	/*
	 * If this is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically).  We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the SRR3 value and clearing the debug status.
	 */
	mfspr	r10,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r10,r10,DBSR_IC@h
	beq+	2f

	andi.	r10,r9,MSR_IR|MSR_PR	/* check supervisor + MMU off */
	beq	1f			/* branch and fix it up */

	mfspr   r10,SPRN_SRR2		/* Faulting instruction address */
	cmplwi  r10,0x2100
	bgt+    2f			/* address above exception vectors */

	/* here it looks like we got an inappropriate debug exception. */
1:	rlwinm	r9,r9,0,~MSR_DE		/* clear DE in the SRR3 value */
	lis	r10,DBSR_IC@h		/* clear the IC event */
	mtspr	SPRN_DBSR,r10
	/* restore state and get out */
	lwz	r10,_CCR(r11)
	lwz	r0,GPR0(r11)
	lwz	r1,GPR1(r11)
	mtcrf	0x80,r10
	mtspr	SPRN_SRR2,r12
	mtspr	SPRN_SRR3,r9
	lwz	r9,GPR9(r11)
	lwz	r12,GPR12(r11)
	lwz	r10,crit_r10@l(0)
	lwz	r11,crit_r11@l(0)
	PPC405_ERR77_SYNC
	rfci
	b	.

	/* continue normal handling for a critical exception... */
2:	mfspr	r4,SPRN_DBSR
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(DebugException, 0x2002, \
		(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
		NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)

/*
 * The other Data TLB exceptions bail out to this point
 * if they can't resolve the lightweight TLB fault.
 */
DataAccess:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	EXC_XFER_EE_LITE(0x300, handle_page_fault)

/* Other PowerPC processors, namely those derived from the 6xx-series
 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
 * However, for the 4xx-series processors these are neither defined nor
 * reserved.
 */

	/* Damn, I came up one instruction too many to fit into the
	 * exception space :-).  Both the instruction and data TLB
	 * miss get to this point to load the TLB.
	 * 	r10 - TLB_TAG value
	 * 	r11 - Linux PTE
	 *	r12, r9 - available to use
	 *	PID - loaded with proper value when we get here
	 *	Upon exit, we reload everything and RFI.
	 * Actually, it will fit now, but oh well.....a common place
	 * to load the TLB.
	 */
tlb_4xx_index:
	.long	0
finish_tlb_load:
	/* load the next available TLB index.
	*/
	lwz	r9, tlb_4xx_index@l(0)
	addi	r9, r9, 1
	andi.	r9, r9, (PPC40X_TLB_SIZE-1)
	stw	r9, tlb_4xx_index@l(0)
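	/* This gives a simple round-robin replacement policy over the
	 * software-loaded TLB: the saved index is advanced by one and
	 * wrapped with the andi. mask, which works because
	 * PPC40X_TLB_SIZE is a power of two.
	 */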

6:
	/*
	 * Clear out the software-only bits in the PTE to generate the
	 * TLB_DATA value.  These are the bottom 2 bits of the RPN, the
	 * top 3 bits of the zone field, and M.
	 */
	li	r12, 0x0ce2
	andc	r11, r11, r12

	tlbwe	r11, r9, TLB_DATA		/* Load TLB LO */
	tlbwe	r10, r9, TLB_TAG		/* Load TLB HI */

	/* Done...restore registers and get out of here.
	*/
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG_SCRATCH5
	mfspr	r11, SPRN_SPRG_SCRATCH6
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG_SCRATCH4
	mfspr	r12, SPRN_SPRG_SCRATCH3
#endif
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	PPC405_ERR77_SYNC
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

/* extern void giveup_fpu(struct task_struct *prev)
 *
 * The PowerPC 4xx family of processors does not have an FPU, so this just
 * returns.
 */
_ENTRY(giveup_fpu)
	blr

/* This is where the main kernel code starts.
 */
start_here:

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
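	/* The stwu above both reserves an initial frame at the top of
	 * init_thread_union (STACK_FRAME_OVERHEAD below the end of the
	 * stack) and stores a zero back-chain word there, so stack
	 * unwinding terminates at this frame.
	 */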

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

/* Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 4xx, all we have to do is invalidate the TLB to clear
 * the old 16M byte TLB mappings.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	lis	r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
	ori	r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
	b	.		/* prevent prefetch past rfi */

/* Load up the kernel context */
2:
	sync			/* Flush to memory before changing TLB */
	tlbia
	isync			/* Flush shadow TLBs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	*/
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	lis	r4,MSR_KERNEL@h
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */
	b	.		/* prevent prefetch past rfi */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 16 MBytes of memory 1:1
 * virtual to physical and more importantly sets the cache mode.
 */
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
	isync

	/* We should still be executing code at physical address 0x0000xxxx
	 * at this point. However, start_here is at virtual address
	 * 0xC000xxxx. So, set up a TLB mapping to cover this once
	 * translation is enabled.
	 */

	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
	ori	r3,r3,KERNELBASE@l
	tophys(r4,r3)			/* Load the kernel physical address */

	iccci	r0,r3			/* Invalidate the i-cache before use */

	/* Load the kernel PID.
	*/
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Configure and load one entry into TLB slot 63 */
	clrrwi	r4,r4,10		/* Mask off the real page number */
	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */

	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))

	li	r0,63			/* TLB slot 63 */

	tlbwe	r4,r0,TLB_DATA		/* Load the data portion of the entry */
	tlbwe	r3,r0,TLB_TAG		/* Load the tag portion of the entry */
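	/* Slot 63 now pins a single writable, executable 16MB mapping of
	 * the lowest 16MB of RAM at KERNELBASE (the clrrwi by 10 above
	 * masks both addresses down to their EPN/RPN fields).  Early boot
	 * runs entirely on this one entry until MMU_init sets up the real
	 * mappings and the TLB is invalidated again.
	 */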

#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)

	/* Load a TLB entry for the UART, so that ppc4xx_progress() can use
	 * the UARTs nice and early.  We use a 4k real==virtual mapping. */

	lis	r3,SERIAL_DEBUG_IO_BASE@h
	ori	r3,r3,SERIAL_DEBUG_IO_BASE@l
	mr	r4,r3
	clrrwi	r4,r4,12
	ori	r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)

	clrrwi	r3,r3,12
	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))

	li	r0,0			/* TLB slot 0 */
	tlbwe	r4,r0,TLB_DATA
	tlbwe	r3,r0,TLB_TAG
#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */

	isync

	/* Establish the exception vector base
	*/
	lis	r4,KERNELBASE@h		/* EVPR only uses the high 16-bits */
	tophys(r0,r4)			/* Use the physical address */
	mtspr	SPRN_EVPR,r0

	blr

_GLOBAL(abort)
	mfspr	r13,SPRN_DBCR0
	oris	r13,r13,DBCR0_RST_SYSTEM@h
	mtspr	SPRN_DBCR0,r13

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif
	sync
	mtspr	SPRN_PID,r3
	isync				/* Need an isync to flush shadow */
					/* TLBs after changing PID */
	blr

/* We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8