/*
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *
 *
 *    Module name: head_40x.S
 *
 *    Description:
 *      Kernel execution entry point code.
 *
 *    This program is free software; you can redistribute it and/or
 *    modify it under the terms of the GNU General Public License
 *    as published by the Free Software Foundation; either version
 *    2 of the License, or (at your option) any later version.
 *
 */

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>

/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=96m")
 *   r7 - End of kernel command line string
 *
 * This is all going to change RSN when we add bi_recs.......  -- Dan
 */
	.section	.text.head, "ax"
_ENTRY(_stext);
_ENTRY(_start);

	/* Save parameters we are passed.
	*/
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/* We have to turn on the MMU right away so we get cache modes
	 * set correctly.
	 */
	bl	initial_mmu

/* We now have the lower 16 Meg mapped into TLB entries, and the caches
 * ready to work.
 */
turn_on_mmu:
	lis	r0,MSR_KERNEL@h
	ori	r0,r0,MSR_KERNEL@l
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0
	SYNC
	rfi				/* enables MMU */
	b	.			/* prevent prefetch past rfi */

/*
 * This area is used for temporarily saving registers during the
 * critical exception prolog.
 */
	. = 0xc0
crit_save:
_ENTRY(crit_r10)
	.space	4
_ENTRY(crit_r11)
	.space	4

/*
 * Exception vector entry code. This code runs with address translation
 * turned off (i.e. using physical addresses). We assume SPRG3 has the
 * physical address of the current task thread_struct.
 * Note that we have to have decremented r1 before we write to any fields
 * of the exception frame, since a critical interrupt could occur at any
 * time, and it will write to the area immediately below the current r1.
 */
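/* A rough C sketch of the stack handling in the prolog below (purely
 * illustrative; kernel_stack_top() is a hypothetical helper, not a symbol
 * used by this code):
 *
 *	if (srr1 & MSR_PR)			// exception came from user mode
 *		r1 = kernel_stack_top(current);	// top of this thread's kernel stack
 *	// otherwise keep the interrupted kernel r1
 *	r1 -= INT_FRAME_SIZE;			// allocate the exception frame
 *	frame = tophys(r1);			// fill it through its physical address
 */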
#define NORMAL_EXCEPTION_PROLOG						     \
	mtspr	SPRN_SPRG0,r10;		/* save two registers to work with */\
	mtspr	SPRN_SPRG1,r11;						     \
	mtspr	SPRN_SPRG2,r1;						     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_SRR1;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	beq	1f;							     \
	mfspr	r1,SPRN_SPRG3;		/* if from user, start at top of   */\
	lwz	r1,THREAD_INFO-THREAD(r1); /* this thread's kernel stack   */\
	addi	r1,r1,THREAD_SIZE;					     \
1:	subi	r1,r1,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
	tophys(r11,r1);							     \
	stw	r10,_CCR(r11);          /* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mfspr	r10,SPRN_SPRG0;						     \
	stw	r10,GPR10(r11);						     \
	mfspr	r12,SPRN_SPRG1;						     \
	stw	r12,GPR11(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r10,SPRN_SPRG2;						     \
	mfspr	r12,SPRN_SRR0;						     \
	stw	r10,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR1;						     \
	stw	r10,0(r11);						     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)

/*
 * Exception prolog for critical exceptions.  This is a little different
 * from the normal exception prolog above since a critical exception
 * can potentially occur at any point during normal exception processing.
 * Thus we cannot use the same SPRG registers as the normal prolog above.
 * Instead we use a couple of words of memory at low physical addresses.
 * This is OK since we don't support SMP on these processors.
 */
#define CRITICAL_EXCEPTION_PROLOG					     \
	stw	r10,crit_r10@l(0);	/* save two registers to work with */\
	stw	r11,crit_r11@l(0);					     \
	mfcr	r10;			/* save CR in r10 for now	   */\
	mfspr	r11,SPRN_SRR3;		/* check whether user or kernel    */\
	andi.	r11,r11,MSR_PR;						     \
	lis	r11,critical_stack_top@h;				     \
	ori	r11,r11,critical_stack_top@l;				     \
	beq	1f;							     \
	/* COMING FROM USER MODE */					     \
	mfspr	r11,SPRN_SPRG3;		/* if from user, start at top of   */\
	lwz	r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
	addi	r11,r11,THREAD_SIZE;					     \
1:	subi	r11,r11,INT_FRAME_SIZE;	/* Allocate an exception frame     */\
	tophys(r11,r11);						     \
	stw	r10,_CCR(r11);          /* save various registers	   */\
	stw	r12,GPR12(r11);						     \
	stw	r9,GPR9(r11);						     \
	mflr	r10;							     \
	stw	r10,_LINK(r11);						     \
	mfspr	r12,SPRN_DEAR;		/* save DEAR and ESR in the frame  */\
	stw	r12,_DEAR(r11);		/* since they may have had stuff   */\
	mfspr	r9,SPRN_ESR;		/* in them at the point where the  */\
	stw	r9,_ESR(r11);		/* exception was taken		   */\
	mfspr	r12,SPRN_SRR2;						     \
	stw	r1,GPR1(r11);						     \
	mfspr	r9,SPRN_SRR3;						     \
	stw	r1,0(r11);						     \
	tovirt(r1,r11);							     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)

	/*
	 * State at this point:
	 * r9 saved in stack frame, now saved SRR3 & ~MSR_WE
	 * r10 saved in crit_r10 and in stack frame, trashed
	 * r11 saved in crit_r11 and in stack frame,
	 *	now phys stack/exception frame pointer
	 * r12 saved in stack frame, now saved SRR2
	 * CR saved in stack frame, CR0.EQ = !SRR3.PR
	 * LR, DEAR, ESR in stack frame
	 * r1 saved in stack frame, now virt stack/excframe pointer
	 * r0, r3-r8 saved in stack frame
	 */

/*
 * Exception vectors.
 */
#define	START_EXCEPTION(n, label)					     \
	. = n;								     \
label:

#define EXCEPTION(n, label, hdlr, xfer)				\
	START_EXCEPTION(n, label);				\
	NORMAL_EXCEPTION_PROLOG;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	xfer(n, hdlr)

#define CRITICAL_EXCEPTION(n, label, hdlr)			\
	START_EXCEPTION(n, label);				\
	CRITICAL_EXCEPTION_PROLOG;				\
	addi	r3,r1,STACK_FRAME_OVERHEAD;			\
	EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
			  NOCOPY, crit_transfer_to_handler,	\
			  ret_from_crit_exc)

#define EXC_XFER_TEMPLATE(hdlr, trap, msr, copyee, tfer, ret)	\
	li	r10,trap;					\
	stw	r10,_TRAP(r11);					\
	lis	r10,msr@h;					\
	ori	r10,r10,msr@l;					\
	copyee(r10, r9);					\
	bl	tfer;		 				\
	.long	hdlr;						\
	.long	ret

#define COPY_EE(d, s)		rlwimi d,s,0,16,16
#define NOCOPY(d, s)
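/* COPY_EE copies the interrupted context's external-interrupt enable into
 * the MSR value handed to the handler; NOCOPY leaves it cleared.  In rough
 * C (bit 16 in PowerPC big-endian bit numbering is MSR_EE):
 *
 *	d = (d & ~MSR_EE) | (s & MSR_EE);
 */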

#define EXC_XFER_STD(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, NOCOPY, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_LITE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, NOCOPY, transfer_to_handler, \
			  ret_from_except)

#define EXC_XFER_EE(n, hdlr)		\
	EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, COPY_EE, transfer_to_handler_full, \
			  ret_from_except_full)

#define EXC_XFER_EE_LITE(n, hdlr)	\
	EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, COPY_EE, transfer_to_handler, \
			  ret_from_except)


/*
 * 0x0100 - Critical Interrupt Exception
 */
	CRITICAL_EXCEPTION(0x0100, CriticalInterrupt, unknown_exception)

/*
 * 0x0200 - Machine Check Exception
 */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)

/*
 * 0x0300 - Data Storage Exception
 * This happens for only a few reasons: U0 is set (which we don't do),
 * or a zone protection fault (user violation, or a write to a protected
 * page).  If this is just an update of modified status, we do that
 * quickly and exit.  Otherwise, we call heavyweight functions to do the
 * work.
 */
	START_EXCEPTION(0x0300,	DataStorage)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif

	/* First, check if it was a zone fault (which means a user
	 * tried to access a kernel or read-protected page - always
	 * a SEGV).  All other faults here must be stores, so no
	 * need to check ESR_DST as well. */
	mfspr	r10, SPRN_ESR
	andis.	r10, r10, ESR_DIZ@h
	bne	2f

	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
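	/* Roughly, the walk below does the following (a C sketch of the
	 * rlwimi/lwz sequence that follows, not a separate implementation):
	 *
	 *	if (addr >= PAGE_OFFSET) {
	 *		pgd = swapper_pg_dir;		// kernel mapping, PID 0
	 *	} else {
	 *		pgd = current thread's pgdir;	// PID left as-is
	 *	}
	 *	pmd = pgd[addr >> 22];			// L1: one word per 4MB region
	 *	if (!pmd)
	 *		goto bail;			// no PTE page mapped here
	 *	pte = ((u32 *)pmd)[(addr >> 12) & 0x3ff]; // L2: one PTE per 4KB page
	 */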
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r11, 0(r11)		/* Get L1 entry */
	rlwinm.	r12, r11, 0, 0, 19	/* Extract L2 (pte) base address */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */

	andi.	r9, r11, _PAGE_RW	/* Is it writeable? */
	beq	2f			/* Bail if not */

	/* Update 'changed'.
	*/
	ori	r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
	stw	r11, 0(r12)		/* Update Linux page table */

	/* Most of the Linux PTE is ready to load into the TLB LO.
	 * We set ZSEL, where only the LS-bit determines user access.
	 * We set execute, because we don't have the granularity to
	 * properly set this at the page level (Linux problem).
	 * If shared is set, we cause a zero PID->TID load.
	 * Many of these bits are software only.  Bits we don't set
	 * here are assumed (as they properly should be) to already
	 * have the appropriate value.
	 */
	li	r12, 0x0ce2
	andc	r11, r11, r12		/* Make sure 20, 21 are zero */

	/* find the TLB index that caused the fault.  It has to be here.
	*/
	tlbsx	r9, 0, r10

	tlbwe	r11, r9, TLB_DATA		/* Load TLB LO */

	/* Done...restore registers and get out of here.
	*/
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	PPC405_ERR77_SYNC
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	DataAccess

/*
 * 0x0400 - Instruction Storage Exception
 * This is caused by a fetch from non-execute or guarded pages.
 */
	START_EXCEPTION(0x0400, InstructionAccess)
	NORMAL_EXCEPTION_PROLOG
	mr	r4,r12			/* Pass SRR0 as arg2 */
	li	r5,0			/* Pass zero as arg3 */
	EXC_XFER_EE_LITE(0x400, handle_page_fault)

/* 0x0500 - External Interrupt Exception */
	EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* 0x0600 - Alignment Exception */
	START_EXCEPTION(0x0600, Alignment)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR and save it */
	stw	r4,_DEAR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_EE(0x600, alignment_exception)

/* 0x0700 - Program Exception */
	START_EXCEPTION(0x0700, ProgramCheck)
	NORMAL_EXCEPTION_PROLOG
	mfspr	r4,SPRN_ESR		/* Grab the ESR and save it */
	stw	r4,_ESR(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0x700, program_check_exception)

	EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_EE)

/* 0x0C00 - System Call Exception */
	START_EXCEPTION(0x0C00,	SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0xc00, DoSyscall)

	EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_EE)

/* 0x1000 - Programmable Interval Timer (PIT) Exception */
	START_EXCEPTION(0x1000, Decrementer)
	NORMAL_EXCEPTION_PROLOG
	lis	r0,TSR_PIS@h
	mtspr	SPRN_TSR,r0		/* Clear the PIT exception */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x1000, timer_interrupt)

#if 0
/* NOTE:
 * FIT and WDT handlers are not implemented yet.
 */

/* 0x1010 - Fixed Interval Timer (FIT) Exception
*/
	STND_EXCEPTION(0x1010,	FITException,		unknown_exception)

/* 0x1020 - Watchdog Timer (WDT) Exception
*/
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WDTException, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WDTException, unknown_exception)
#endif
#endif

/* 0x1100 - Data TLB Miss Exception
 * As the name implies, translation is not in the MMU, so search the
 * page tables and fix it.  The only purpose of this function is to
 * load TLB entries from the page table if they exist.
 */
	START_EXCEPTION(0x1100,	DTLBMiss)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r12, 0(r11)		/* Get L1 entry */
	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */
	andi.	r9, r11, _PAGE_PRESENT
	beq	5f

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 0(r12)

	/* Create TLB tag.  This is the faulting address plus a static
	 * set of bits.  These are size, valid, E, U0.
	*/
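	/* i.e. (a sketch): tag = (fault_addr & ~0xfff) | 0x00c0 - the page
	 * number with the low bits replaced by the static valid/size field
	 * (0x00c0 appears to encode a valid 4K entry).
	 */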
	li	r12, 0x00c0
	rlwimi	r10, r12, 0, 20, 31

	b	finish_tlb_load

2:	/* Check for possible large-page pmd entry */
	rlwinm.	r9, r12, 2, 22, 24
	beq	5f

	/* Create TLB tag.  This is the faulting address, plus a static
	 * set of bits (valid, E, U0) plus the size from the PMD.
	 */
	ori	r9, r9, 0x40
	rlwimi	r10, r9, 0, 20, 31
	mr	r11, r12

	b	finish_tlb_load

5:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	DataAccess

/* 0x1200 - Instruction TLB Miss Exception
 * Nearly the same as above, except we get our information from different
 * registers and bail out to a different point.
 */
	START_EXCEPTION(0x1200,	ITLBMiss)
	mtspr	SPRN_SPRG0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG1, r11
#ifdef CONFIG_403GCX
	stw     r12, 0(r0)
	stw     r9, 4(r0)
	mfcr    r11
	mfspr   r12, SPRN_PID
	stw     r11, 8(r0)
	stw     r12, 12(r0)
#else
	mtspr	SPRN_SPRG4, r12
	mtspr	SPRN_SPRG5, r9
	mfcr	r11
	mfspr	r12, SPRN_PID
	mtspr	SPRN_SPRG7, r11
	mtspr	SPRN_SPRG6, r12
#endif
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	li	r9, 0
	mtspr	SPRN_PID, r9		/* TLB will have 0 TID */
	b	4f

	/* Get the PGD for the current thread.
	 */
3:
	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
4:
	tophys(r11, r11)
	rlwimi	r11, r10, 12, 20, 29	/* Create L1 (pgdir/pmd) address */
	lwz	r12, 0(r11)		/* Get L1 entry */
	andi.	r9, r12, _PMD_PRESENT	/* Check if it points to a PTE page */
	beq	2f			/* Bail if no table */

	rlwimi	r12, r10, 22, 20, 29	/* Compute PTE address */
	lwz	r11, 0(r12)		/* Get Linux PTE */
	andi.	r9, r11, _PAGE_PRESENT
	beq	5f

	ori	r11, r11, _PAGE_ACCESSED
	stw	r11, 0(r12)

	/* Create TLB tag.  This is the faulting address plus a static
	 * set of bits.  These are size, valid, E, U0.
	*/
	li	r12, 0x00c0
	rlwimi	r10, r12, 0, 20, 31

	b	finish_tlb_load

2:	/* Check for possible large-page pmd entry */
	rlwinm.	r9, r12, 2, 22, 24
	beq	5f

	/* Create TLB tag.  This is the faulting address, plus a static
	 * set of bits (valid, E, U0) plus the size from the PMD.
	 */
	ori	r9, r9, 0x40
	rlwimi	r10, r9, 0, 20, 31
	mr	r11, r12

	b	finish_tlb_load

5:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	b	InstructionAccess

	EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
#ifdef CONFIG_IBM405_ERR51
	/* 405GP errata 51 */
	START_EXCEPTION(0x1700, Trap_17)
	b DTLBMiss
#else
	EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_EE)
#endif
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_EE)
	EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_EE)

/* Check for a single step debug exception while in an exception
 * handler before state has been saved.  This is to catch the case
 * where an instruction that we are trying to single step causes
 * an exception (e.g. an ITLB/DTLB miss) and thus the first instruction of
 * the exception handler generates a single step debug exception.
 *
 * If we get a debug trap on the first instruction of an exception handler,
 * we reset the MSR_DE in the _exception handler's_ MSR (the debug trap is
 * a critical exception, so we are using SPRN_SRR3 to manipulate the MSR).
 * The exception handler was handling a non-critical interrupt, so it will
 * save (and later restore) the MSR via SPRN_SRR1, which will still have
 * the MSR_DE bit set.
 */
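/* Roughly, the fixup below amounts to (a C sketch of the assembly that
 * follows, not a separate implementation):
 *
 *	if ((dbsr & DBSR_IC) &&
 *	    (!(srr3 & (MSR_IR | MSR_PR)) || srr2 <= 0x2100)) {
 *		srr3 &= ~MSR_DE;	// pretend DE was off in the handler
 *		dbsr = DBSR_IC;		// writing the bit back clears the event
 *		// then restore the saved state and return with rfci
 *	}
 */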
	/* 0x2000 - Debug Exception */
	START_EXCEPTION(0x2000, DebugTrap)
	CRITICAL_EXCEPTION_PROLOG

	/*
	 * If this is a single step or branch-taken exception in an
	 * exception entry sequence, it was probably meant to apply to
	 * the code where the exception occurred (since exception entry
	 * doesn't turn off DE automatically).  We simulate the effect
	 * of turning off DE on entry to an exception handler by turning
	 * off DE in the SRR3 value and clearing the debug status.
	 */
	mfspr	r10,SPRN_DBSR		/* check single-step/branch taken */
	andis.	r10,r10,DBSR_IC@h
	beq+	2f

	andi.	r10,r9,MSR_IR|MSR_PR	/* check supervisor + MMU off */
	beq	1f			/* branch and fix it up */

	mfspr   r10,SPRN_SRR2		/* Faulting instruction address */
	cmplwi  r10,0x2100
	bgt+    2f			/* address above exception vectors */

	/* here it looks like we got an inappropriate debug exception. */
1:	rlwinm	r9,r9,0,~MSR_DE		/* clear DE in the SRR3 value */
	lis	r10,DBSR_IC@h		/* clear the IC event */
	mtspr	SPRN_DBSR,r10
	/* restore state and get out */
	lwz	r10,_CCR(r11)
	lwz	r0,GPR0(r11)
	lwz	r1,GPR1(r11)
	mtcrf	0x80,r10
	mtspr	SPRN_SRR2,r12
	mtspr	SPRN_SRR3,r9
	lwz	r9,GPR9(r11)
	lwz	r12,GPR12(r11)
	lwz	r10,crit_r10@l(0)
	lwz	r11,crit_r11@l(0)
	PPC405_ERR77_SYNC
	rfci
	b	.

	/* continue normal handling for a critical exception... */
2:	mfspr	r4,SPRN_DBSR
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(DebugException, 0x2002, \
		(MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
		NOCOPY, crit_transfer_to_handler, ret_from_crit_exc)

/*
 * The other Data TLB exceptions bail out to this point
 * if they can't resolve the lightweight TLB fault.
 */
DataAccess:
	NORMAL_EXCEPTION_PROLOG
	mfspr	r5,SPRN_ESR		/* Grab the ESR, save it, pass arg3 */
	stw	r5,_ESR(r11)
	mfspr	r4,SPRN_DEAR		/* Grab the DEAR, save it, pass arg2 */
	EXC_XFER_EE_LITE(0x300, handle_page_fault)

/* Other PowerPC processors, namely those derived from the 6xx-series,
 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
 * However, for the 4xx-series processors these are neither defined nor
 * reserved.
 */

	/* Damn, I came up one instruction too many to fit into the
	 * exception space :-).  Both the instruction and data TLB
	 * miss get to this point to load the TLB.
	 * 	r10 - TLB_TAG value
	 * 	r11 - Linux PTE
	 *	r12, r9 - available to use
	 *	PID - loaded with proper value when we get here
	 *	Upon exit, we reload everything and RFI.
	 * Actually, it will fit now, but oh well.....a common place
	 * to load the TLB.
	 */
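	/* TLB replacement is a simple software round-robin; the index
	 * update below is equivalent to (a sketch):
	 *
	 *	tlb_4xx_index = (tlb_4xx_index + 1) & (PPC40X_TLB_SIZE - 1);
	 */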
tlb_4xx_index:
	.long	0
finish_tlb_load:
	/* load the next available TLB index.
	*/
	lwz	r9, tlb_4xx_index@l(0)
	addi	r9, r9, 1
	andi.	r9, r9, (PPC40X_TLB_SIZE-1)
	stw	r9, tlb_4xx_index@l(0)

6:
	/*
	 * Clear out the software-only bits in the PTE to generate the
	 * TLB_DATA value.  These are the bottom 2 bits of the RPN, the
	 * top 3 bits of the zone field, and M.
	 */
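	/* Equivalently (a sketch): tlb_data = pte & ~0x0ce2; the masked-out
	 * bits live only in the Linux PTE and must not reach the hardware
	 * TLB word.
	 */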
	li	r12, 0x0ce2
	andc	r11, r11, r12

	tlbwe	r11, r9, TLB_DATA		/* Load TLB LO */
	tlbwe	r10, r9, TLB_TAG		/* Load TLB HI */

	/* Done...restore registers and get out of here.
	*/
#ifdef CONFIG_403GCX
	lwz     r12, 12(r0)
	lwz     r11, 8(r0)
	mtspr   SPRN_PID, r12
	mtcr    r11
	lwz     r9, 4(r0)
	lwz     r12, 0(r0)
#else
	mfspr	r12, SPRN_SPRG6
	mfspr	r11, SPRN_SPRG7
	mtspr	SPRN_PID, r12
	mtcr	r11
	mfspr	r9, SPRN_SPRG5
	mfspr	r12, SPRN_SPRG4
#endif
	mfspr	r11, SPRN_SPRG1
	mfspr	r10, SPRN_SPRG0
	PPC405_ERR77_SYNC
	rfi			/* Should sync shadow TLBs */
	b	.		/* prevent prefetch past rfi */

/* extern void giveup_fpu(struct task_struct *prev)
 *
 * The PowerPC 4xx family of processors does not have an FPU, so this just
 * returns.
 */
_ENTRY(giveup_fpu)
	blr

/* This is where the main kernel code starts.
 */
start_here:

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init	/* We have to do this with MMU on */

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

/* Go back to running unmapped so we can load up new values
 * and change to using our exception vectors.
 * On the 4xx, all we have to do is invalidate the TLB to clear
 * the old 16M byte TLB mappings.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	lis	r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@h
	ori	r3,r3,(MSR_KERNEL & ~(MSR_IR|MSR_DR))@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
	b	.		/* prevent prefetch past rfi */

/* Load up the kernel context */
2:
	sync			/* Flush to memory before changing TLB */
	tlbia
	isync			/* Flush shadow TLBs */

	/* set up the PTE pointers for the Abatron bdiGDB.
	*/
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* Must match your Abatron config file */
	tophys(r5,r5)
	stw	r6, 0(r5)

/* Now turn on the MMU for real! */
	lis	r4,MSR_KERNEL@h
	ori	r4,r4,MSR_KERNEL@l
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enable MMU and jump to start_kernel */
	b	.		/* prevent prefetch past rfi */

/* Set up the initial MMU state so we can do the first level of
 * kernel initialization.  This maps the first 16 MBytes of memory 1:1
 * virtual to physical and more importantly sets the cache mode.
 */
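/* In effect, the code below pins one mapping (a sketch in C-like notation,
 * using the macro names that appear in the code that follows):
 *
 *	TLB[63].tag  = KERNELBASE | TLB_VALID | TLB_PAGESZ(PAGESZ_16M);
 *	TLB[63].data = tophys(KERNELBASE) | TLB_WR | TLB_EX;
 */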
initial_mmu:
	tlbia			/* Invalidate all TLB entries */
	isync

	/* We should still be executing code at physical address 0x0000xxxx
	 * at this point. However, start_here is at virtual address
	 * 0xC000xxxx. So, set up a TLB mapping to cover this once
	 * translation is enabled.
	 */

	lis	r3,KERNELBASE@h		/* Load the kernel virtual address */
	ori	r3,r3,KERNELBASE@l
	tophys(r4,r3)			/* Load the kernel physical address */

	iccci	r0,r3			/* Invalidate the i-cache before use */

	/* Load the kernel PID.
	*/
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Configure and load one pinned entry into TLB slot 63.
	 * In case we are pinning TLBs, this slot is reserved by the
	 * other TLB functions.  If not reserving, then it doesn't
	 * matter where it is loaded.
	 */
	clrrwi	r4,r4,10		/* Mask off the real page number */
	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */

	clrrwi	r3,r3,10		/* Mask off the effective page number */
	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_16M))

	li	r0,63			/* TLB slot 63 */

	tlbwe	r4,r0,TLB_DATA		/* Load the data portion of the entry */
	tlbwe	r3,r0,TLB_TAG		/* Load the tag portion of the entry */

#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)

	/* Load a TLB entry for the UART, so that ppc4xx_progress() can use
	 * the UARTs nice and early.  We use a 4k real==virtual mapping. */

	lis	r3,SERIAL_DEBUG_IO_BASE@h
	ori	r3,r3,SERIAL_DEBUG_IO_BASE@l
	mr	r4,r3
	clrrwi	r4,r4,12
	ori	r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)

	clrrwi	r3,r3,12
	ori	r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))

	li	r0,0			/* TLB slot 0 */
	tlbwe	r4,r0,TLB_DATA
	tlbwe	r3,r0,TLB_TAG
#endif /* CONFIG_SERIAL_TEXT_DEBUG && SERIAL_DEBUG_IO_BASE */

	isync

	/* Establish the exception vector base
	*/
	lis	r4,KERNELBASE@h		/* EVPR only uses the high 16-bits */
	tophys(r0,r4)			/* Use the physical address */
	mtspr	SPRN_EVPR,r0

	blr

_GLOBAL(abort)
	mfspr	r13,SPRN_DBCR0
	oris	r13,r13,DBCR0_RST_SYSTEM@h
	mtspr	SPRN_DBCR0,r13

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, KERNELBASE@h
	lwz	r5, 0xf0(r5)
	stw	r4, 0x4(r5)
#endif
	sync
	mtspr	SPRN_PID,r3
	isync				/* Need an isync to flush shadow */
					/* TLBs after changing PID */
	blr

/* We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	12
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE


/* Stack for handling critical exceptions from kernel mode */
	.section .bss
	.align	12
exception_stack_bottom:
	.space	4096
critical_stack_top:
	.globl	exception_stack_top
exception_stack_top:

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8