/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *    Copyright 2002-2005 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/init.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/synch.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_ENTRY(_stext);
_ENTRY(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
/*
 * Save parameters we are passed
 */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7
	li	r24,0		/* CPU number */

	bl	init_cpu_state

	/*
	 * This is where the main kernel code starts.
	 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

	bl	early_init

/*
 * Decide what sort of machine this is and initialize the MMU.
 */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)

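	/*
	 * rfi loads the PC from SRR0 and the MSR from SRR1, so loading
	 * them first lets us install MSR_KERNEL and enter start_kernel
	 * in a single context-synchronizing jump.
	 */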
	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	NORMAL_EXCEPTION_PROLOG
	EXC_XFER_EE_LITE(0x0c00, DoSyscall)

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10		/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to the _PAGE_RW position (trying to write
	 * to an RO page is pretty common), we don't do the same for
	 * _PAGE_DIRTY. We could, but it's a fairly rare event so I'd
	 * rather take the overhead when it happens than add an
	 * instruction here. We should measure whether the whole thing
	 * is worth it in the first place, as we could avoid loading
	 * SPRN_ESR completely...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 *       place, or can we save a couple of instructions here?
	 */
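	/*
	 * The rlwimi below rotates ESR left by 10 bits and inserts bit
	 * 30, i.e. it copies ESR[ST] (the store-fault flag) into the
	 * _PAGE_RW position of the permission mask.
	 */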
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_D
tlb_44x_patch_hwater_D:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load_44x

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bail out
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	r10, r11
	blt+	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	/* Compare with watermark (instruction gets patched) */
	.globl tlb_44x_patch_hwater_I
tlb_44x_patch_hwater_I:
	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load_44x

2:
	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - TLB index
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
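/*
 * A 440 TLB entry is written as three words, selected by the third
 * operand to tlbwe: PAGEID (EPN, valid bit, page size), XLAT (RPN and
 * ERPN translation) and ATTRIB (storage attributes and permissions).
 */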
finish_tlb_load_44x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create WS 0. This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And WS 2 */
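	/*
	 * The 44x PTE low word is laid out so that its attribute bits
	 * line up with TLB word 2: 0xf85 keeps W|I|M|G|E plus SX
	 * (_PAGE_EXEC) and SR (_PAGE_PRESENT).
	 */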
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */

	/* Done...restore registers and get out of here.
	*/
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi					/* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
	START_EXCEPTION(DataTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to the _PAGE_RW position (trying to write
	 * to an RO page is pretty common), we don't do the same for
	 * _PAGE_DIRTY. We could, but it's a fairly rare event so I'd
	 * rather take the overhead when it happens than add an
	 * instruction here. We should measure whether the whole thing
	 * is worth it in the first place, as we could avoid loading
	 * SPRN_ESR completely...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 *       place, or can we save a couple of instructions here?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
	rlwimi	r13,r12,10,30,30	/* ESR[ST] -> _PAGE_RW position */

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * the latched V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */
	lwz	r11,0(r12)		/* Get high word of pte entry */

	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We could also use r10
	 * as the destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common tlb load */
	beq	finish_tlb_load_47x

2:	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11,SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13,SPRN_SPRG_RSCRATCH3
	mfspr	r12,SPRN_SPRG_RSCRATCH2
	mfspr	r11,SPRN_SPRG_RSCRATCH1
	mfspr	r10,SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bail out
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr0,r10,r11
	blt+	3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Load PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has established
	 * the latched V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */

	lwz	r11,0(r12)		/* Get high word of pte entry */
	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We could also use r10
	 * as the destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common TLB load point */
	beq	finish_tlb_load_47x

2:	/* The bailout.  Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - free to use
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - free to use
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
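	/*
	 * Same PTE-to-word-2 attribute mapping as in finish_tlb_load_44x
	 * above: 0xf85 keeps W|I|M|G|E plus SX and SR.
	 */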
	li	r10,0xf85			/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
	and	r11,r12,r10			/* Mask PTE bits to keep */
	andi.	r10,r12,_PAGE_USER		/* User page ? */
	beq	1f				/* nope, leave U bits empty */
	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	*/
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

/*
 * extern void giveup_altivec(struct task_struct *prev)
 *
 * The 44x core does not have an AltiVec unit.
 */
_GLOBAL(giveup_altivec)
	blr

/*
 * extern void giveup_fpu(struct task_struct *prev)
 *
 * The 44x core does not have an FPU.
 */
#ifndef CONFIG_PPC_FPU
_GLOBAL(giveup_fpu)
	blr
#endif

_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is the second parameter.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r4, 0x4(r5)
#endif
	mtspr	SPRN_PID,r3
	isync			/* Force context change */
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to set up initial TLB entries, set up IVORs, etc...
 *
 */
_GLOBAL(init_cpu_state)
	mflr	r22
#ifdef CONFIG_PPC_47x
	/* We use the PVR to differentiate 44x cores from 476 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	head_start_47x
#endif /* CONFIG_PPC_47x */

/*
 * In case the firmware didn't do it, we apply some workarounds
 * that are good for all 440 core variants here
 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

/*
 * Set up the initial MMU state for 44x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 *
 * We first invalidate all TLB entries but the one
 * we are running from.  We then load the KERNELBASE
 * mappings so we can begin to use kernel addresses
 * natively and so the interrupt vector locations are
 * permanently pinned (necessary since Book E
 * implementations always have translation enabled).
 *
 * TODO: Use the known TLB entry we are running from to
 *	 determine which physical region we are located
 *	 in.  This can be used to determine where in RAM
 *	 (on a shared CPU system) or PCI memory space
 *	 (on a DRAMless system) we are located.
 *	 For now, we assume a perfect world, which means
 *	 we are located at the base of DRAM (physical 0).
 */

/*
 * Search TLB for entry that we are currently using.
 * Invalidate all entries but the one we are using.
 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID			/* Get PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4,r4,MSR_IS@l			/* TS=1? */
	beq	wmmucr				/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
	sync

	bl	invstr				/* Find our address */
invstr:	mflr	r5				/* Make it accessible */
	tlbsx	r23,0,r5			/* Find entry we are in */
	li	r4,0				/* Start at TLB entry 0 */
	li	r3,0				/* Set PAGEID inval value */
1:	cmpw	r23,r4				/* Is this our entry? */
	beq	skpinv				/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
skpinv:	addi	r4,r4,1				/* Increment */
	cmpwi	r4,64				/* Are we done? */
	bne	1b				/* If not, repeat */
	isync					/* If so, context change */

/*
 * Configure and load pinned entry into TLB slot 63.
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Keep only the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Keep only the real page number */
					/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */

	/* Force context change */
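	/*
	 * rfi is context-synchronizing while a plain branch is not, so
	 * returning to 3f this way guarantees the new pinned entry is
	 * in effect when execution resumes.
	 */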
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheck);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError44x);
	SET_IVOR(14, InstructionTLBError44x);
	SET_IVOR(15, DebugCrit);

	b	head_start_common


#ifdef CONFIG_PPC_47x

#ifdef CONFIG_SMP

/* Entry point for secondary 47x processors */
_GLOBAL(start_secondary_47x)
	mr	r24,r3		/* CPU number */

	bl	init_cpu_state

	/* Now we need to bolt the rest of kernel memory, which
	 * is done in C code. We must be careful because our task
	 * struct or our stack can (and probably will) be out
	 * of reach of the initial 256M TLB entry, so we use a
	 * small temporary stack in .bss for that. This works
	 * because only one CPU at a time can be in this code
	 */
	lis	r1,temp_boot_stack@h
	ori	r1,r1,temp_boot_stack@l
	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)
	bl	mmu_init_secondary

	/* Now we can get our task struct and real stack pointer */

	/* Get current_thread_info and current */
	lis	r1,secondary_ti@ha
	lwz	r1,secondary_ti@l(r1)
	lwz	r2,TI_TASK(r1)

	/* Current stack pointer */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	stw	r0,0(r1)

	/* Kernel stack for exception entry in SPRG3 */
	addi	r4,r2,THREAD	/* current task's THREAD */
	mtspr	SPRN_SPRG3,r4

	b	start_secondary

#endif /* CONFIG_SMP */

/*
 * Set up the initial MMU state for 47x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 */

head_start_47x:
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID			/* Get PID */
	mfmsr	r4				/* Get MSR */
	andi.	r4,r4,MSR_IS@l			/* TS=1? */
	beq	1f				/* If not, leave STS=0 */
	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	1f
1:	mflr	r23
	tlbsx	r23,0,r23
	tlbre	r24,r23,0
	tlbre	r25,r23,1
	tlbre	r26,r23,2

/*
 * Cleanup time
 */

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

clear_all_utlb_entries:

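	#; The 476 UTLB is 4-way set associative with 256 congruence
	#; classes: r4 (TLB word 0) steps the EPN through every class
	#; while r3 selects each way, its top bit enabling direct way
	#; addressing for tlbwe.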
	#; Set initial values.

	addis		r3,0,0x8000
	addi		r4,0,0
	addi		r5,0,0
	b		clear_utlb_entry

	#; Align the loop to speed things up.

	.align		6

clear_utlb_entry:

	tlbwe		r4,r3,0
	tlbwe		r5,r3,1
	tlbwe		r5,r3,2
	addis		r3,r3,0x2000
	cmpwi		r3,0
	bne		clear_utlb_entry
	addis		r3,0,0x8000
	addis		r4,r4,0x100
	cmpwi		r4,0
	bne		clear_utlb_entry

	#; Restore the original entry.

	oris	r23,r23,0x8000	/* specify the way */
	tlbwe		r24,r23,0
	tlbwe		r25,r23,1
	tlbwe		r26,r23,2

/*
 * Configure and load pinned entry into TLB for the kernel core
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Word 0 */
	clrrwi	r3,r3,12		/* Keep only the effective page number */
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M

	/* Word 1 */
	clrrwi	r4,r4,12		/* Keep only the real page number */
					/* ERPN is 0 for first 4GB page */
	/* Word 2 */
	li	r5,0
	ori	r5,r5,PPC47x_TLB2_S_RWX
#ifdef CONFIG_SMP
	ori	r5,r5,PPC47x_TLB2_M
#endif

	/* We write to way 0 and bolt slot 0 */
	lis	r0,0x8800
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

/*
 * Configure SSPCR, ISPCR and USPCR for now to search everything; we can
 * fix them up later
 */
	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
	mtspr	SPRN_SSPCR,r3
	mtspr	SPRN_USPCR,r3
	LOAD_REG_IMMEDIATE(r3, 0x12345670)
	mtspr	SPRN_ISPCR,r3

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* Invalidate the original entry we used */
3:
	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
	tlbwe	r24,r23,0
	addi	r24,0,0
	tlbwe	r24,r23,1
	tlbwe	r24,r23,2
	isync			/* Clear out the shadow TLB entries */

#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* Word 0 */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M

	/* Word 1 */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* Word 2 */
	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)

	/* Bolted in way 0, bolt slot 5; we -hope- we don't hit the same
	 * congruence class as the kernel, and we need to make sure of it
	 * at some point
	 */
	lis	r0,0x8d00
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0,  CriticalInput);
	SET_IVOR(1,  MachineCheckA);
	SET_IVOR(2,  DataStorage);
	SET_IVOR(3,  InstructionStorage);
	SET_IVOR(4,  ExternalInput);
	SET_IVOR(5,  Alignment);
	SET_IVOR(6,  Program);
	SET_IVOR(7,  FloatingPointUnavailable);
	SET_IVOR(8,  SystemCall);
	SET_IVOR(9,  AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError47x);
	SET_IVOR(14, InstructionTLBError47x);
	SET_IVOR(15, DebugCrit);

	/* We configure icbi to invalidate 128 bytes at a time since the
	 * current 32-bit kernel code isn't too happy with icache != dcache
	 * block size
	 */
	mfspr	r3,SPRN_CCR0
	oris	r3,r3,0x0020
	mtspr	SPRN_CCR0,r3
	isync

#endif /* CONFIG_PPC_47x */

/*
 * Here we are back to code that is common between 44x and 47x
 *
 * We proceed to further kernel initialization and return to the
 * main kernel entry
 */
head_start_common:
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

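	/*
	 * r22 still holds the link register saved on entry to
	 * init_cpu_state; add KERNELBASE to it so we return to the
	 * caller through the new pinned kernel mapping.
	 */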
	addis	r22,r22,KERNELBASE@h
	mtlr	r22
	isync
	blr

/*
 * We put a few things here that have to be page-aligned. This stuff
 * goes at the beginning of the data segment, which is page-aligned.
 */
	.data
	.align	PAGE_SHIFT
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

/*
 * To support >32-bit physical addresses, we use an 8KB pgdir.
 */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/*
 * Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page tables.
 */
abatron_pteptrs:
	.space	8

#ifdef CONFIG_SMP
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */