1/*
2 *  Low level TLB miss handlers for Book3E
3 *
4 *  Copyright (C) 2008-2009
5 *      Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
6 *
7 *  This program is free software; you can redistribute it and/or
8 *  modify it under the terms of the GNU General Public License
9 *  as published by the Free Software Foundation; either version
10 *  2 of the License, or (at your option) any later version.
11 */
12
13#include <asm/processor.h>
14#include <asm/reg.h>
15#include <asm/page.h>
16#include <asm/mmu.h>
17#include <asm/ppc_asm.h>
18#include <asm/asm-offsets.h>
19#include <asm/cputable.h>
20#include <asm/pgtable.h>
21#include <asm/exception-64e.h>
22#include <asm/ppc-opcode.h>
23#include <asm/kvm_asm.h>
24#include <asm/kvm_booke_hv_asm.h>
25#include <asm/feature-fixups.h>
26
/* Layout of the EA inside the virtually-linear page table region:
 * each level's shift accumulates the index sizes of the levels below
 * it, and VPTE_INDEX_SIZE is the total number of EA bits spanned by
 * the virtual PTE array (used for the range check in the VPT miss
 * handler below).
 */
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
#define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
31
32/**********************************************************************
33 *                                                                    *
34 * TLB miss handling for Book3E with a bolted linear mapping          *
35 * No virtual page table, no nested TLB misses                        *
36 *                                                                    *
37 **********************************************************************/
38
39/*
40 * Note that, unlike non-bolted handlers, TLB_EXFRAME is not
41 * modified by the TLB miss handlers themselves, since the TLB miss
42 * handler code will not itself cause a recursive TLB miss.
43 *
44 * TLB_EXFRAME will be modified when crit/mc/debug exceptions are
45 * entered/exited.
46 */
/*
 * Common prolog for the bolted handlers: save the registers we will
 * clobber (r7/r10/r11/r13..r16 and CR) into the TLB exception frame
 * addressed by SPRN_SPRG_TLB_EXFRAME, load the PACA pointer (r13) and
 * the user PGD (r14), and fetch the faulting address from \addr into
 * r16.
 *
 * \intnum = BOOKE interrupt number (passed to DO_KVM)
 * \addr   = SPR holding the faulting address (SPRN_DEAR or SPRN_SRR0)
 */
.macro tlb_prolog_bolted intnum addr
	mtspr	SPRN_SPRG_GEN_SCRATCH,r12	/* free r12 for the frame ptr */
	mfspr	r12,SPRN_SPRG_TLB_EXFRAME
	std	r13,EX_TLB_R13(r12)
	std	r10,EX_TLB_R10(r12)
	mfspr	r13,SPRN_SPRG_PACA

	mfcr	r10				/* CR saved via r10 below */
	std	r11,EX_TLB_R11(r12)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1			/* DO_KVM inspects MSR bits in r11 */
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	DO_KVM	\intnum, SPRN_SRR1		/* divert to KVM for guest faults */
	std	r16,EX_TLB_R16(r12)
	mfspr	r16,\addr		/* get faulting address */
	std	r14,EX_TLB_R14(r12)
	ld	r14,PACAPGD(r13)	/* r14 = user page-table base */
	std	r15,EX_TLB_R15(r12)
	std	r10,EX_TLB_CR(r12)	/* store the CR captured above */
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	mfspr r11, SPRN_SRR1
	andi. r10,r11,MSR_PR
	beq 1f				/* flush BTB only when coming from user */
	BTB_FLUSH(r10)
1:
END_BTB_FLUSH_SECTION
	std	r7,EX_TLB_R7(r12)
#endif
	TLB_MISS_PROLOG_STATS
.endm
80
/*
 * Common epilog for the bolted handlers: undo tlb_prolog_bolted.
 * CR is reloaded through r14 before r14 itself is restored; r12 is
 * recovered from the scratch SPRG last.
 */
.macro tlb_epilog_bolted
	ld	r14,EX_TLB_CR(r12)
#ifdef CONFIG_PPC_FSL_BOOK3E
	ld	r7,EX_TLB_R7(r12)
#endif
	ld	r10,EX_TLB_R10(r12)
	ld	r11,EX_TLB_R11(r12)
	ld	r13,EX_TLB_R13(r12)
	mtcr	r14			/* restore CR before clobbering r14 */
	ld	r14,EX_TLB_R14(r12)
	ld	r15,EX_TLB_R15(r12)
	TLB_MISS_RESTORE_STATS
	ld	r16,EX_TLB_R16(r12)
	mfspr	r12,SPRN_SPRG_GEN_SCRATCH
.endm
96
/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	/* We need _PAGE_PRESENT and  _PAGE_ACCESSED set */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST   is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */

	mfspr	r11,SPRN_ESR

	srdi	r15,r16,60		/* get region */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */

	rlwinm	r10,r11,32-19,27,27	/* ESR_ST -> _PAGE_BAP_SW (0x10) */
	rlwimi	r10,r11,32-16,19,19
	/* NOTE(review): the insert above uses a 32-16 rotate, which places
	 * ESR bit 0x10000000 (not ESR_ST = 0x00800000) into _PAGE_DIRTY;
	 * the ">> 11" described in the comment would be 32-11.  Verify
	 * against the ESR bit definitions; if wrong, the likely effect is
	 * only an extra storage fault on stores to clean pages. */
	cmpwi	r15,0			/* user vs kernel check */
	ori	r10,r10,_PAGE_PRESENT
	oris	r11,r10,_PAGE_ACCESSED@h	/* r11 = required-permission mask */

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_bolted
137
tlb_miss_common_bolted:
/*
 * This is the guts of the TLB miss handler for bolted-linear.
 * We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3		/* r15 = byte offset of PGD entry */
	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ldx	r14,r14,r15		/* grab pgd entry */
	beq	tlb_miss_done_bolted	/* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
	ldx	r14,r14,r15		/* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

	/* Walk PUD/PMD/PTE.  Valid upper-level entries are kernel pointers
	 * (sign bit set), so a signed "bge" (entry >= 0) catches both empty
	 * slots and non-pointer encodings. */
	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab pmd entry */

	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */

	/* Check if required permissions are met */
	andc.	r15,r11,r14		/* any required bit missing from PTE? */
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	bne-	tlb_miss_fault_bolted

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE need change if !base page size, not
	 *                 yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	clrldi	r15,r15,12		/* Clear crap at the top */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r11
	andi.	r11,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
	mtspr	SPRN_MAS7_MAS3,r15
	tlbwe				/* MAS0/MAS1 were preset; write entry */

tlb_miss_done_bolted:
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi
219
/* Kernel-address and fault tails for the bolted handlers.  On entry
 * r15 = region, r11 = required-permission mask (D-side) or is rebuilt
 * here (I-side). */
itlb_miss_kernel_bolted:
	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
tlb_miss_kernel_bolted:
	mfspr	r10,SPRN_MAS1
	ld	r14,PACA_KERNELPGD(r13)	/* kernel walks use swapper's PGD */
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	tlb_miss_common_bolted	/* only region 0x8 is page-table mapped */

tlb_miss_fault_bolted:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_EXEC|_PAGE_BAP_SX	/* exec bits only in I-side masks */
	bne	itlb_miss_fault_bolted
dtlb_miss_fault_bolted:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_bolted:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e
243
/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4	/* EA in range? */
	srdi	r15,r16,60		/* get region */
	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne-	itlb_miss_fault_bolted

	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */

	/* We do the user/kernel test for the PID here along with the RW test
	 */

	cmpldi	cr0,r15,0			/* Check for user region */
	oris	r11,r11,_PAGE_ACCESSED@h	/* r11 = required-permission mask */
	beq	tlb_miss_common_bolted
	b	itlb_miss_kernel_bolted		/* kernel: rebuild perms with BAP_SX */
262
263#ifdef CONFIG_PPC_FSL_BOOK3E
264/*
265 * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
266 *
267 * Linear mapping is bolted: no virtual page table or nested TLB misses
268 * Indirect entries in TLB1, hardware loads resulting direct entries
269 *    into TLB0
270 * No HES or NV hint on TLB1, so we need to do software round-robin
271 * No tlbsrx. so we need a spinlock, and we have to deal
272 *    with MAS-damage caused by tlbsx
273 * 4K pages only
274 */
275
	/* e6500 ITLB miss: tag the faulting page address with low bit 1 so
	 * the common fault path can tell I-side from D-side misses. */
	START_EXCEPTION(instruction_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	ld	r11,PACA_TCD_PTR(r13)	/* r11 = per-core TLB control data */
	srdi.	r15,r16,60		/* get region */
	ori	r16,r16,1		/* low bit 1 = instruction miss */

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_e6500	/* user/kernel test */

	b	tlb_miss_common_e6500
287
	/* e6500 DTLB miss: clear the low bit of the page address to mark a
	 * data miss, then fall through into tlb_miss_common_e6500. */
	START_EXCEPTION(data_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	ld	r11,PACA_TCD_PTR(r13)	/* r11 = per-core TLB control data */
	srdi.	r15,r16,60		/* get region */
	rldicr	r16,r16,0,62		/* low bit 0 = data miss */

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_e6500	/* user vs kernel check */
	/* fall through to tlb_miss_common_e6500 */
297
/*
 * This is the guts of the TLB miss handler for e6500 and derivatives.
 * We are entered with:
 *
 * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = tlb_per_core ptr
 * r10 = crap (free to use)
 * r7  = esel_next
 */
tlb_miss_common_e6500:
	/* Latch the region test from the prolog's "srdi." into cr2 so it
	 * survives until the TID decision below (cr2.eq set = region 0,
	 * i.e. a user address; clear = kernel). */
	crmove	cr2*4+2,cr0*4+2
	/* NOTE(review): the original comment here read "cr2.eq != 0 if
	 * kernel address", which contradicts the srdi./beq cr2 usage —
	 * confirm against the entry paths. */

BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
	/*
	 * Search if we already have an indirect entry for that virtual
	 * address, and if we do, bail out.
	 *
	 * MAS6:IND should be already set based on MAS4
	 *
	 * Take the per-core lock (byte at *r11): the token is this CPU's
	 * PACA index + 1 so it is non-zero and identifies the owner for
	 * recursion detection.
	 */
	lhz	r10,PACAPACAINDEX(r13)
	addi	r10,r10,1
	crclr	cr1*4+eq	/* set cr1.eq = 0 for non-recursive */
1:	lbarx	r15,0,r11
	cmpdi	r15,0
	bne	2f		/* lock held: recursion check / spin */
	stbcx.	r10,0,r11
	bne	1b		/* lost reservation, retry */
3:
	.subsection 1
2:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
10:	lbz	r15,0(r11)	/* spin without reservation until free */
	cmpdi	r15,0
	bne	10b
	b	1b
	.previous
END_FTR_SECTION_IFSET(CPU_FTR_SMT)
338
	lbz	r7,TCD_ESEL_NEXT(r11)	/* r7 = round-robin victim esel */

BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
	/*
	 * Erratum A-008139 says that we can't use tlbwe to change
	 * an indirect entry in any way (including replacing or
	 * invalidating) if the other thread could be in the process
	 * of a lookup.  The workaround is to invalidate the entry
	 * with tlbilx before overwriting.
	 */

	rlwinm	r10,r7,16,0xff0000	/* MAS0_ESEL(esel_next) */
	oris	r10,r10,MAS0_TLBSEL(1)@h
	mtspr	SPRN_MAS0,r10
	isync
	tlbre				/* read the victim entry back */
	mfspr	r15,SPRN_MAS1
	andis.	r15,r15,MAS1_VALID@h
	beq	5f			/* victim invalid: no tlbilx needed */

BEGIN_FTR_SECTION_NESTED(532)
	mfspr	r10,SPRN_MAS8
	rlwinm	r10,r10,0,0x80000fff  /* tgs,tlpid -> sgs,slpid */
	mtspr	SPRN_MAS5,r10
END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)

	mfspr	r10,SPRN_MAS1
	rlwinm	r15,r10,0,0x3fff0000  /* tid -> spid */
	rlwimi	r15,r10,20,0x00000003 /* ind,ts -> sind,sas */
	mfspr	r10,SPRN_MAS6		/* save MAS6, restored after tlbilx */
	mtspr	SPRN_MAS6,r15

	mfspr	r15,SPRN_MAS2
	isync
	tlbilxva 0,r15			/* invalidate victim by its VA */
	isync

	mtspr	SPRN_MAS6,r10

5:
BEGIN_FTR_SECTION_NESTED(532)
	li	r10,0
	mtspr	SPRN_MAS8,r10		/* clear guest state for the search */
	mtspr	SPRN_MAS5,r10
END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)

	tlbsx	0,r16			/* search for an existing entry */
	mfspr	r10,SPRN_MAS1
	andis.	r15,r10,MAS1_VALID@h
	bne	tlb_miss_done_e6500	/* entry already present, bail */
FTR_SECTION_ELSE
	mfspr	r10,SPRN_MAS1
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SMT)

	oris	r10,r10,MAS1_VALID@h
	beq	cr2,4f			/* cr2.eq = user address: keep TID */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
4:	mtspr	SPRN_MAS1,r10

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	tlb_miss_fault_e6500

	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3
	beq-	tlb_miss_fault_e6500 /* No PGDIR, bail */
	ldx	r14,r14,r15		/* grab pgd entry */

	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0		/* signed: valid ptrs are negative */
	bge	tlb_miss_huge_e6500	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_huge_e6500
	ldx	r14,r14,r15		/* Grab pmd entry */

	mfspr	r10,SPRN_MAS0
	cmpdi	cr0,r14,0
	bge	tlb_miss_huge_e6500

	/* Now we build the MAS for a 2M indirect page:
	 *
	 * MAS 0   :	ESEL needs to be filled by software round-robin
	 * MAS 1   :	Fully set up
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base ind page size always
	 *               - TID already cleared if necessary
	 * MAS 2   :	Default not 2M-aligned, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */

	ori	r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
	mtspr	SPRN_MAS7_MAS3,r14	/* PTE-page address + sub-page size */

	clrrdi	r15,r16,21		/* make EA 2M-aligned */
	mtspr	SPRN_MAS2,r15

tlb_miss_huge_done_e6500:
	/* Software round-robin victim selection: write esel_next into
	 * MAS0, then advance it, wrapping from esel_max back to
	 * esel_first.  Shared with the hugepage path. */
	lbz	r16,TCD_ESEL_MAX(r11)
	lbz	r14,TCD_ESEL_FIRST(r11)
	rlwimi	r10,r7,16,0x00ff0000	/* insert esel_next into MAS0 */
	addi	r7,r7,1			/* increment esel_next */
	mtspr	SPRN_MAS0,r10
	cmpw	r7,r16
	iseleq	r7,r14,r7		/* if next == last use first */
	stb	r7,TCD_ESEL_NEXT(r11)

	tlbwe
454
/* Success path: drop the per-core lock (SMT only) and return. */
tlb_miss_done_e6500:
	.macro	tlb_unlock_e6500
BEGIN_FTR_SECTION
	beq	cr1,1f		/* no unlock if lock was recursively grabbed */
	li	r15,0
	isync			/* order TLB update before releasing the lock */
	stb	r15,0(r11)
1:
END_FTR_SECTION_IFSET(CPU_FTR_SMT)
	.endm

	tlb_unlock_e6500
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi
470
/* Hugepage path: entered from the walk with cr0 holding the compare of
 * the upper-level entry against zero (eq = empty slot = real fault). */
tlb_miss_huge_e6500:
	beq	tlb_miss_fault_e6500	/* zero entry: genuine fault */
	li	r10,1
	andi.	r15,r14,HUGEPD_SHIFT_MASK@l /* r15 = psize */
	rldimi	r14,r10,63,0		/* Set PD_HUGE */
	xor	r14,r14,r15		/* Clear size bits */
	ldx	r14,0,r14		/* dereference hugepd to get the PTE */

	/*
	 * Now we build the MAS for a huge page.
	 *
	 * MAS 0   :	ESEL needs to be filled by software round-robin
	 *		 - can be handled by indirect code
	 * MAS 1   :	Need to clear IND and set TSIZE
	 * MAS 2,3+7:	Needs to be redone similar to non-tablewalk handler
	 */

	subi	r15,r15,10		/* Convert psize to tsize */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,~MAS1_IND	/* direct (non-indirect) entry */
	rlwimi	r10,r15,MAS1_TSIZE_SHIFT,MAS1_TSIZE_MASK
	mtspr	SPRN_MAS1,r10

	li	r10,-0x400
	sld	r15,r10,r15		/* Generate mask based on size */
	and	r10,r16,r15		/* EA aligned to the huge page size */
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	rlwimi	r10,r14,32-19,27,31	/* Insert WIMGE */
	clrldi	r15,r15,PAGE_SHIFT	/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r10
	andi.	r10,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r10,MAS3_SW|MAS3_UW
	andc	r15,r15,r10
1:
	mtspr	SPRN_MAS7_MAS3,r15

	mfspr	r10,SPRN_MAS0
	b	tlb_miss_huge_done_e6500	/* shared esel round-robin + tlbwe */
514
tlb_miss_kernel_e6500:
	ld	r14,PACA_KERNELPGD(r13)	/* kernel walks use swapper's PGD */
	cmpldi	cr1,r15,8		/* Check for vmalloc region */
	beq+	cr1,tlb_miss_common_e6500

tlb_miss_fault_e6500:
	/* NOTE(review): reaching here straight from the vmalloc check above
	 * (kernel, non-vmalloc region) arrives with cr1.eq clear but the
	 * per-core lock never taken, so tlb_unlock_e6500 stores 0 to a lock
	 * byte this thread does not own — verify against the lock protocol
	 * in tlb_miss_common_e6500. */
	tlb_unlock_e6500
	/* We need to check if it was an instruction miss */
	andi.	r16,r16,1		/* low bit was set by the ITLB prolog */
	bne	itlb_miss_fault_e6500
dtlb_miss_fault_e6500:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_e6500:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e
533#endif /* CONFIG_PPC_FSL_BOOK3E */
534
535/**********************************************************************
536 *                                                                    *
537 * TLB miss handling for Book3E with TLB reservation and HES support  *
538 *                                                                    *
539 **********************************************************************/
540
541
/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in normal
	 * fault case since that's the only interesting values here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* The page tables are mapped virtually linear. At this point, though,
	 * we don't know whether we are trying to fault in a first level
	 * virtual address or a virtual page table address. We can get that
	 * from bit 0x1 of the region ID which we have set for a page table
	 */
	andi.	r10,r15,0x1
	bne-	virt_page_table_tlb_miss

	std	r14,EX_TLB_ESR(r12);	/* save ESR */
	std	r16,EX_TLB_DEAR(r12);	/* save DEAR */

	 /* We need _PAGE_PRESENT and  _PAGE_ACCESSED set */
	li	r11,_PAGE_PRESENT
	oris	r11,r11,_PAGE_ACCESSED@h

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r15,0		/* Check for user region */

	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST   is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */
	rlwimi	r11,r14,32-19,27,27	/* ESR_ST -> _PAGE_BAP_SW (0x10) */
	rlwimi	r11,r14,32-16,19,19
	/* NOTE(review): the 32-16 rotate above inserts ESR bit 0x10000000,
	 * not ESR_ST, into _PAGE_DIRTY; the ">> 11" the comment describes
	 * would be 32-11.  Verify against the ESR bit definitions. */
	beq	normal_tlb_miss
	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
610
/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by writing a crazy value in ESR in our exception frame
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non
	 * linear mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h

	cmpldi	cr0,r15,0			/* Check for user region */
	std	r14,EX_TLB_ESR(r12)		/* write crazy -1 to frame */
	beq	normal_tlb_miss

	/* Kernel fetch: require supervisor-exec instead of user-exec */
	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
656
/*
 * This is the guts of the first-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = region ID
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
normal_tlb_miss:
	/* So we first construct the page table address. We do that by
	 * shifting the bottom of the address (not the region ID) by
	 * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
	 * or'ing the fourth high bit.
	 *
	 * NOTE: For 64K pages, we do things slightly differently in
	 * order to handle the weird page table format used by linux
	 */
	ori	r10,r15,0x1		/* region | 1 = virtual PTE region */
	rldicl	r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4	/* EA -> PTE offset */
	sldi	r15,r10,60		/* region ID back in the top nibble */
	clrrdi	r14,r14,3		/* 8-byte align: PTE pointer */
	or	r10,r15,r14		/* r10 = virtual address of the PTE */

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ld	r14,0(r10)		/* may itself TLB-miss -> VPT handler */
	beq	normal_tlb_miss_done
MMU_FTR_SECTION_ELSE
	ld	r14,0(r10)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

finish_normal_tlb_miss:
	/* Check if required permissions are met */
	andc.	r15,r11,r14		/* any required bit missing from PTE? */
	bne-	normal_tlb_miss_access_fault

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE need change if !base page size, not
	 *                 yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 *
	 * TODO: mix up code below for better scheduling
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	mtspr	SPRN_MAS2,r11

	/* Check page size, if not standard, update MAS1 */
	rldicl	r11,r14,64-8,64-8	/* extract page-size field from PTE */
	cmpldi	cr0,r11,BOOK3E_PAGESZ_4K
	beq-	1f
	mfspr	r11,SPRN_MAS1
	rlwimi	r11,r14,31,21,24	/* insert PTE size into MAS1 TSIZE */
	rlwinm	r11,r11,0,21,19
	mtspr	SPRN_MAS1,r11
1:
	/* Move RPN in position */
	rldicr	r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	clrldi	r15,r11,12		/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	andi.	r11,r14,_PAGE_DIRTY
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
BEGIN_MMU_FTR_SECTION
	srdi	r16,r15,32		/* no paired MAS: split MAS3/MAS7 */
	mtspr	SPRN_MAS3,r15
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r15
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe
746
normal_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

normal_tlb_miss_access_fault:
	/* We need to check if it was an instruction miss: only the I-side
	 * permission masks contain _PAGE_EXEC (r11 = required perms). */
	andi.	r10,r11,_PAGE_EXEC
	bne	1f
	ld	r14,EX_TLB_DEAR(r12)	/* restore fault info saved at entry */
	ld	r15,EX_TLB_ESR(r12)
	mtspr	SPRN_DEAR,r14
	mtspr	SPRN_ESR,r15
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
770
771
/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = region (top 4 bits of address)
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * Note that this should only ever be called as a second level handler
 * with the current scheme when using SW load.
 * That means we can always get the original fault DEAR at
 * EX_TLB_DEAR-EX_TLB_SIZE(r12)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will restart the whole fault at level
 * 0 so we don't care too much about clobbers
 *
 * XXX That code was written back when we couldn't clobber r14. We can now,
 * so we could probably optimize things a bit
 */
virt_page_table_tlb_miss:
	/* Are we hitting a kernel page table ? */
	andi.	r10,r15,0x8

	/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
	 * and we happen to have the swapper_pg_dir at offset 8 from the user
	 * pgdir in the PACA :-).
	 */
	add	r11,r10,r13		/* r11 = PACA biased by 0 (user) or 8 */

	/* If kernel, we need to clear MAS1 TID */
	beq	1f			/* user (andi. result 0): keep TID */
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
1:
BEGIN_MMU_FTR_SECTION
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	virt_page_table_tlb_miss_done
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
	bne-	virt_page_table_tlb_miss_fault

	/* Get the PGD pointer */
	ld	r15,PACAPGD(r11)	/* user or kernel PGD, per bias above */
	cmpldi	cr0,r15,0
	beq-	virt_page_table_tlb_miss_fault

	/* Get to PGD entry.  As in the first-level walks, valid entries
	 * are kernel pointers (negative), so signed bge bails on empty
	 * or non-pointer entries. */
	rldicl	r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Get to PUD entry */
	rldicl	r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Get to PMD entry */
	rldicl	r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Ok, we're all right, we can now create a kernel translation for
	 * a 4K or 64K page from r16 -> r15.
	 */
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 *
	 * So we only do MAS 2 and 3 for now...
	 */
	clrldi	r11,r15,4		/* remove region ID from RPN */
	ori	r10,r11,1		/* Or-in SR */

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32		/* no paired MAS: split MAS3/MAS7 */
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

BEGIN_MMU_FTR_SECTION
virt_page_table_tlb_miss_done:

	/* We have overridden MAS2:EPN but currently our primary TLB miss
	 * handler will always restore it so that should not be an issue,
	 * if we ever optimize the primary handler to not write MAS2 on
	 * some cases, we'll have to restore MAS2:EPN here based on the
	 * original fault's DEAR. If we do that we have to modify the
	 * ITLB miss handler to also store SRR0 in the exception frame
	 * as DEAR.
	 *
	 * However, one nasty thing we did is we cleared the reservation
	 * (well, potentially we did). We do a trick here thus if we
	 * are not a level 0 exception (we interrupted the TLB miss) we
	 * offset the return address by -4 in order to replay the tlbsrx
	 * instruction there
	 */
	subf	r10,r13,r12		/* frame offset within the PACA */
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE	/* second-level frame? */
	bne-	1f
	ld	r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
	addi	r10,r11,-4		/* back up over the tlbsrx. */
	std	r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
1:
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
	/* Return to caller, normal case */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
	TLB_MISS_EPILOG_SUCCESS
	rfi
909
virt_page_table_tlb_miss_fault:
	/* If we fault here, things are a little bit tricky. We need to call
	 * either data or instruction store fault, and we need to retrieve
	 * the original fault address and ESR (for data).
	 *
	 * The thing is, we know that in normal circumstances, this is
	 * always called as a second level tlb miss for SW load or as a first
	 * level TLB miss for HW load, so we should be able to peek at the
	 * relevant information in the first exception frame in the PACA.
	 *
	 * However, we do need to double check that, because we may just hit
	 * a stray kernel pointer or a userland attack trying to hit those
	 * areas. If that is the case, we do a data fault. (We can't get here
	 * from an instruction tlb miss anyway).
	 *
	 * Note also that when going to a fault, we must unwind the previous
	 * level as well. Since we are doing that, we don't need to clear or
	 * restore the TLB reservation neither.
	 */
	subf	r10,r13,r12		/* frame offset within the PACA */
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE	/* genuinely second level? */
	bne-	virt_page_table_tlb_miss_whacko_fault

	/* We dig the original DEAR and ESR from slot 0 */
	ld	r15,EX_TLB_DEAR+PACA_EXTLB(r13)
	ld	r16,EX_TLB_ESR+PACA_EXTLB(r13)

	/* We check for the "special" ESR value for instruction faults */
	cmpdi	cr0,r16,-1		/* -1 was planted by the ITLB handler */
	beq	1f
	mtspr	SPRN_DEAR,r15
	mtspr	SPRN_ESR,r16
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

virt_page_table_tlb_miss_whacko_fault:
	/* The linear fault will restart everything so ESR and DEAR will
	 * not have been clobbered, let's just fault with what we have
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
956
957
958/**************************************************************
959 *                                                            *
960 * TLB miss handling for Book3E with hw page table support    *
961 *                                                            *
962 **************************************************************/
963
964
/* Data TLB miss — hardware-tablewalk variant.  The shared walk code at
 * htw_tlb_miss (defined later in the file, not visible in this chunk)
 * expects r15 = pgdir and r11 = region. */
	START_EXCEPTION(data_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in normal
	 * fault case since that's the only interesting values here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0		/* Check for user region */
	ld	r15,PACAPGD(r13)	/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1001
/* Instruction TLB miss (hardware tablewalk variant).
 * Same structure as the data miss handler above, but the faulting address
 * comes from SRR0 (already in r16) and r14 is set to the sentinel -1 so
 * downstream fault code can tell this was an instruction miss.
 */
	START_EXCEPTION(instruction_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by keeping a crazy value for ESR in r14
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non
	 * linear mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r11,r16,60		/* get region (top 4 address bits) */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0			/* Check for user region */
	ld	r15,PACAPGD(r13)		/* Load user pgdir */
	beq	htw_tlb_miss			/* user address: walk user pgdir */

	/* Kernel address: clear the TID field in MAS1 and switch to the
	 * kernel pgdir (see the data miss handler above).
	 */
	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)		/* Load kernel pgdir */
	beq+	htw_tlb_miss			/* vmalloc region -> walk kernel pgdir */

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
1043
1044
/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = faulting virtual address (NOTE(review): the original comment said
 *       "virtual page table faulting address", but the code below searches
 *       the TLB for r16 and derives the PGD/PUD/PMD indices from r16, so
 *       it is the plain faulting address)
 * r15 = PGD pointer
 * r14 = ESR (data miss) or -1 (instruction miss)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will save/restore things for us
 */
htw_tlb_miss:
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 *
	 * MAS1:IND should be already set based on MAS4
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	htw_tlb_miss_done	/* entry already present, nothing to do */

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	htw_tlb_miss_fault	/* address outside the covered range */

	/* Get the PGD pointer */
	cmpldi	cr0,r15,0
	beq-	htw_tlb_miss_fault	/* no pgdir for this context */

	/* Get to PGD entry */
	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3		/* 8-byte align the entry offset */
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0		/* signed test: non-negative entry */
	bge	htw_tlb_miss_fault	/* is treated as absent/invalid */

	/* Get to PUD entry */
	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault	/* non-negative -> absent/invalid */

	/* Get to PMD entry */
	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault	/* non-negative -> absent/invalid */

	/* Ok, we're all right, we can now create an indirect entry for
	 * a 1M or 256M page.
	 *
	 * The last trick is now that because we use "half" pages for
	 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
	 * for an added LSB bit to the RPN. For 64K pages, there is no
	 * problem as we already use 32K arrays (half PTE pages), but for
	 * 4K page we need to extract a bit from the virtual address and
	 * insert it into the "PA52" bit of the RPN.
	 */
	rlwimi	r15,r16,32-9,20,20	/* insert the VA-derived "PA52" bit */
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base ind page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 */
	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)

	/* Write MAS3 (low word of r10) and MAS7 (high word), either as two
	 * separate SPRs or via the combined MAS7_MAS3 SPR when the CPU has
	 * the paired-MAS feature.
	 */
BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe			/* commit the indirect entry to the TLB */

htw_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

htw_tlb_miss_fault:
	/* We need to check if it was an instruction miss. We know this
	 * though because r14 would contain -1
	 */
	cmpdi	cr0,r14,-1
	beq	1f			/* r14 == -1: instruction miss */
	mtspr	SPRN_DEAR,r16		/* restore DEAR and ESR for the */
	mtspr	SPRN_ESR,r14		/* data storage handler */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
1155
/*
 * This is the guts of "any" level TLB miss handler for kernel linear
 * mapping misses. We are entered with:
 *
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = ESR (data) or -1 (instruction)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * In addition we know that we will not re-enter, so in theory, we could
 * use a simpler epilog not restoring SRR0/1 etc.. but we'll do that later.
 *
 * We also need to be careful about MAS registers here & TLB reservation,
 * as we know we'll have clobbered them if we interrupt the main TLB miss
 * handlers in which case we probably want to do a full restart at level
 * 0 rather than saving / restoring the MAS.
 *
 * Note: If we care about performance of that core, we can easily shuffle
 *       a few things around
 */
tlb_load_linear:
	/* For now, we assume the linear mapping is contiguous and stops at
	 * linear_map_top. We also assume the size is a multiple of 1G, thus
	 * we only use 1G pages for now. That might have to be changed in a
	 * final implementation, especially when dealing with hypervisors
	 */
	ld	r11,PACATOC(r13)
	ld	r11,linear_map_top@got(r11)	/* &linear_map_top via TOC/GOT */
	ld	r10,0(r11)			/* r10 = linear_map_top */
	tovirt(10,10)
	cmpld	cr0,r16,r10		/* unsigned: beyond top of linear map? */
	bge	tlb_load_linear_fault

	/* MAS1 need whole new setup. */
	li	r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
	oris	r15,r15,MAS1_VALID@h	/* MAS1 needs V and TSIZE */
	mtspr	SPRN_MAS1,r15

	/* Already somebody there ? */
	PPC_TLBSRX_DOT(0,R16)
	beq	tlb_load_linear_done	/* hit: entry exists, just return */

	/* Now we build the remaining MAS. MAS0 and 2 should be fine
	 * with their defaults, which leaves us with MAS 3 and 7. The
	 * mapping is linear, so we just take the address, clear the
	 * region bits, and or in the permission bits which are currently
	 * hard wired
	 */
	clrrdi	r10,r16,30		/* 1G page index */
	clrldi	r10,r10,4		/* clear region bits */
	ori	r10,r10,MAS3_SR|MAS3_SW|MAS3_SX	/* kernel RWX */

	/* Write MAS3 (low word of r10) and MAS7 (high word), via the
	 * combined MAS7_MAS3 SPR when the paired-MAS feature is present.
	 */
BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe			/* write the 1G linear mapping entry */

tlb_load_linear_done:
	/* We use the "error" epilog for success as we do want to
	 * restore to the initial faulting context, whatever it was.
	 * We do that because we can't resume a fault within a TLB
	 * miss handler, due to MAS and TLB reservation being clobbered.
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
	TLB_MISS_EPILOG_ERROR
	rfi

tlb_load_linear_fault:
	/* We keep the DEAR and ESR around, this shouldn't have happened */
	cmpdi	cr0,r14,-1		/* r14 == -1 marks an instruction miss */
	beq	1f
	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_data_storage_book3e
1:	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_instruction_storage_book3e
1240
1241
#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
/*
 * Atomically increment the 64-bit counter whose address is in r9,
 * using a ldarx/stdcx. retry loop.  Clobbers r8 and cr0.
 */
.tlb_stat_inc:
1:	ldarx	r8,0,r9			/* load counter with reservation */
	addi	r8,r8,1
	stdcx.	r8,0,r9			/* conditional store back */
	bne-	1b			/* reservation lost -> retry */
	blr
#endif
1250