/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

#ifdef CONFIG_SMP
	.section .bss
	.align	2
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r6, r8, r10, ctr, lr.
 */
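/*
 * For orientation, a rough C sketch of what hash_page does (names
 * such as walk_page_tables() are illustrative only, not kernel API):
 *
 *	int hash_page(unsigned long ea, unsigned long access)
 *	{
 *		pte_t *pte = walk_page_tables(ea);	// pgdir walk below
 *		if (!pte || (access & ~pte_val(*pte)))
 *			return -1;	// caller handles the fault
 *		*pte |= _PAGE_ACCESSED | _PAGE_HASHPTE;	// done atomically
 *		create_hpte(...);	// insert into the hash table
 *		return 0;		// and return from the exception
 *	}
 */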
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r5,r5,(swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
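	/*
	 * Equivalent C sketch of the retry loop below (load_reserved()
	 * and store_conditional() are hypothetical stand-ins for
	 * lwarx/stwcx., not real kernel helpers):
	 *
	 *	do {
	 *		old = load_reserved(pte);
	 *		if (access & ~old)
	 *			return;			// not permitted
	 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
	 *			  | (write ? _PAGE_DIRTY : 0);
	 *	} while (!store_conditional(pte, new));
	 */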
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
retry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	lis	r8, (mmu_hash_lock - PAGE_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
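	/*
	 * In C, the VSID computed above is (create_hpte only uses the
	 * low 24 bits):
	 *
	 *	vsid = context * (897 * 16) + ((va >> 28) & 0xf) * 0x111;
	 */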

#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r9
	SYNC
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
	eieio
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE). r10 contains the
 * upper half of the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE
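
/*
 * A C sketch of the PTEG address computation done at the patched
 * sites below (Hash_base/Hash_msk stand for the patched-in physical
 * base and mask; illustrative only):
 *
 *	unsigned long pteg(unsigned long vsid, unsigned long ea, int sec)
 *	{
 *		unsigned long pi = (ea >> 12) & 0xffff;	/@ page index @/
 *		unsigned long off = ((vsid ^ pi) << LG_PTEG_SIZE) & Hash_msk;
 *		if (sec)
 *			off ^= Hash_msk;  /@ secondary hash = complement @/
 *		return Hash_base + off;
 *	}
 *
 * (Inner comments written /@ ... @/ to keep this block well-formed.)
 */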

_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
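	/*
	 * Net effect of the conversion above on the PP field:
	 *	kernel page:			PP = 0
	 *	user page, RW and DIRTY:	PP = 1 (writable)
	 *	user page, otherwise:		PP = 3 (read-only)
	 * (User segments are loaded with Kp = 0 and kernel segments
	 * with Kp = 1, which gives these encodings their intended
	 * meaning.)
	 */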
BEGIN_FTR_SECTION
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	patch_site	0f, patch__hash_page_A0
	patch_site	1f, patch__hash_page_A1
	patch_site	2f, patch__hash_page_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r0, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	lis	r4, (htab_hash_searches - PAGE_OFFSET)@ha
	lwz	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	patch_site	0f, patch__hash_page_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	lis	r4, (primary_pteg_full - PAGE_OFFSET)@ha
	lwz	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
	addi	r6,r6,1
	stw	r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)

	patch_site	0f, patch__hash_page_C
	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text, in
	 * order to avoid a deadlock when not using BAT mappings: evicting
	 * such an entry could cause this hashing code itself to take a
	 * hash-table miss while it still holds the hash table lock. This
	 * works in conjunction with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen in practice.
	 */
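
	/*
	 * A C sketch of this round-robin eviction (hpte_phys() is a
	 * hypothetical accessor for the RPN in the slot's second word;
	 * next_slot is the counter defined in .bss below):
	 *
	 *	do {
	 *		next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
	 *		slot = pteg + next_slot;
	 *	} while (hpte_phys(slot) < __pa(etext));
	 */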

1:	lis	r4, (next_slot - PAGE_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - PAGE_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE			/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)		/* get PTE second word */
	clrrwi	r0,r0,12
	lis	r6,etext@h
	ori	r6,r6,etext@l			/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6			/* compare and try again */
	blt	1b

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
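/*
 * In C-like form, the ordering below is (sync() standing in for the
 * sync instruction; a sketch of the protocol, not kernel API):
 *
 *	slot->v_word   = new_v & ~PTE_V;	// invalid while incomplete
 *	sync();
 *	slot->rpn_word = new_rpn;		// RPN, WIMG, PP bits
 *	sync();
 *	slot->v_word   = new_v;			// finally set V
 */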
found_empty:
found_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync		/* make sure pte updates get to memory */
	blr

	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
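/*
 * Rough C sketch of the loop structure below (names simplified,
 * illustrative only):
 *
 *	for (; count > 0; count--, va += 0x1000, pte++) {
 *		if (!(*pte & _PAGE_HASHPTE))
 *			continue;
 *		clear_hashpte_atomically(pte);	// lwarx/stwcx. loop
 *		invalidate_matching_hpte(va);	// search both PTEGs
 *		tlbie(va);
 *	}
 */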
_GLOBAL(flush_hash_pages)
	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT).  -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
	tophys	(r8, r2)
	lwz	r8, TASK_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed.  -- paulus.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	patch_site	0f, patch__flush_hash_A0
	patch_site	1f, patch__flush_hash_A1
	patch_site	2f, patch__flush_hash_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r8, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	patch_site	0f, patch__flush_hash_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
0:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr
EXPORT_SYMBOL(flush_hash_pages)

/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
	blr

/*
 * Flush the entire TLB. 603/603e only
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
	blr