/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  This file contains low-level assembler routines for managing
 *  the PowerPC MMU hash table.  (PPC 8xx processors don't use a
 *  hash table, so this file is not used on them.)
 */

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>

#ifdef CONFIG_VMAP_STACK
#define ADDR_OFFSET	0
#else
#define ADDR_OFFSET	PAGE_OFFSET
#endif

#ifdef CONFIG_SMP
	.section .bss
	.align	2
mmu_hash_lock:
	.space	4
#endif /* CONFIG_SMP */

/*
 * Load a PTE into the hash table, if possible.
 * The address is in r4, and r3 contains an access flag:
 * _PAGE_RW (0x400) if a write.
 * r9 contains the SRR1 value, from which we use the MSR_PR bit.
 * SPRG_THREAD contains the physical address of the current task's thread.
 *
 * Returns to the caller if the access is illegal or there is no
 * mapping for the address.  Otherwise it places an appropriate PTE
 * in the hash table and returns from the exception.
 * Uses r0, r3 - r6, r8, r10, ctr, lr.
 */
	.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@h
	ori	r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l
	lis	r0,0x0fff
	b	10f
11:	lwz	r6,0(r8)
	cmpwi	0,r6,0
	bne	11b
10:	lwarx	r6,0,r8
	cmpwi	0,r6,0
	bne-	11b
	stwcx.	r0,0,r8
	bne-	10b
	isync
#endif
	/* Get PTE (linux-style) and check access */
	lis	r0,KERNELBASE@h		/* check if kernel address */
	cmplw	0,r4,r0
	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
	mfspr	r5, SPRN_SPRG_PGDIR	/* phys page-table root */
#ifdef CONFIG_VMAP_STACK
	tovirt(r5, r5)
#endif
	blt+	112f			/* assume user more likely */
	lis	r5, (swapper_pg_dir - ADDR_OFFSET)@ha	/* if kernel address, use */
	addi	r5, r5, (swapper_pg_dir - ADDR_OFFSET)@l	/* kernel page table */
	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
	lwz	r8,0(r5)		/* get pmd entry */
	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
#else
	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
	lwzx	r8,r8,r5		/* Get L1 entry */
	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
#endif
#ifdef CONFIG_VMAP_STACK
	tovirt(r8, r8)
#endif
#ifdef CONFIG_SMP
	beq-	hash_page_out		/* return if no mapping */
#else
	/* XXX it seems like the 601 will give a machine fault on the
	   rfi if its alignment is wrong (bottom 4 bits of address are
	   8 or 0xc) and we have had a not-taken conditional branch
	   to the address following the rfi. */
	beqlr-
#endif
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
#else
	rlwimi	r8,r4,23,20,28		/* compute pte address */
#endif
	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE

	/*
	 * Update the linux PTE atomically.  We do the lwarx up-front
	 * because almost always, there won't be a permission violation
	 * and there won't already be an HPTE, and thus we will have
	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
	 *
	 * If PTE_64BIT is set, the low word is the flags word; use that
	 * word for locking since it contains all the interesting bits.
	 */
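	/*
	 * Roughly, in C (a sketch only, not the exact code; ptep is
	 * r8, pointing at the PTE flags word, and access is the flag
	 * set in r3):
	 *
	 *	do {
	 *		old = *ptep;			// lwarx
	 *		if (access & ~old)		// andc.
	 *			return;			// not permitted
	 *		new = old | _PAGE_ACCESSED | _PAGE_HASHPTE
	 *			  | (write ? _PAGE_DIRTY : 0);
	 *	} while (!store_conditional(ptep, new));	// stwcx.
	 *
	 * where store_conditional() stands for the stwcx. that fails
	 * if the reservation from the lwarx has been lost.
	 */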
#if (PTE_FLAGS_OFFSET != 0)
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
retry:
	lwarx	r6,0,r8			/* get linux-style pte, flag word */
	andc.	r5,r3,r6		/* check access & ~permission */
#ifdef CONFIG_SMP
	bne-	hash_page_out		/* return if access not permitted */
#else
	bnelr-
#endif
	or	r5,r0,r6		/* set accessed/dirty bits */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	stwcx.	r5,0,r8			/* attempt to update PTE */
	bne-	retry			/* retry if someone got there first */

	mfsrin	r3,r4			/* get segment reg for segment */
	mfctr	r0
	stw	r0,_CTR(r11)
	bl	create_hpte		/* add the hash table entry */

#ifdef CONFIG_SMP
	eieio
	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
#endif

	/* Return from the exception */
	lwz	r5,_CTR(r11)
	mtctr	r5
	lwz	r0,GPR0(r11)
	lwz	r8,GPR8(r11)
	b	fast_exception_return

#ifdef CONFIG_SMP
hash_page_out:
	eieio
	lis	r8, (mmu_hash_lock - ADDR_OFFSET)@ha
	li	r0,0
	stw	r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
	blr
#endif /* CONFIG_SMP */

/*
 * Add an entry for a particular page to the hash table.
 *
 * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
 *
 * We assume any necessary modifications to the pte (e.g. setting
 * the accessed bit) have already been done and that there is actually
 * a hash table in use (i.e. we're not on a 603).
 */
_GLOBAL(add_hash_page)
	mflr	r0
	stw	r0,4(r1)

	/* Convert context and va to VSID */
	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
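	/*
	 * I.e. roughly, in C:
	 *
	 *	vsid = context * (897 * 16) + ((va >> 28) & 0xf) * 0x111;
	 *
	 * of which only the low 24 bits are used by create_hpte.
	 */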

#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)		/* to go in mmu_hash_lock */
	oris	r8,r8,12
#endif /* CONFIG_SMP */

	/*
	 * We disable interrupts here, even on UP, because we don't
	 * want to race with hash_page, and because we want the
	 * _PAGE_HASHPTE bit to be a reliable indication of whether
	 * the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r9
	SYNC
	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r6
	beq+	12f
11:	lwz	r0,0(r6)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
	 * If _PAGE_HASHPTE was already set, we don't replace the existing
	 * HPTE, so we just unlock and return.
	 */
	mr	r8,r5
#ifndef CONFIG_PTE_64BIT
	rlwimi	r8,r4,22,20,29
#else
	rlwimi	r8,r4,23,20,28
	addi	r8,r8,PTE_FLAGS_OFFSET
#endif
1:	lwarx	r6,0,r8
	andi.	r0,r6,_PAGE_HASHPTE
	bne	9f			/* if HASHPTE already set, done */
#ifdef CONFIG_PTE_64BIT
#ifdef CONFIG_SMP
	subf	r10,r6,r8		/* create false data dependency */
	subi	r10,r10,PTE_FLAGS_OFFSET
	lwzx	r10,r6,r10		/* Get upper PTE word */
#else
	lwz	r10,-PTE_FLAGS_OFFSET(r8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
	ori	r5,r6,_PAGE_HASHPTE
	stwcx.	r5,0,r8
	bne-	1b

	bl	create_hpte

9:
#ifdef CONFIG_SMP
	lis	r6, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
	eieio
	li	r0,0
	stw	r0,0(r6)		/* clear mmu_hash_lock */
#endif

	/* reenable interrupts and DR */
	mtmsr	r9
	SYNC_601
	isync

	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This routine adds a hardware PTE to the hash table.
 * It is designed to be called with the MMU either on or off.
 * r3 contains the VSID, r4 contains the virtual address,
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE).  r10 contains the
 * upper half of the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
 * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
 * this routine will skip the search for an existing HPTE.
 * This procedure modifies r0, r3 - r6, r8, cr0.
 *  -- paulus.
 *
 * For speed, 4 of the instructions get patched once the size and
 * physical address of the hash table are known.  These definitions
 * of Hash_base and Hash_bits below are just an example.
 */
Hash_base = 0xc0180000
Hash_bits = 12				/* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)

/* defines for the PTE format for 32-bit PPCs */
#define HPTE_SIZE	8
#define PTEG_SIZE	64
#define LG_PTEG_SIZE	6
#define LDPTEu		lwzu
#define LDPTE		lwz
#define STPTE		stw
#define CMPPTE		cmpw
#define PTE_H		0x40
#define PTE_V		0x80000000
#define TST_V(r)	rlwinm. r,r,0,0,0
#define SET_V(r)	oris r,r,PTE_V@h
#define CLR_V(r,t)	rlwinm r,r,0,1,31

#define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT	31-LG_PTEG_SIZE

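/*
 * With the example values above, the (patched) address computation
 * below is roughly, in C:
 *
 *	hash      = (vsid ^ (va >> 12)) & ((1 << Hash_bits) - 1);
 *	primary   = Hash_base + hash * PTEG_SIZE;
 *	secondary = primary ^ Hash_msk;		(the complement hash)
 *
 * Each PTEG holds 8 HPTEs of HPTE_SIZE bytes; the first word of an
 * HPTE is V | VSID | H | API, the second is RPN | R | C | WIMG | PP.
 */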
_GLOBAL(create_hpte)
	/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
	rlwinm	r8,r5,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwinm	r0,r5,32-6,30,30	/* _PAGE_DIRTY -> PP msb */
	and	r8,r8,r0		/* writable if _RW & _DIRTY */
	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r8,r8,0xe04		/* clear out reserved bits */
	andc	r8,r5,r8		/* PP = user? (rw&dirty? 1: 3): 0 */
BEGIN_FTR_SECTION
	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
#ifdef CONFIG_PTE_64BIT
	/* Put the XPN bits into the PTE */
	rlwimi	r8,r10,8,20,22
	rlwimi	r8,r10,2,29,29
#endif

	/* Construct the high word of the PPC-style PTE (r5) */
	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r5)			/* set V (valid) bit */

	patch_site	0f, patch__hash_page_A0
	patch_site	1f, patch__hash_page_A1
	patch_site	2f, patch__hash_page_A2
	/* Get the address of the primary PTE group in the hash table (r3) */
0:	lis	r0, (Hash_base - ADDR_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r3,r3,r0		/* make primary hash */
	li	r0,8			/* PTEs/group */

	/*
	 * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
	 * if it is clear, meaning that the HPTE isn't there already...
	 */
	andi.	r6,r6,_PAGE_HASHPTE
	beq+	10f			/* no PTE: go look for an empty slot */
	tlbie	r4

	lis	r4, (htab_hash_searches - ADDR_OFFSET)@ha
	lwz	r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
	addi	r6,r6,1			/* count how many searches we do */
	stw	r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
	mtctr	r0
	addi	r4,r3,-HPTE_SIZE
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	CMPPTE	0,r6,r5
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_slot

	patch_site	0f, patch__hash_page_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	CMPPTE	0,r6,r5
	bdnzf	2,2b
	beq+	found_slot
	xori	r5,r5,PTE_H		/* clear H bit again */

	/* Search the primary PTEG for an empty slot */
10:	mtctr	r0
	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
	TST_V(r6)			/* test valid bit */
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	found_empty

	/* update counter of times that the primary PTEG is full */
	lis	r4, (primary_pteg_full - ADDR_OFFSET)@ha
	lwz	r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
	addi	r6,r6,1
	stw	r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)

	patch_site	0f, patch__hash_page_C
	/* Search the secondary PTEG for an empty slot */
	ori	r5,r5,PTE_H		/* set H (secondary hash) bit */
0:	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
	xori	r4,r4,(-PTEG_SIZE & 0xffff)
	addi	r4,r4,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r6,HPTE_SIZE(r4)
	TST_V(r6)
	bdnzf	2,2b
	beq+	found_empty
	xori	r5,r5,PTE_H		/* clear H bit again */

	/*
	 * Choose an arbitrary slot in the primary PTEG to overwrite.
	 * Since both the primary and secondary PTEGs are full, and we
	 * have no information that the PTEs in the primary PTEG are
	 * more important or useful than those in the secondary PTEG,
	 * and we know there is a definite (although small) speed
	 * advantage to putting the PTE in the primary PTEG, we always
	 * put the PTE in the primary PTEG.
	 *
	 * In addition, we skip any slot that is mapping kernel text in
	 * order to avoid a deadlock when not using BAT mappings if
	 * trying to hash in the kernel hash code itself after it has
	 * already taken the hash table lock.  This works in conjunction
	 * with pre-faulting of the kernel text.
	 *
	 * If the hash table bucket is full of kernel text entries, we'll
	 * lock up here, but that shouldn't happen.
	 */

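	/*
	 * next_slot below is a round-robin cursor over the 8 slots of
	 * the primary PTEG, i.e. roughly:
	 *
	 *	next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
	 */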
1:	lis	r4, (next_slot - ADDR_OFFSET)@ha	/* get next evict slot */
	lwz	r6, (next_slot - ADDR_OFFSET)@l(r4)
	addi	r6,r6,HPTE_SIZE			/* search for candidate */
	andi.	r6,r6,7*HPTE_SIZE
	stw	r6,next_slot@l(r4)
	add	r4,r3,r6
	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
	clrrwi	r0,r0,12
	lis	r6,etext@h
	ori	r6,r6,etext@l		/* get etext */
	tophys(r6,r6)
	cmpl	cr0,r0,r6		/* compare and try again */
	blt	1b

#ifndef CONFIG_SMP
	/* Store PTE in PTEG */
found_empty:
	STPTE	r5,0(r4)
found_slot:
	STPTE	r8,HPTE_SIZE/2(r4)

#else /* CONFIG_SMP */
/*
 * Between the tlbie above and updating the hash table entry below,
 * another CPU could read the hash table entry and put it in its TLB.
 * There are 3 cases:
 * 1. using an empty slot
 * 2. updating an earlier entry to change permissions (i.e. enable write)
 * 3. taking over the PTE for an unrelated address
 *
 * In each case it doesn't really matter if the other CPUs have the old
 * PTE in their TLB.  So we don't need to bother with another tlbie here,
 * which is convenient as we've overwritten the register that had the
 * address. :-)  The tlbie above is mainly to make sure that this CPU comes
 * and gets the new PTE from the hash table.
 *
 * We do however have to make sure that the PTE is never in an invalid
 * state with the V bit set.
 */
found_empty:
found_slot:
	CLR_V(r5,r0)		/* clear V (valid) bit in PTE */
	STPTE	r5,0(r4)
	sync
	TLBSYNC
	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
	sync
	SET_V(r5)
	STPTE	r5,0(r4)	/* finally set V bit in PTE */
#endif /* CONFIG_SMP */

	sync		/* make sure pte updates get to memory */
	blr

	.section .bss
	.align	2
next_slot:
	.space	4
primary_pteg_full:
	.space	4
htab_hash_searches:
	.space	4
	.previous

/*
 * Flush the entry for a particular page from the hash table.
 *
 * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
 *		    int count)
 *
 * We assume that there is a hash table in use (Hash != 0).
 */
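/*
 * Roughly, in C (a sketch of the loop structure only, eliding the
 * locking and MSR manipulation done below):
 *
 *	for (; count > 0; count--, va += 0x1000, ptep++) {
 *		if (pte_val(*ptep) & _PAGE_HASHPTE) {
 *			// atomically clear _PAGE_HASHPTE,
 *			// invalidate the matching HPTE (clear V),
 *			// and tlbie the page
 *		}
 *	}
 */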
_GLOBAL(flush_hash_pages)
	/*
	 * We disable interrupts here, even on UP, because we want
	 * the _PAGE_HASHPTE bit to be a reliable indication of
	 * whether the HPTE exists (or at least whether one did once).
	 * We also turn off the MMU for data accesses so that we
	 * can't take a hash table miss (assuming the code is
	 * covered by a BAT). -- paulus
	 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	SYNC_601
	isync

	/* First find a PTE in the range that has _PAGE_HASHPTE set */
#ifndef CONFIG_PTE_64BIT
	rlwimi	r5,r4,22,20,29
#else
	rlwimi	r5,r4,23,20,28
#endif
1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	2f
	ble	cr1,19f
	addi	r4,r4,0x1000
	addi	r5,r5,PTE_SIZE
	addi	r6,r6,-1
	b	1b

	/* Convert context and va to VSID */
2:	mulli	r3,r3,897*16		/* multiply context by context skew */
	rlwinm	r0,r4,4,28,31		/* get ESID (top 4 bits of va) */
	mulli	r0,r0,0x111		/* multiply by ESID skew */
	add	r3,r3,r0		/* note code below trims to 24 bits */

	/* Construct the high word of the PPC-style PTE (r11) */
	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
	SET_V(r11)			/* set V (valid) bit */

#ifdef CONFIG_SMP
	lis	r9, (mmu_hash_lock - PAGE_OFFSET)@ha
	addi	r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
	tophys(r8, r2)
	lwz	r8, TASK_CPU(r8)
	oris	r8,r8,9
10:	lwarx	r0,0,r9
	cmpi	0,r0,0
	bne-	11f
	stwcx.	r8,0,r9
	beq+	12f
11:	lwz	r0,0(r9)
	cmpi	0,r0,0
	beq	10b
	b	11b
12:	isync
#endif

	/*
	 * Check the _PAGE_HASHPTE bit in the linux PTE.  If it is
	 * already clear, we're done (for this pte).  If not,
	 * clear it (atomically) and proceed. -- paulus.
	 */
#if (PTE_FLAGS_OFFSET != 0)
	addi	r5,r5,PTE_FLAGS_OFFSET
#endif
33:	lwarx	r8,0,r5			/* fetch the pte flags word */
	andi.	r0,r8,_PAGE_HASHPTE
	beq	8f			/* done if HASHPTE is already clear */
	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
	stwcx.	r8,0,r5			/* update the pte */
	bne-	33b

	patch_site	0f, patch__flush_hash_A0
	patch_site	1f, patch__flush_hash_A1
	patch_site	2f, patch__flush_hash_A2
	/* Get the address of the primary PTE group in the hash table (r8) */
0:	lis	r8, (Hash_base - PAGE_OFFSET)@h	/* base address of hash table */
1:	rlwimi	r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT    /* VSID -> hash */
2:	rlwinm	r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
	xor	r8,r0,r8		/* make primary hash */

	/* Search the primary PTEG for a PTE whose 1st (d)word matches r11 */
	li	r0,8			/* PTEs/group */
	mtctr	r0
	addi	r12,r8,-HPTE_SIZE
1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
	CMPPTE	0,r0,r11
	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
	beq+	3f

	patch_site	0f, patch__flush_hash_B
	/* Search the secondary PTEG for a matching PTE */
	ori	r11,r11,PTE_H		/* set H (secondary hash) bit */
	li	r0,8			/* PTEs/group */
0:	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
	xori	r12,r12,(-PTEG_SIZE & 0xffff)
	addi	r12,r12,-HPTE_SIZE
	mtctr	r0
2:	LDPTEu	r0,HPTE_SIZE(r12)
	CMPPTE	0,r0,r11
	bdnzf	2,2b
	xori	r11,r11,PTE_H		/* clear H again */
	bne-	4f			/* should rarely fail to find it */

3:	li	r0,0
	STPTE	r0,0(r12)		/* invalidate entry */
4:	sync
	tlbie	r4			/* in hw tlb too */
	sync

8:	ble	cr1,9f			/* if all ptes checked */
81:	addi	r6,r6,-1
	addi	r5,r5,PTE_SIZE
	addi	r4,r4,0x1000
	lwz	r0,0(r5)		/* check next pte */
	cmpwi	cr1,r6,1
	andi.	r0,r0,_PAGE_HASHPTE
	bne	33b
	bgt	cr1,81b

9:
#ifdef CONFIG_SMP
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
#endif

19:	mtmsr	r10
	SYNC_601
	isync
	blr
EXPORT_SYMBOL(flush_hash_pages)

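/*
 * On SMP, the two routines below take mmu_hash_lock so that their
 * tlbie/TLBSYNC sequences are serialized against the hash-table
 * updates above; classic 32-bit implementations require that only
 * one CPU have a tlbie/tlbsync sequence in progress at a time.
 */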
/*
 * Flush an entry from the TLB
 */
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
	blr

/*
 * Flush the entire TLB. 603/603e only
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
	lwz	r8,TASK_CPU(r2)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
	blr