/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */

#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't follow this just pre-define the value, so we
 * don't override it here.
 */
#define PTE_RPN_SHIFT	(PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif

/*
 * _PAGE_CHG_MASK masks off the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)

/*
 * Permission masks used to generate the __P and __S table.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif
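
/*
 * For illustration: a private writable user mapping normally starts out
 * with PAGE_COPY (ie, no _PAGE_RW), so the first store faults and the
 * generic mm code performs copy-on-write before the PTE is made
 * writable again (pte_mkwrite()).
 */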
/* Make module code happy; we don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out kernel
 * virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
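
/*
 * Rough layout with CONFIG_HIGHMEM (sketch only, low addresses first):
 * the linear map of RAM, then VMALLOC_START .. VMALLOC_END (== ioremap_bot),
 * early ioremap() mappings up to IOREMAP_TOP (== PKMAP_BASE), then the
 * PKMAP window and finally the fixmap just below the top of the address
 * space. vmalloc allocations grow up from VMALLOC_START while early
 * ioremap()s grow down from IOREMAP_TOP.
 */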
/*
 * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules
 * are used, NX cannot be set on the VMALLOC space. So the vmalloc VM space
 * and the linear memory must not share segments.
 */
#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
#define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
		       ~(VMALLOC_OFFSET - 1))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	_ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}


/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif /* CONFIG_PTE_64BIT */
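
/*
 * For illustration: the lwarx/stwcx. pair above is a load-reserve /
 * store-conditional loop, so the read-modify-write of the (low word of
 * the) PTE is atomic with respect to other CPUs. A typical caller looks
 * like
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *
 * which atomically clears _PAGE_ACCESSED and returns the previous PTE
 * value, as done in __ptep_test_and_clear_young() below.
 */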
/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, _PAGE_RW, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	pte_update(ptep, 0, set);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page_vaddr(pmd)	\
	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
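
/*
 * For illustration, a kernel-address walk with the folded pud/pmd levels
 * (sketch only; real callers go through the generic helpers):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);	(folded, points back into the pgd)
 *	pmd_t *pmd = pmd_offset(pud, addr);	(folded)
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */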
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
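
/*
 * For illustration: the PTE value is shifted right by 3 before being
 * interpreted as a swap entry, so the low three PTE bits (which include
 * _PAGE_PRESENT and _PAGE_HASHPTE) never carry swap information. Within
 * the shifted value the swap type lives in the low 5 bits and the swap
 * offset in the bits above, ie bits 3-7 and 8+ of the original PTE.
 */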
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * Read access is controlled by the _PAGE_USER bit; pte_read()
	 * always returns true here. Write access additionally requires
	 * _PAGE_RW.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
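
/*
 * For illustration: a fresh PTE is typically built with
 * pfn_pte(page_to_pfn(page), prot), while pte_modify() changes the
 * protection of an existing PTE but keeps the _PAGE_CHG_MASK bits
 * (PFN, _PAGE_HASHPTE, dirty/accessed/special) intact.
 */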
/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update
	 * preserving the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#else
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and we need to keep track of the fact that this PTE still needs
	 * invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */