1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _PARISC_PGTABLE_H 3 #define _PARISC_PGTABLE_H 4 5 #include <asm/page.h> 6 #include <asm-generic/4level-fixup.h> 7 8 #include <asm/fixmap.h> 9 10 #ifndef __ASSEMBLY__ 11 /* 12 * we simulate an x86-style page table for the linux mm code 13 */ 14 15 #include <linux/bitops.h> 16 #include <linux/spinlock.h> 17 #include <linux/mm_types.h> 18 #include <asm/processor.h> 19 #include <asm/cache.h> 20 21 static inline spinlock_t *pgd_spinlock(pgd_t *); 22 23 /* 24 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel 25 * memory. For the return value to be meaningful, ADDR must be >= 26 * PAGE_OFFSET. This operation can be relatively expensive (e.g., 27 * require a hash-, or multi-level tree-lookup or something of that 28 * sort) but it guarantees to return TRUE only if accessing the page 29 * at that address does not cause an error. Note that there may be 30 * addresses for which kern_addr_valid() returns FALSE even though an 31 * access would not cause an error (e.g., this is typically true for 32 * memory mapped I/O regions. 33 * 34 * XXX Need to implement this for parisc. 35 */ 36 #define kern_addr_valid(addr) (1) 37 38 /* This is for the serialization of PxTLB broadcasts. At least on the N class 39 * systems, only one PxTLB inter processor broadcast can be active at any one 40 * time on the Merced bus. 41 42 * PTE updates are protected by locks in the PMD. 43 */ 44 extern spinlock_t pa_tlb_flush_lock; 45 extern spinlock_t pa_swapper_pg_lock; 46 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 47 extern int pa_serialize_tlb_flushes; 48 #else 49 #define pa_serialize_tlb_flushes (0) 50 #endif 51 52 #define purge_tlb_start(flags) do { \ 53 if (pa_serialize_tlb_flushes) \ 54 spin_lock_irqsave(&pa_tlb_flush_lock, flags); \ 55 else \ 56 local_irq_save(flags); \ 57 } while (0) 58 #define purge_tlb_end(flags) do { \ 59 if (pa_serialize_tlb_flushes) \ 60 spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \ 61 else \ 62 local_irq_restore(flags); \ 63 } while (0) 64 65 /* Purge data and instruction TLB entries. The TLB purge instructions 66 * are slow on SMP machines since the purge must be broadcast to all CPUs. 67 */ 68 69 static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) 70 { 71 unsigned long flags; 72 73 purge_tlb_start(flags); 74 mtsp(mm->context, 1); 75 pdtlb(addr); 76 pitlb(addr); 77 purge_tlb_end(flags); 78 } 79 80 /* Certain architectures need to do special things when PTEs 81 * within a page table are directly modified. Thus, the following 82 * hook is made available. 83 */ 84 #define set_pte(pteptr, pteval) \ 85 do{ \ 86 *(pteptr) = (pteval); \ 87 } while(0) 88 89 #define set_pte_at(mm, addr, ptep, pteval) \ 90 do { \ 91 pte_t old_pte; \ 92 unsigned long flags; \ 93 spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\ 94 old_pte = *ptep; \ 95 set_pte(ptep, pteval); \ 96 purge_tlb_entries(mm, addr); \ 97 spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\ 98 } while (0) 99 100 #endif /* !__ASSEMBLY__ */ 101 102 #define pte_ERROR(e) \ 103 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 104 #define pmd_ERROR(e) \ 105 printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e)) 106 #define pgd_ERROR(e) \ 107 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) 108 109 /* This is the size of the initially mapped kernel memory */ 110 #if defined(CONFIG_64BIT) 111 #define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */ 112 #else 113 #define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ 114 #endif 115 #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER) 116 117 #if CONFIG_PGTABLE_LEVELS == 3 118 #define PGD_ORDER 1 /* Number of pages per pgd */ 119 #define PMD_ORDER 1 /* Number of pages per pmd */ 120 #define PGD_ALLOC_ORDER (2 + 1) /* first pgd contains pmd */ 121 #else 122 #define PGD_ORDER 1 /* Number of pages per pgd */ 123 #define PGD_ALLOC_ORDER (PGD_ORDER + 1) 124 #endif 125 126 /* Definitions for 3rd level (we use PLD here for Page Lower directory 127 * because PTE_SHIFT is used lower down to mean shift that has to be 128 * done to get usable bits out of the PTE) */ 129 #define PLD_SHIFT PAGE_SHIFT 130 #define PLD_SIZE PAGE_SIZE 131 #define BITS_PER_PTE (PAGE_SHIFT - BITS_PER_PTE_ENTRY) 132 #define PTRS_PER_PTE (1UL << BITS_PER_PTE) 133 134 /* Definitions for 2nd level */ 135 #define pgtable_cache_init() do { } while (0) 136 137 #define PMD_SHIFT (PLD_SHIFT + BITS_PER_PTE) 138 #define PMD_SIZE (1UL << PMD_SHIFT) 139 #define PMD_MASK (~(PMD_SIZE-1)) 140 #if CONFIG_PGTABLE_LEVELS == 3 141 #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) 142 #else 143 #define __PAGETABLE_PMD_FOLDED 1 144 #define BITS_PER_PMD 0 145 #endif 146 #define PTRS_PER_PMD (1UL << BITS_PER_PMD) 147 148 /* Definitions for 1st level */ 149 #define PGDIR_SHIFT (PMD_SHIFT + BITS_PER_PMD) 150 #if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG 151 #define BITS_PER_PGD (BITS_PER_LONG - PGDIR_SHIFT) 152 #else 153 #define BITS_PER_PGD (PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) 154 #endif 155 #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 156 #define PGDIR_MASK (~(PGDIR_SIZE-1)) 157 #define PTRS_PER_PGD (1UL << BITS_PER_PGD) 158 #define USER_PTRS_PER_PGD PTRS_PER_PGD 159 160 #ifdef CONFIG_64BIT 161 #define MAX_ADDRBITS (PGDIR_SHIFT + BITS_PER_PGD) 162 #define MAX_ADDRESS (1UL << MAX_ADDRBITS) 163 #define SPACEID_SHIFT (MAX_ADDRBITS - 32) 164 #else 165 #define MAX_ADDRBITS (BITS_PER_LONG) 166 #define MAX_ADDRESS (1UL << MAX_ADDRBITS) 167 #define SPACEID_SHIFT 0 168 #endif 169 170 /* This calculates the number of initial pages we need for the initial 171 * page tables */ 172 #if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT) 173 # define PT_INITIAL (1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT)) 174 #else 175 # define PT_INITIAL (1) /* all initial PTEs fit into one page */ 176 #endif 177 178 /* 179 * pgd entries used up by user/kernel: 180 */ 181 182 #define FIRST_USER_ADDRESS 0UL 183 184 /* NB: The tlb miss handlers make certain assumptions about the order */ 185 /* of the following bits, so be careful (One example, bits 25-31 */ 186 /* are moved together in one instruction). */ 187 188 #define _PAGE_READ_BIT 31 /* (0x001) read access allowed */ 189 #define _PAGE_WRITE_BIT 30 /* (0x002) write access allowed */ 190 #define _PAGE_EXEC_BIT 29 /* (0x004) execute access allowed */ 191 #define _PAGE_GATEWAY_BIT 28 /* (0x008) privilege promotion allowed */ 192 #define _PAGE_DMB_BIT 27 /* (0x010) Data Memory Break enable (B bit) */ 193 #define _PAGE_DIRTY_BIT 26 /* (0x020) Page Dirty (D bit) */ 194 #define _PAGE_REFTRAP_BIT 25 /* (0x040) Page Ref. Trap enable (T bit) */ 195 #define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */ 196 #define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */ 197 #define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */ 198 #define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */ 199 #define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */ 200 201 /* N.B. The bits are defined in terms of a 32 bit word above, so the */ 202 /* following macro is ok for both 32 and 64 bit. */ 203 204 #define xlate_pabit(x) (31 - x) 205 206 /* this defines the shift to the usable bits in the PTE it is set so 207 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set 208 * to zero */ 209 #define PTE_SHIFT xlate_pabit(_PAGE_USER_BIT) 210 211 /* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */ 212 #define PFN_PTE_SHIFT 12 213 214 #define _PAGE_READ (1 << xlate_pabit(_PAGE_READ_BIT)) 215 #define _PAGE_WRITE (1 << xlate_pabit(_PAGE_WRITE_BIT)) 216 #define _PAGE_RW (_PAGE_READ | _PAGE_WRITE) 217 #define _PAGE_EXEC (1 << xlate_pabit(_PAGE_EXEC_BIT)) 218 #define _PAGE_GATEWAY (1 << xlate_pabit(_PAGE_GATEWAY_BIT)) 219 #define _PAGE_DMB (1 << xlate_pabit(_PAGE_DMB_BIT)) 220 #define _PAGE_DIRTY (1 << xlate_pabit(_PAGE_DIRTY_BIT)) 221 #define _PAGE_REFTRAP (1 << xlate_pabit(_PAGE_REFTRAP_BIT)) 222 #define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT)) 223 #define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT)) 224 #define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT)) 225 #define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT)) 226 #define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT)) 227 228 #define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED) 229 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 230 #define _PAGE_KERNEL_RO (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED) 231 #define _PAGE_KERNEL_EXEC (_PAGE_KERNEL_RO | _PAGE_EXEC) 232 #define _PAGE_KERNEL_RWX (_PAGE_KERNEL_EXEC | _PAGE_WRITE) 233 #define _PAGE_KERNEL (_PAGE_KERNEL_RO | _PAGE_WRITE) 234 235 /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds 236 * are page-aligned, we don't care about the PAGE_OFFSET bits, except 237 * for a few meta-information bits, so we shift the address to be 238 * able to effectively address 40/42/44-bits of physical address space 239 * depending on 4k/16k/64k PAGE_SIZE */ 240 #define _PxD_PRESENT_BIT 31 241 #define _PxD_ATTACHED_BIT 30 242 #define _PxD_VALID_BIT 29 243 244 #define PxD_FLAG_PRESENT (1 << xlate_pabit(_PxD_PRESENT_BIT)) 245 #define PxD_FLAG_ATTACHED (1 << xlate_pabit(_PxD_ATTACHED_BIT)) 246 #define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT)) 247 #define PxD_FLAG_MASK (0xf) 248 #define PxD_FLAG_SHIFT (4) 249 #define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT) 250 251 #ifndef __ASSEMBLY__ 252 253 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_USER) 254 #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE) 255 /* Others seem to make this executable, I don't know if that's correct 256 or not. The stack is mapped this way though so this is necessary 257 in the short term - dhd@linuxcare.com, 2000-08-08 */ 258 #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ) 259 #define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE) 260 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC) 261 #define PAGE_COPY PAGE_EXECREAD 262 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) 263 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) 264 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) 265 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX) 266 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO) 267 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) 268 #define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ) 269 270 271 /* 272 * We could have an execute only page using "gateway - promote to priv 273 * level 3", but that is kind of silly. So, the way things are defined 274 * now, we must always have read permission for pages with execute 275 * permission. For the fun of it we'll go ahead and support write only 276 * pages. 277 */ 278 279 /*xwr*/ 280 #define __P000 PAGE_NONE 281 #define __P001 PAGE_READONLY 282 #define __P010 __P000 /* copy on write */ 283 #define __P011 __P001 /* copy on write */ 284 #define __P100 PAGE_EXECREAD 285 #define __P101 PAGE_EXECREAD 286 #define __P110 __P100 /* copy on write */ 287 #define __P111 __P101 /* copy on write */ 288 289 #define __S000 PAGE_NONE 290 #define __S001 PAGE_READONLY 291 #define __S010 PAGE_WRITEONLY 292 #define __S011 PAGE_SHARED 293 #define __S100 PAGE_EXECREAD 294 #define __S101 PAGE_EXECREAD 295 #define __S110 PAGE_RWX 296 #define __S111 PAGE_RWX 297 298 299 extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */ 300 301 /* initial page tables for 0-8MB for kernel */ 302 303 extern pte_t pg0[]; 304 305 /* zero page used for uninitialized stuff */ 306 307 extern unsigned long *empty_zero_page; 308 309 /* 310 * ZERO_PAGE is a global shared page that is always zero: used 311 * for zero-mapped memory areas etc.. 312 */ 313 314 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 315 316 #define pte_none(x) (pte_val(x) == 0) 317 #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 318 #define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0)) 319 320 #define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK) 321 #define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) 322 #define pgd_flag(x) (pgd_val(x) & PxD_FLAG_MASK) 323 #define pgd_address(x) ((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT) 324 325 #if CONFIG_PGTABLE_LEVELS == 3 326 /* The first entry of the permanent pmd is not there if it contains 327 * the gateway marker */ 328 #define pmd_none(x) (!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED) 329 #else 330 #define pmd_none(x) (!pmd_val(x)) 331 #endif 332 #define pmd_bad(x) (!(pmd_flag(x) & PxD_FLAG_VALID)) 333 #define pmd_present(x) (pmd_flag(x) & PxD_FLAG_PRESENT) 334 static inline void pmd_clear(pmd_t *pmd) { 335 #if CONFIG_PGTABLE_LEVELS == 3 336 if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) 337 /* This is the entry pointing to the permanent pmd 338 * attached to the pgd; cannot clear it */ 339 __pmd_val_set(*pmd, PxD_FLAG_ATTACHED); 340 else 341 #endif 342 __pmd_val_set(*pmd, 0); 343 } 344 345 346 347 #if CONFIG_PGTABLE_LEVELS == 3 348 #define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd))) 349 #define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd)) 350 351 /* For 64 bit we have three level tables */ 352 353 #define pgd_none(x) (!pgd_val(x)) 354 #define pgd_bad(x) (!(pgd_flag(x) & PxD_FLAG_VALID)) 355 #define pgd_present(x) (pgd_flag(x) & PxD_FLAG_PRESENT) 356 static inline void pgd_clear(pgd_t *pgd) { 357 #if CONFIG_PGTABLE_LEVELS == 3 358 if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED) 359 /* This is the permanent pmd attached to the pgd; cannot 360 * free it */ 361 return; 362 #endif 363 __pgd_val_set(*pgd, 0); 364 } 365 #else 366 /* 367 * The "pgd_xxx()" functions here are trivial for a folded two-level 368 * setup: the pgd is never bad, and a pmd always exists (as it's folded 369 * into the pgd entry) 370 */ 371 static inline int pgd_none(pgd_t pgd) { return 0; } 372 static inline int pgd_bad(pgd_t pgd) { return 0; } 373 static inline int pgd_present(pgd_t pgd) { return 1; } 374 static inline void pgd_clear(pgd_t * pgdp) { } 375 #endif 376 377 /* 378 * The following only work if pte_present() is true. 379 * Undefined behaviour if not.. 380 */ 381 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 382 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 383 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; } 384 static inline int pte_special(pte_t pte) { return 0; } 385 386 static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; } 387 static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } 388 static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~_PAGE_WRITE; return pte; } 389 static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; } 390 static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; } 391 static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return pte; } 392 static inline pte_t pte_mkspecial(pte_t pte) { return pte; } 393 394 /* 395 * Huge pte definitions. 396 */ 397 #ifdef CONFIG_HUGETLB_PAGE 398 #define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE) 399 #define pte_mkhuge(pte) (__pte(pte_val(pte) | \ 400 (parisc_requires_coherency() ? 0 : _PAGE_HUGE))) 401 #else 402 #define pte_huge(pte) (0) 403 #define pte_mkhuge(pte) (pte) 404 #endif 405 406 407 /* 408 * Conversion functions: convert a page and protection to a page entry, 409 * and a page entry and page directory to the page they refer to. 410 */ 411 #define __mk_pte(addr,pgprot) \ 412 ({ \ 413 pte_t __pte; \ 414 \ 415 pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot)); \ 416 \ 417 __pte; \ 418 }) 419 420 #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 421 422 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) 423 { 424 pte_t pte; 425 pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot); 426 return pte; 427 } 428 429 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 430 { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } 431 432 /* Permanent address of a page. On parisc we don't have highmem. */ 433 434 #define pte_pfn(x) (pte_val(x) >> PFN_PTE_SHIFT) 435 436 #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) 437 438 #define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_address(pmd))) 439 440 #define __pmd_page(pmd) ((unsigned long) __va(pmd_address(pmd))) 441 #define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) 442 443 #define pgd_index(address) ((address) >> PGDIR_SHIFT) 444 445 /* to find an entry in a page-table-directory */ 446 #define pgd_offset(mm, address) \ 447 ((mm)->pgd + ((address) >> PGDIR_SHIFT)) 448 449 /* to find an entry in a kernel page-table-directory */ 450 #define pgd_offset_k(address) pgd_offset(&init_mm, address) 451 452 /* Find an entry in the second-level page table.. */ 453 454 #if CONFIG_PGTABLE_LEVELS == 3 455 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) 456 #define pmd_offset(dir,address) \ 457 ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address)) 458 #else 459 #define pmd_offset(dir,addr) ((pmd_t *) dir) 460 #endif 461 462 /* Find an entry in the third-level page table.. */ 463 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1)) 464 #define pte_offset_kernel(pmd, address) \ 465 ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address)) 466 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address) 467 #define pte_unmap(pte) do { } while (0) 468 469 #define pte_unmap(pte) do { } while (0) 470 #define pte_unmap_nested(pte) do { } while (0) 471 472 extern void paging_init (void); 473 474 /* Used for deferring calls to flush_dcache_page() */ 475 476 #define PG_dcache_dirty PG_arch_1 477 478 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); 479 480 /* Encode and de-code a swap entry */ 481 482 #define __swp_type(x) ((x).val & 0x1f) 483 #define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \ 484 (((x).val >> 8) & ~0x7) ) 485 #define __swp_entry(type, offset) ((swp_entry_t) { (type) | \ 486 ((offset & 0x7) << 6) | \ 487 ((offset & ~0x7) << 8) }) 488 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 489 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 490 491 492 static inline spinlock_t *pgd_spinlock(pgd_t *pgd) 493 { 494 if (unlikely(pgd == swapper_pg_dir)) 495 return &pa_swapper_pg_lock; 496 return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1))); 497 } 498 499 500 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) 501 { 502 pte_t pte; 503 unsigned long flags; 504 505 if (!pte_young(*ptep)) 506 return 0; 507 508 spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags); 509 pte = *ptep; 510 if (!pte_young(pte)) { 511 spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags); 512 return 0; 513 } 514 set_pte(ptep, pte_mkold(pte)); 515 purge_tlb_entries(vma->vm_mm, addr); 516 spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags); 517 return 1; 518 } 519 520 struct mm_struct; 521 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 522 { 523 pte_t old_pte; 524 unsigned long flags; 525 526 spin_lock_irqsave(pgd_spinlock(mm->pgd), flags); 527 old_pte = *ptep; 528 set_pte(ptep, __pte(0)); 529 purge_tlb_entries(mm, addr); 530 spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags); 531 532 return old_pte; 533 } 534 535 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 536 { 537 unsigned long flags; 538 spin_lock_irqsave(pgd_spinlock(mm->pgd), flags); 539 set_pte(ptep, pte_wrprotect(*ptep)); 540 purge_tlb_entries(mm, addr); 541 spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags); 542 } 543 544 #define pte_same(A,B) (pte_val(A) == pte_val(B)) 545 546 struct seq_file; 547 extern void arch_report_meminfo(struct seq_file *m); 548 549 #endif /* !__ASSEMBLY__ */ 550 551 552 /* TLB page size encoding - see table 3-1 in parisc20.pdf */ 553 #define _PAGE_SIZE_ENCODING_4K 0 554 #define _PAGE_SIZE_ENCODING_16K 1 555 #define _PAGE_SIZE_ENCODING_64K 2 556 #define _PAGE_SIZE_ENCODING_256K 3 557 #define _PAGE_SIZE_ENCODING_1M 4 558 #define _PAGE_SIZE_ENCODING_4M 5 559 #define _PAGE_SIZE_ENCODING_16M 6 560 #define _PAGE_SIZE_ENCODING_64M 7 561 562 #if defined(CONFIG_PARISC_PAGE_SIZE_4KB) 563 # define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K 564 #elif defined(CONFIG_PARISC_PAGE_SIZE_16KB) 565 # define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K 566 #elif defined(CONFIG_PARISC_PAGE_SIZE_64KB) 567 # define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K 568 #endif 569 570 571 #define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE) 572 573 /* We provide our own get_unmapped_area to provide cache coherency */ 574 575 #define HAVE_ARCH_UNMAPPED_AREA 576 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 577 578 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 579 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 580 #define __HAVE_ARCH_PTEP_SET_WRPROTECT 581 #define __HAVE_ARCH_PTE_SAME 582 #include <asm-generic/pgtable.h> 583 584 #endif /* _PARISC_PGTABLE_H */ 585