/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/io.h>
#include <asm/pgtable-bits.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_WRITE | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 (cpu_has_rixi ? _PAGE_NO_EXEC : 0) | _page_cachable_default)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | \
				 _page_cachable_default)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
				 _page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
			__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means..
 */

/*
 * Dummy values to fill the table in mmap.c
 * The real values will be generated at runtime
 */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

extern void paging_init(void);
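/*
 * Illustrative sketch (hypothetical fault address 'addr', not a definition
 * from this header): the zero page is replicated once per cache colour and
 * zero_page_mask selects among the copies, so ZERO_PAGE() must be given the
 * user virtual address that will map it, e.g.
 *
 *	struct page *zp = ZERO_PAGE(addr);
 *
 * returns the copy whose colour matches 'addr', avoiding dcache aliasing
 * between the user mapping and the kernel's view of the page.
 */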
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)


extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
	pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

	if (pte.pte_low & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			buddy->pte_low	|= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
		null.pte_low = null.pte_high = _PAGE_GLOBAL;

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/*
 * Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif
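/*
 * Illustrative sketch of the buddy handling above (hypothetical ptep, mm and
 * addr): a MIPS TLB entry maps an even/odd pair of pages, so a global pte
 * must keep its currently-empty buddy global too, and clearing it leaves
 * _PAGE_GLOBAL behind instead of a plain zero:
 *
 *	set_pte(ptep, __pte(pfn_bits | _PAGE_PRESENT | _PAGE_GLOBAL));
 *		- the pte_none() buddy silently gains _PAGE_GLOBAL
 *	pte_clear(mm, addr, ptep);
 *		- the entry ends up as _PAGE_GLOBAL: still pte_none(), but the
 *		  global bit of the pair is preserved
 */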
/*
 * (pmds are folded into puds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		pte.pte_low  |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (pte.pte_low & _PAGE_READ) {
		pte.pte_low  |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}
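/*
 * Illustrative sketch (hypothetical pte value): _PAGE_SILENT_WRITE is the bit
 * the hardware actually honours, so it only appears once both software bits
 * (_PAGE_WRITE and _PAGE_MODIFIED) are present, whichever order the helpers
 * are applied in:
 *
 *	pte = pte_mkdirty(pte_mkwrite(pte));	_PAGE_SILENT_WRITE ends up set
 *	pte = pte_wrprotect(pte);		WRITE and SILENT_WRITE cleared
 *
 * The MODIFIED bit survives pte_wrprotect(), so a later pte_mkwrite() alone
 * re-arms the hardware bit.
 */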
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (cpu_has_rixi) {
		if (!(pte_val(pte) & _PAGE_NO_READ))
			pte_val(pte) |= _PAGE_SILENT_READ;
	} else {
		if (pte_val(pte) & _PAGE_READ)
			pte_val(pte) |= _PAGE_SILENT_READ;
	}
	return pte;
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#endif
static inline int pte_special(pte_t pte)	{ return 0; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif


extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

#ifdef CONFIG_PHYS_ADDR_T_64BIT
extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		unsigned long pfn, unsigned long size, pgprot_t prot);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
		unsigned long vaddr,
		unsigned long pfn,
		unsigned long size,
		pgprot_t prot)
{
	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}
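/*
 * Illustrative sketch (hypothetical vma/mm/addr/pmdp/page/prot): a
 * transparent huge page is simply a leaf pmd with _PAGE_HUGE set, built and
 * installed with the helpers in this section, e.g.
 *
 *	pmd_t pmd = pmd_mkhuge(mk_pmd(page, prot));
 *	set_pmd_at(mm, addr, pmdp, pmd);
 *	if (pmd_trans_huge(*pmdp))
 *		update_mmu_cache_pmd(vma, addr, pmdp);
 */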
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}

static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SPLITTING;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (cpu_has_rixi) {
		if (!(pmd_val(pmd) & _PAGE_NO_READ))
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	} else {
		if (pmd_val(pmd) & _PAGE_READ)
			pmd_val(pmd) |= _PAGE_SILENT_READ;
	}

	return pmd;
}

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pmd;
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}
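/*
 * Illustrative sketch (hypothetical mm/addr/pmdp/newprot): a protection
 * change on a huge mapping keeps the pfn and state bits covered by
 * _PAGE_CHG_MASK and replaces only the protection, roughly:
 *
 *	pmd_t pmd = pmdp_get_and_clear(mm, addr, pmdp);
 *	set_pmd_at(mm, addr, pmdp, pmd_modify(pmd, newprot));
 */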
/*
 * The generic version pmdp_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
			unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#include <asm-generic/pgtable.h>

/*
 * uncached accelerated TLB map for video memory access
 */
#ifdef CONFIG_CPU_SUPPORTS_UNCACHED_ACCELERATED
#define __HAVE_PHYS_MEM_ACCESS_PROT

struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
		unsigned long size, pgprot_t *vma_prot);
#endif

/*
 * We provide our own get_unmapped_area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* _ASM_PGTABLE_H */