/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/sched.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
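/*
 * Illustration (assuming a 4K granule, where a contiguous PTE range is
 * 16 entries, i.e. 64K): pte_cont_addr_end(0x11000, 0x40000) returns
 * 0x20000, the next 64K boundary, since that still lies below 'end'.
 */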

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check) other than user execute-only which do not have the
 * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_DIRTY));

	if (pte_write(pte))
		pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));

	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}
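/*
 * Note that pte_mkwrite()/pte_wrprotect() flip PTE_RDONLY together with
 * PTE_WRITE: with hardware DBM, PTE_WRITE doubles as the DBM bit, while
 * PTE_RDONLY is what actually write-protects the page, so the two must
 * move in lockstep (see the bit table above set_pte_at() below).
 */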

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
	 * or update_mmu_cache() have the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 *   Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0        |   1           0          0
 *   0      1        |   1           1          0
 *   1      0        |   1           0          1
 *   1      1        |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte, addr);

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
	    (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif
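/*
 * Illustration: a PROT_NONE pte has PTE_PROT_NONE set but PTE_VALID
 * clear, so pte_present() still returns true (core mm considers the page
 * mapped) while the hardware walker treats the entry as invalid;
 * pte_protnone() above tests for exactly this combination.
 */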

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}
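/*
 * Worked example for the level 3 walk below (4K granule assumed):
 * pmd_page_paddr() masks the attribute bits off the table descriptor,
 * leaving the physical base of the pte table; pte_index() extracts
 * addr[20:12]; pte_offset_kernel() adds the two together and converts
 * the result to a virtual address with __va().
 */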

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}
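/*
 * Note that set_pmd()/set_pud() above issue dsb(ishst) and isb()
 * unconditionally, unlike set_pte(): table-level entries change rarely,
 * so the cost of always making the update visible to the page table
 * walker is negligible.
 */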

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0;})

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)
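/*
 * pte_modify() below takes only the bits in 'mask' from the new
 * protection and preserves everything else (pfn, memory attributes,
 * software dirty bit) from the old pte. PTE_RDONLY is part of the mask,
 * so hardware-dirty state encoded as (PTE_WRITE && !PTE_RDONLY) would
 * otherwise be lost; it is latched into the software PTE_DIRTY bit first.
 */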

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
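/*
 * The cmpxchg loop in __ptep_test_and_clear_young() above (and in
 * ptep_set_wrprotect() below) is needed because hardware DBM may update
 * a live pte concurrently: a plain read-modify-write could lose an
 * access-flag or dirty-state update made by the walker between the load
 * and the store.
 */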

/*
 * ptep_set_wrprotect - mark read-only while transferring potential hardware
 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		/*
		 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY
		 * clear), set the PTE_DIRTY bit.
		 */
		if (pte_hw_dirty(pte))
			pte = pte_mkdirty(pte);
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */