1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 /* 3 * Copyright (C) 2012 ARM Ltd. 4 */ 5 #ifndef __ASM_PGTABLE_H 6 #define __ASM_PGTABLE_H 7 8 #include <asm/bug.h> 9 #include <asm/proc-fns.h> 10 11 #include <asm/memory.h> 12 #include <asm/pgtable-hwdef.h> 13 #include <asm/pgtable-prot.h> 14 #include <asm/tlbflush.h> 15 16 /* 17 * VMALLOC range. 18 * 19 * VMALLOC_START: beginning of the kernel vmalloc space 20 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space 21 * and fixed mappings 22 */ 23 #define VMALLOC_START (MODULES_END) 24 #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) 25 26 #define FIRST_USER_ADDRESS 0UL 27 28 #ifndef __ASSEMBLY__ 29 30 #include <asm/cmpxchg.h> 31 #include <asm/fixmap.h> 32 #include <linux/mmdebug.h> 33 #include <linux/mm_types.h> 34 #include <linux/sched.h> 35 36 extern struct page *vmemmap; 37 38 extern void __pte_error(const char *file, int line, unsigned long val); 39 extern void __pmd_error(const char *file, int line, unsigned long val); 40 extern void __pud_error(const char *file, int line, unsigned long val); 41 extern void __pgd_error(const char *file, int line, unsigned long val); 42 43 /* 44 * ZERO_PAGE is a global shared page that is always zero: used 45 * for zero-mapped memory areas etc.. 46 */ 47 extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; 48 #define ZERO_PAGE(vaddr) phys_to_page(__pa_symbol(empty_zero_page)) 49 50 #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) 51 52 /* 53 * Macros to convert between a physical address and its placement in a 54 * page table entry, taking care of 52-bit addresses. 55 */ 56 #ifdef CONFIG_ARM64_PA_BITS_52 57 #define __pte_to_phys(pte) \ 58 ((pte_val(pte) & PTE_ADDR_LOW) | ((pte_val(pte) & PTE_ADDR_HIGH) << 36)) 59 #define __phys_to_pte_val(phys) (((phys) | ((phys) >> 36)) & PTE_ADDR_MASK) 60 #else 61 #define __pte_to_phys(pte) (pte_val(pte) & PTE_ADDR_MASK) 62 #define __phys_to_pte_val(phys) (phys) 63 #endif 64 65 #define pte_pfn(pte) (__pte_to_phys(pte) >> PAGE_SHIFT) 66 #define pfn_pte(pfn,prot) \ 67 __pte(__phys_to_pte_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 68 69 #define pte_none(pte) (!pte_val(pte)) 70 #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) 71 #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) 72 73 /* 74 * The following only work if pte_present(). Undefined behaviour otherwise. 75 */ 76 #define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))) 77 #define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) 78 #define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) 79 #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) 80 #define pte_user_exec(pte) (!(pte_val(pte) & PTE_UXN)) 81 #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) 82 #define pte_devmap(pte) (!!(pte_val(pte) & PTE_DEVMAP)) 83 84 #define pte_cont_addr_end(addr, end) \ 85 ({ unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK; \ 86 (__boundary - 1 < (end) - 1) ? __boundary : (end); \ 87 }) 88 89 #define pmd_cont_addr_end(addr, end) \ 90 ({ unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK; \ 91 (__boundary - 1 < (end) - 1) ? __boundary : (end); \ 92 }) 93 94 #define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY)) 95 #define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) 96 #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) 97 98 #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) 99 #define pte_valid_not_user(pte) \ 100 ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) 101 #define pte_valid_young(pte) \ 102 ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF)) 103 #define pte_valid_user(pte) \ 104 ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) 105 106 /* 107 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending 108 * so that we don't erroneously return false for pages that have been 109 * remapped as PROT_NONE but are yet to be flushed from the TLB. 110 */ 111 #define pte_accessible(mm, pte) \ 112 (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte)) 113 114 /* 115 * p??_access_permitted() is true for valid user mappings (subject to the 116 * write permission check). PROT_NONE mappings do not have the PTE_VALID bit 117 * set. 118 */ 119 #define pte_access_permitted(pte, write) \ 120 (pte_valid_user(pte) && (!(write) || pte_write(pte))) 121 #define pmd_access_permitted(pmd, write) \ 122 (pte_access_permitted(pmd_pte(pmd), (write))) 123 #define pud_access_permitted(pud, write) \ 124 (pte_access_permitted(pud_pte(pud), (write))) 125 126 static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot) 127 { 128 pte_val(pte) &= ~pgprot_val(prot); 129 return pte; 130 } 131 132 static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot) 133 { 134 pte_val(pte) |= pgprot_val(prot); 135 return pte; 136 } 137 138 static inline pte_t pte_wrprotect(pte_t pte) 139 { 140 pte = clear_pte_bit(pte, __pgprot(PTE_WRITE)); 141 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); 142 return pte; 143 } 144 145 static inline pte_t pte_mkwrite(pte_t pte) 146 { 147 pte = set_pte_bit(pte, __pgprot(PTE_WRITE)); 148 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 149 return pte; 150 } 151 152 static inline pte_t pte_mkclean(pte_t pte) 153 { 154 pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY)); 155 pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); 156 157 return pte; 158 } 159 160 static inline pte_t pte_mkdirty(pte_t pte) 161 { 162 pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); 163 164 if (pte_write(pte)) 165 pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); 166 167 return pte; 168 } 169 170 static inline pte_t pte_mkold(pte_t pte) 171 { 172 return clear_pte_bit(pte, __pgprot(PTE_AF)); 173 } 174 175 static inline pte_t pte_mkyoung(pte_t pte) 176 { 177 return set_pte_bit(pte, __pgprot(PTE_AF)); 178 } 179 180 static inline pte_t pte_mkspecial(pte_t pte) 181 { 182 return set_pte_bit(pte, __pgprot(PTE_SPECIAL)); 183 } 184 185 static inline pte_t pte_mkcont(pte_t pte) 186 { 187 pte = set_pte_bit(pte, __pgprot(PTE_CONT)); 188 return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE)); 189 } 190 191 static inline pte_t pte_mknoncont(pte_t pte) 192 { 193 return clear_pte_bit(pte, __pgprot(PTE_CONT)); 194 } 195 196 static inline pte_t pte_mkpresent(pte_t pte) 197 { 198 return set_pte_bit(pte, __pgprot(PTE_VALID)); 199 } 200 201 static inline pmd_t pmd_mkcont(pmd_t pmd) 202 { 203 return __pmd(pmd_val(pmd) | PMD_SECT_CONT); 204 } 205 206 static inline pte_t pte_mkdevmap(pte_t pte) 207 { 208 return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); 209 } 210 211 static inline void set_pte(pte_t *ptep, pte_t pte) 212 { 213 WRITE_ONCE(*ptep, pte); 214 215 /* 216 * Only if the new pte is valid and kernel, otherwise TLB maintenance 217 * or update_mmu_cache() have the necessary barriers. 218 */ 219 if (pte_valid_not_user(pte)) { 220 dsb(ishst); 221 isb(); 222 } 223 } 224 225 extern void __sync_icache_dcache(pte_t pteval); 226 227 /* 228 * PTE bits configuration in the presence of hardware Dirty Bit Management 229 * (PTE_WRITE == PTE_DBM): 230 * 231 * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw) 232 * 0 0 | 1 0 0 233 * 0 1 | 1 1 0 234 * 1 0 | 1 0 1 235 * 1 1 | 0 1 x 236 * 237 * When hardware DBM is not present, the sofware PTE_DIRTY bit is updated via 238 * the page fault mechanism. Checking the dirty status of a pte becomes: 239 * 240 * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) 241 */ 242 243 static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep, 244 pte_t pte) 245 { 246 pte_t old_pte; 247 248 if (!IS_ENABLED(CONFIG_DEBUG_VM)) 249 return; 250 251 old_pte = READ_ONCE(*ptep); 252 253 if (!pte_valid(old_pte) || !pte_valid(pte)) 254 return; 255 if (mm != current->active_mm && atomic_read(&mm->mm_users) <= 1) 256 return; 257 258 /* 259 * Check for potential race with hardware updates of the pte 260 * (ptep_set_access_flags safely changes valid ptes without going 261 * through an invalid entry). 262 */ 263 VM_WARN_ONCE(!pte_young(pte), 264 "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", 265 __func__, pte_val(old_pte), pte_val(pte)); 266 VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte), 267 "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx", 268 __func__, pte_val(old_pte), pte_val(pte)); 269 } 270 271 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 272 pte_t *ptep, pte_t pte) 273 { 274 if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte)) 275 __sync_icache_dcache(pte); 276 277 __check_racy_pte_update(mm, ptep, pte); 278 279 set_pte(ptep, pte); 280 } 281 282 /* 283 * Huge pte definitions. 284 */ 285 #define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) 286 287 /* 288 * Hugetlb definitions. 289 */ 290 #define HUGE_MAX_HSTATE 4 291 #define HPAGE_SHIFT PMD_SHIFT 292 #define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) 293 #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 294 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 295 296 static inline pte_t pgd_pte(pgd_t pgd) 297 { 298 return __pte(pgd_val(pgd)); 299 } 300 301 static inline pte_t pud_pte(pud_t pud) 302 { 303 return __pte(pud_val(pud)); 304 } 305 306 static inline pud_t pte_pud(pte_t pte) 307 { 308 return __pud(pte_val(pte)); 309 } 310 311 static inline pmd_t pud_pmd(pud_t pud) 312 { 313 return __pmd(pud_val(pud)); 314 } 315 316 static inline pte_t pmd_pte(pmd_t pmd) 317 { 318 return __pte(pmd_val(pmd)); 319 } 320 321 static inline pmd_t pte_pmd(pte_t pte) 322 { 323 return __pmd(pte_val(pte)); 324 } 325 326 static inline pgprot_t mk_pud_sect_prot(pgprot_t prot) 327 { 328 return __pgprot((pgprot_val(prot) & ~PUD_TABLE_BIT) | PUD_TYPE_SECT); 329 } 330 331 static inline pgprot_t mk_pmd_sect_prot(pgprot_t prot) 332 { 333 return __pgprot((pgprot_val(prot) & ~PMD_TABLE_BIT) | PMD_TYPE_SECT); 334 } 335 336 #ifdef CONFIG_NUMA_BALANCING 337 /* 338 * See the comment in include/asm-generic/pgtable.h 339 */ 340 static inline int pte_protnone(pte_t pte) 341 { 342 return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE; 343 } 344 345 static inline int pmd_protnone(pmd_t pmd) 346 { 347 return pte_protnone(pmd_pte(pmd)); 348 } 349 #endif 350 351 /* 352 * THP definitions. 353 */ 354 355 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 356 #define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) 357 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 358 359 #define pmd_present(pmd) pte_present(pmd_pte(pmd)) 360 #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) 361 #define pmd_young(pmd) pte_young(pmd_pte(pmd)) 362 #define pmd_valid(pmd) pte_valid(pmd_pte(pmd)) 363 #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) 364 #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) 365 #define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd))) 366 #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) 367 #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) 368 #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) 369 #define pmd_mkinvalid(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID)) 370 371 #define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) 372 373 #define pmd_write(pmd) pte_write(pmd_pte(pmd)) 374 375 #define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) 376 377 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 378 #define pmd_devmap(pmd) pte_devmap(pmd_pte(pmd)) 379 #endif 380 static inline pmd_t pmd_mkdevmap(pmd_t pmd) 381 { 382 return pte_pmd(set_pte_bit(pmd_pte(pmd), __pgprot(PTE_DEVMAP))); 383 } 384 385 #define __pmd_to_phys(pmd) __pte_to_phys(pmd_pte(pmd)) 386 #define __phys_to_pmd_val(phys) __phys_to_pte_val(phys) 387 #define pmd_pfn(pmd) ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT) 388 #define pfn_pmd(pfn,prot) __pmd(__phys_to_pmd_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 389 #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot) 390 391 #define pud_young(pud) pte_young(pud_pte(pud)) 392 #define pud_mkyoung(pud) pte_pud(pte_mkyoung(pud_pte(pud))) 393 #define pud_write(pud) pte_write(pud_pte(pud)) 394 395 #define pud_mkhuge(pud) (__pud(pud_val(pud) & ~PUD_TABLE_BIT)) 396 397 #define __pud_to_phys(pud) __pte_to_phys(pud_pte(pud)) 398 #define __phys_to_pud_val(phys) __phys_to_pte_val(phys) 399 #define pud_pfn(pud) ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT) 400 #define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)) 401 402 #define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)) 403 404 #define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd)) 405 #define __phys_to_pgd_val(phys) __phys_to_pte_val(phys) 406 407 #define __pgprot_modify(prot,mask,bits) \ 408 __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) 409 410 #define pgprot_nx(prot) \ 411 __pgprot_modify(prot, 0, PTE_PXN) 412 413 /* 414 * Mark the prot value as uncacheable and unbufferable. 415 */ 416 #define pgprot_noncached(prot) \ 417 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN) 418 #define pgprot_writecombine(prot) \ 419 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 420 #define pgprot_device(prot) \ 421 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) 422 /* 423 * DMA allocations for non-coherent devices use what the Arm architecture calls 424 * "Normal non-cacheable" memory, which permits speculation, unaligned accesses 425 * and merging of writes. This is different from "Device-nGnR[nE]" memory which 426 * is intended for MMIO and thus forbids speculation, preserves access size, 427 * requires strict alignment and can also force write responses to come from the 428 * endpoint. 429 */ 430 #define pgprot_dmacoherent(prot) \ 431 __pgprot_modify(prot, PTE_ATTRINDX_MASK, \ 432 PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 433 434 #define __HAVE_PHYS_MEM_ACCESS_PROT 435 struct file; 436 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 437 unsigned long size, pgprot_t vma_prot); 438 439 #define pmd_none(pmd) (!pmd_val(pmd)) 440 441 #define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT)) 442 443 #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 444 PMD_TYPE_TABLE) 445 #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 446 PMD_TYPE_SECT) 447 #define pmd_leaf(pmd) pmd_sect(pmd) 448 449 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3 450 static inline bool pud_sect(pud_t pud) { return false; } 451 static inline bool pud_table(pud_t pud) { return true; } 452 #else 453 #define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 454 PUD_TYPE_SECT) 455 #define pud_table(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 456 PUD_TYPE_TABLE) 457 #endif 458 459 extern pgd_t init_pg_dir[PTRS_PER_PGD]; 460 extern pgd_t init_pg_end[]; 461 extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 462 extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; 463 extern pgd_t idmap_pg_end[]; 464 extern pgd_t tramp_pg_dir[PTRS_PER_PGD]; 465 466 extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd); 467 468 static inline bool in_swapper_pgdir(void *addr) 469 { 470 return ((unsigned long)addr & PAGE_MASK) == 471 ((unsigned long)swapper_pg_dir & PAGE_MASK); 472 } 473 474 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 475 { 476 #ifdef __PAGETABLE_PMD_FOLDED 477 if (in_swapper_pgdir(pmdp)) { 478 set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd))); 479 return; 480 } 481 #endif /* __PAGETABLE_PMD_FOLDED */ 482 483 WRITE_ONCE(*pmdp, pmd); 484 485 if (pmd_valid(pmd)) { 486 dsb(ishst); 487 isb(); 488 } 489 } 490 491 static inline void pmd_clear(pmd_t *pmdp) 492 { 493 set_pmd(pmdp, __pmd(0)); 494 } 495 496 static inline phys_addr_t pmd_page_paddr(pmd_t pmd) 497 { 498 return __pmd_to_phys(pmd); 499 } 500 501 static inline void pte_unmap(pte_t *pte) { } 502 503 /* Find an entry in the third-level page table. */ 504 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 505 506 #define pte_offset_phys(dir,addr) (pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t)) 507 #define pte_offset_kernel(dir,addr) ((pte_t *)__va(pte_offset_phys((dir), (addr)))) 508 509 #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) 510 511 #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr)) 512 #define pte_set_fixmap_offset(pmd, addr) pte_set_fixmap(pte_offset_phys(pmd, addr)) 513 #define pte_clear_fixmap() clear_fixmap(FIX_PTE) 514 515 #define pmd_page(pmd) phys_to_page(__pmd_to_phys(pmd)) 516 517 /* use ONLY for statically allocated translation tables */ 518 #define pte_offset_kimg(dir,addr) ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr)))) 519 520 /* 521 * Conversion functions: convert a page and protection to a page entry, 522 * and a page entry and page directory to the page they refer to. 523 */ 524 #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) 525 526 #if CONFIG_PGTABLE_LEVELS > 2 527 528 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) 529 530 #define pud_none(pud) (!pud_val(pud)) 531 #define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT)) 532 #define pud_present(pud) pte_present(pud_pte(pud)) 533 #define pud_leaf(pud) pud_sect(pud) 534 #define pud_valid(pud) pte_valid(pud_pte(pud)) 535 536 static inline void set_pud(pud_t *pudp, pud_t pud) 537 { 538 #ifdef __PAGETABLE_PUD_FOLDED 539 if (in_swapper_pgdir(pudp)) { 540 set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud))); 541 return; 542 } 543 #endif /* __PAGETABLE_PUD_FOLDED */ 544 545 WRITE_ONCE(*pudp, pud); 546 547 if (pud_valid(pud)) { 548 dsb(ishst); 549 isb(); 550 } 551 } 552 553 static inline void pud_clear(pud_t *pudp) 554 { 555 set_pud(pudp, __pud(0)); 556 } 557 558 static inline phys_addr_t pud_page_paddr(pud_t pud) 559 { 560 return __pud_to_phys(pud); 561 } 562 563 /* Find an entry in the second-level page table. */ 564 #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) 565 566 #define pmd_offset_phys(dir, addr) (pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t)) 567 #define pmd_offset(dir, addr) ((pmd_t *)__va(pmd_offset_phys((dir), (addr)))) 568 569 #define pmd_set_fixmap(addr) ((pmd_t *)set_fixmap_offset(FIX_PMD, addr)) 570 #define pmd_set_fixmap_offset(pud, addr) pmd_set_fixmap(pmd_offset_phys(pud, addr)) 571 #define pmd_clear_fixmap() clear_fixmap(FIX_PMD) 572 573 #define pud_page(pud) phys_to_page(__pud_to_phys(pud)) 574 575 /* use ONLY for statically allocated translation tables */ 576 #define pmd_offset_kimg(dir,addr) ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr)))) 577 578 #else 579 580 #define pud_page_paddr(pud) ({ BUILD_BUG(); 0; }) 581 582 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */ 583 #define pmd_set_fixmap(addr) NULL 584 #define pmd_set_fixmap_offset(pudp, addr) ((pmd_t *)pudp) 585 #define pmd_clear_fixmap() 586 587 #define pmd_offset_kimg(dir,addr) ((pmd_t *)dir) 588 589 #endif /* CONFIG_PGTABLE_LEVELS > 2 */ 590 591 #if CONFIG_PGTABLE_LEVELS > 3 592 593 #define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud)) 594 595 #define pgd_none(pgd) (!pgd_val(pgd)) 596 #define pgd_bad(pgd) (!(pgd_val(pgd) & 2)) 597 #define pgd_present(pgd) (pgd_val(pgd)) 598 599 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) 600 { 601 if (in_swapper_pgdir(pgdp)) { 602 set_swapper_pgd(pgdp, pgd); 603 return; 604 } 605 606 WRITE_ONCE(*pgdp, pgd); 607 dsb(ishst); 608 isb(); 609 } 610 611 static inline void pgd_clear(pgd_t *pgdp) 612 { 613 set_pgd(pgdp, __pgd(0)); 614 } 615 616 static inline phys_addr_t pgd_page_paddr(pgd_t pgd) 617 { 618 return __pgd_to_phys(pgd); 619 } 620 621 /* Find an entry in the frst-level page table. */ 622 #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) 623 624 #define pud_offset_phys(dir, addr) (pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t)) 625 #define pud_offset(dir, addr) ((pud_t *)__va(pud_offset_phys((dir), (addr)))) 626 627 #define pud_set_fixmap(addr) ((pud_t *)set_fixmap_offset(FIX_PUD, addr)) 628 #define pud_set_fixmap_offset(pgd, addr) pud_set_fixmap(pud_offset_phys(pgd, addr)) 629 #define pud_clear_fixmap() clear_fixmap(FIX_PUD) 630 631 #define pgd_page(pgd) phys_to_page(__pgd_to_phys(pgd)) 632 633 /* use ONLY for statically allocated translation tables */ 634 #define pud_offset_kimg(dir,addr) ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr)))) 635 636 #else 637 638 #define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;}) 639 640 /* Match pud_offset folding in <asm/generic/pgtable-nopud.h> */ 641 #define pud_set_fixmap(addr) NULL 642 #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) 643 #define pud_clear_fixmap() 644 645 #define pud_offset_kimg(dir,addr) ((pud_t *)dir) 646 647 #endif /* CONFIG_PGTABLE_LEVELS > 3 */ 648 649 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) 650 651 /* to find an entry in a page-table-directory */ 652 #define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) 653 654 #define pgd_offset_raw(pgd, addr) ((pgd) + pgd_index(addr)) 655 656 #define pgd_offset(mm, addr) (pgd_offset_raw((mm)->pgd, (addr))) 657 658 /* to find an entry in a kernel page-table-directory */ 659 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) 660 661 #define pgd_set_fixmap(addr) ((pgd_t *)set_fixmap_offset(FIX_PGD, addr)) 662 #define pgd_clear_fixmap() clear_fixmap(FIX_PGD) 663 664 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 665 { 666 const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | 667 PTE_PROT_NONE | PTE_VALID | PTE_WRITE | PTE_GP; 668 /* preserve the hardware dirty information */ 669 if (pte_hw_dirty(pte)) 670 pte = pte_mkdirty(pte); 671 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); 672 return pte; 673 } 674 675 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 676 { 677 return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); 678 } 679 680 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 681 extern int ptep_set_access_flags(struct vm_area_struct *vma, 682 unsigned long address, pte_t *ptep, 683 pte_t entry, int dirty); 684 685 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 686 #define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS 687 static inline int pmdp_set_access_flags(struct vm_area_struct *vma, 688 unsigned long address, pmd_t *pmdp, 689 pmd_t entry, int dirty) 690 { 691 return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); 692 } 693 694 static inline int pud_devmap(pud_t pud) 695 { 696 return 0; 697 } 698 699 static inline int pgd_devmap(pgd_t pgd) 700 { 701 return 0; 702 } 703 #endif 704 705 /* 706 * Atomic pte/pmd modifications. 707 */ 708 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 709 static inline int __ptep_test_and_clear_young(pte_t *ptep) 710 { 711 pte_t old_pte, pte; 712 713 pte = READ_ONCE(*ptep); 714 do { 715 old_pte = pte; 716 pte = pte_mkold(pte); 717 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 718 pte_val(old_pte), pte_val(pte)); 719 } while (pte_val(pte) != pte_val(old_pte)); 720 721 return pte_young(pte); 722 } 723 724 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, 725 unsigned long address, 726 pte_t *ptep) 727 { 728 return __ptep_test_and_clear_young(ptep); 729 } 730 731 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 732 static inline int ptep_clear_flush_young(struct vm_area_struct *vma, 733 unsigned long address, pte_t *ptep) 734 { 735 int young = ptep_test_and_clear_young(vma, address, ptep); 736 737 if (young) { 738 /* 739 * We can elide the trailing DSB here since the worst that can 740 * happen is that a CPU continues to use the young entry in its 741 * TLB and we mistakenly reclaim the associated page. The 742 * window for such an event is bounded by the next 743 * context-switch, which provides a DSB to complete the TLB 744 * invalidation. 745 */ 746 flush_tlb_page_nosync(vma, address); 747 } 748 749 return young; 750 } 751 752 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 753 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG 754 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, 755 unsigned long address, 756 pmd_t *pmdp) 757 { 758 return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); 759 } 760 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 761 762 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 763 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 764 unsigned long address, pte_t *ptep) 765 { 766 return __pte(xchg_relaxed(&pte_val(*ptep), 0)); 767 } 768 769 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 770 #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 771 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 772 unsigned long address, pmd_t *pmdp) 773 { 774 return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp)); 775 } 776 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 777 778 /* 779 * ptep_set_wrprotect - mark read-only while trasferring potential hardware 780 * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. 781 */ 782 #define __HAVE_ARCH_PTEP_SET_WRPROTECT 783 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) 784 { 785 pte_t old_pte, pte; 786 787 pte = READ_ONCE(*ptep); 788 do { 789 old_pte = pte; 790 /* 791 * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY 792 * clear), set the PTE_DIRTY bit. 793 */ 794 if (pte_hw_dirty(pte)) 795 pte = pte_mkdirty(pte); 796 pte = pte_wrprotect(pte); 797 pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), 798 pte_val(old_pte), pte_val(pte)); 799 } while (pte_val(pte) != pte_val(old_pte)); 800 } 801 802 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 803 #define __HAVE_ARCH_PMDP_SET_WRPROTECT 804 static inline void pmdp_set_wrprotect(struct mm_struct *mm, 805 unsigned long address, pmd_t *pmdp) 806 { 807 ptep_set_wrprotect(mm, address, (pte_t *)pmdp); 808 } 809 810 #define pmdp_establish pmdp_establish 811 static inline pmd_t pmdp_establish(struct vm_area_struct *vma, 812 unsigned long address, pmd_t *pmdp, pmd_t pmd) 813 { 814 return __pmd(xchg_relaxed(&pmd_val(*pmdp), pmd_val(pmd))); 815 } 816 #endif 817 818 /* 819 * Encode and decode a swap entry: 820 * bits 0-1: present (must be zero) 821 * bits 2-7: swap type 822 * bits 8-57: swap offset 823 * bit 58: PTE_PROT_NONE (must be zero) 824 */ 825 #define __SWP_TYPE_SHIFT 2 826 #define __SWP_TYPE_BITS 6 827 #define __SWP_OFFSET_BITS 50 828 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) 829 #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) 830 #define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) 831 832 #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) 833 #define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) 834 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) 835 836 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 837 #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) 838 839 /* 840 * Ensure that there are not more swap files than can be encoded in the kernel 841 * PTEs. 842 */ 843 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) 844 845 extern int kern_addr_valid(unsigned long addr); 846 847 #include <asm-generic/pgtable.h> 848 849 /* 850 * On AArch64, the cache coherency is handled via the set_pte_at() function. 851 */ 852 static inline void update_mmu_cache(struct vm_area_struct *vma, 853 unsigned long addr, pte_t *ptep) 854 { 855 /* 856 * We don't do anything here, so there's a very small chance of 857 * us retaking a user fault which we just fixed up. The alternative 858 * is doing a dsb(ishst), but that penalises the fastpath. 859 */ 860 } 861 862 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) 863 864 #ifdef CONFIG_ARM64_PA_BITS_52 865 #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) 866 #else 867 #define phys_to_ttbr(addr) (addr) 868 #endif 869 870 /* 871 * On arm64 without hardware Access Flag, copying from user will fail because 872 * the pte is old and cannot be marked young. So we always end up with zeroed 873 * page after fork() + CoW for pfn mappings. We don't always have a 874 * hardware-managed access flag on arm64. 875 */ 876 static inline bool arch_faults_on_old_pte(void) 877 { 878 WARN_ON(preemptible()); 879 880 return !cpu_has_hw_af(); 881 } 882 #define arch_faults_on_old_pte arch_faults_on_old_pte 883 884 #endif /* !__ASSEMBLY__ */ 885 886 #endif /* __ASM_PGTABLE_H */ 887