/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
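
/*
 * Worked example (illustrative only, not part of the ABI): assuming the
 * common default of 16KB pages (PAGE_SHIFT == 14, i.e. PAGE_SHIFT - 3 == 11
 * index bits per level) and CONFIG_PGTABLE_LEVELS == 3, the macros above
 * expand to:
 *
 *	PMD_SHIFT   = 14 + 11 = 25	(PMD_SIZE   = 32MB)
 *	PGDIR_SHIFT = 25 + 11 = 36	(PGDIR_SIZE = 64GB)
 *	VA_BITS     = 36 + 11 = 47
 *	PTRS_PER_{PGD,PMD,PTE} = 16384 / 8 = 2048 entries per table
 */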

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#define VMALLOC_START	MODULES_END
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE)
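
/*
 * Layout sketch (illustrative, assuming the 16KB/3-level example above and
 * cpu_vabits == 48): modules get a 256MB window starting two pages past the
 * PCI I/O range, vmalloc follows immediately, and VMALLOC_END is capped at
 * min(2048 * 1 * 2048 * 2048 * 16KB, 1UL << 48) - PMD_SIZE, i.e. the smaller
 * of the full page-table span (2^47 bytes here, PTRS_PER_PUD being folded
 * to 1) and the hardware virtual address range, minus one PMD_SIZE.
 */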

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
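
/*
 * Bit-layout example for the swap encoding above (illustrative values only):
 * __swp_entry(2, 0x5) builds (2 << 16) | (0x5 << 24) == 0x05020000, so
 * __swp_type() recovers 2 from bits 16..23 and __swp_offset() recovers 0x5
 * from bits 24 and up, while the low 16 bits stay zero and thus keep the
 * status bits of a live mapping clear (per the layout comment above).
 */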

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		"1:"	__LL	"%[tmp], %[buddy]		\n"
		"	bnez	%[tmp], 2f			\n"
		"	 or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]		\n"
		"	beqz	%[tmp], 1b			\n"
		"	nop					\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte_at(mm, addr, ptep, __pte(0));
}

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
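
/*
 * Note on the helpers above (software dirty tracking, as commonly done on
 * MIPS-style MMUs): _PAGE_MODIFIED is a software-only "has been written"
 * bit, while _PAGE_DIRTY is the bit the TLB honours for write access, so
 * pte_mkdirty()/pte_mkwrite() only grant _PAGE_DIRTY once both _PAGE_WRITE
 * and _PAGE_MODIFIED are set. Illustrative sequence:
 *
 *	pte = pte_mkwrite(pte_mkdirty(pte));	// WRITE | MODIFIED | DIRTY
 *	pte = pte_wrprotect(pte);		// clears WRITE and DIRTY,
 *						// MODIFIED is preserved
 *
 * so the first store through a clean, writable mapping still faults and can
 * be recorded before the hardware-writable bit is handed out.
 */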

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	__update_tlb(vma, address, ptep);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

#define kern_addr_valid(addr)	(1)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
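
/*
 * Reading aid for pmd_mkhuge() above: in a huge (PMD-level) entry the global
 * flag is expected at _PAGE_HGLOBAL rather than at _PAGE_GLOBAL (its
 * base-page position), so an existing GLOBAL bit is shifted up by
 * (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT) and cleared from its old
 * position before _PAGE_HUGE is set. The authoritative bit layout lives in
 * <asm/pgtable-bits.h>.
 */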

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * We provide our own get_unmapped area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */