/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 vabits_user __ro_after_init;
EXPORT_SYMBOL(vabits_user);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
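
/*
 * Allocate a zeroed page from memblock for use as a page table. This can
 * run before the linear map covers the new page, so the page is zeroed
 * through the fixmap rather than via its linear alias.
 */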
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	return ((old ^ new) & ~mask) == 0;
}
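
/*
 * Populate the PTEs covering [addr, end). The pte table itself is accessed
 * via the fixmap, since it may not be reachable through the linear map at
 * this point.
 */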
static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	pgd_t pgd = READ_ONCE(*pgdp);

	if (pgd_none(pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
		pgd = READ_ONCE(*pgdp);
	}
	BUG_ON(pgd_bad(pgd));

	pudp = pud_set_fixmap_offset(pgdp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}
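
/*
 * Install a mapping of [virt, virt + size) to the given physical range in
 * 'pgdir', walking from the pgd down and allocating intermediate tables
 * with 'pgtable_alloc' as needed. Block and contiguous mappings are used
 * where alignment allows, unless suppressed via 'flags'.
 */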
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(void),
				 int flags)
{
	unsigned long addr, length, end, next;
	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t pgd_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
		BUG();

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}
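
/*
 * Map all usable memblock regions into the linear region. The kernel text
 * alias and the crash kernel reservation are handled specially, as
 * explained below.
 */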
static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}
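
/*
 * Map a segment of the kernel image with the given permissions and
 * register an early vm_area for it, so that the range is reserved within
 * the vmalloc space.
 */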
static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud_populate(&init_mm,
			     pud_set_fixmap_offset(pgdp, FIXADDR_START),
			     lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_free(__pa_symbol(init_pg_dir),
		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}
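
/*
 * The vmemmap is populated with PMD-sized (section) mappings where the
 * granule allows (ARM64_SWAPPER_USES_SECTION_MAPS), and with base pages
 * otherwise.
 */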
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(pgdp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */
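
/*
 * The fixmap is backed by the statically allocated bm_pud/bm_pmd/bm_pte
 * tables; the helpers below return a pointer to the entry covering a
 * given fixmap address at each level.
 */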
static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	pgd_t pgd = READ_ONCE(*pgdp);

	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));

	return pud_offset_kimg(pgdp, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp, pgd;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_START;

	pgdp = pgd_offset_k(addr);
	pgd = READ_ONCE(*pgdp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(pgdp, addr);
	} else {
		if (pgd_none(pgd))
			__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	    || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}
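
/*
 * Map the FDT through the fixmap so that it can be parsed before the
 * linear map is available. The first chunk is mapped to read the header,
 * and the mapping is then extended to cover the reported total size.
 */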
void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	void *dt_virt;
	int size;

	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
	if (!dt_virt)
		return NULL;

	memblock_reserve(dt_phys, size);
	return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
					pgprot_val(mk_sect_prot(prot)));
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), sect_prot);

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
					pgprot_val(mk_sect_prot(prot)));
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), sect_prot);

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}
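
/*
 * Unlink and free a lower-level table, e.g. on behalf of the generic
 * ioremap code when it wants to install a block mapping in its place.
 * The entry is cleared and the walk cache is flushed before the old
 * table is freed.
 */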
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;	/* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		    bool want_memblock)
{
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, PAGE_KERNEL, pgd_pgtable_alloc, flags);

	return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			   altmap, want_memblock);
}
#endif