// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)
#define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */

u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);
static DEFINE_MUTEX(fixmap_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_is_map_memory(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
					 MEMBLOCK_ALLOC_NOLEAKTRACE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * mismatched attribute aliases perspective.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
		phys_addr_t pte_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pmdval |= PMD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, pmdval);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
		phys_addr_t pmd_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pudval |= PUD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, pudval);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
		phys_addr_t pud_phys;
		if (flags & NO_EXEC_MAPPINGS)
			p4dval |= P4D_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, p4dval);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	/*
	 * No need for locking during early boot. And it doesn't work as
	 * expected with KASLR enabled.
	 */
	if (system_state != SYSTEM_BOOTING)
		mutex_lock(&fixmap_lock);
	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (pud_sect_supported() &&
		    ((addr | next | phys) & ~PUD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
	if (system_state != SYSTEM_BOOTING)
		mutex_unlock(&fixmap_lock);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
extern __alias(__create_pgd_mapping)
void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     phys_addr_t (*pgtable_alloc)(int), int flags);
#endif

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
			    (unsigned long)__init_begin - (unsigned long)_stext,
			    PAGE_KERNEL_RO);
}

static bool crash_mem_map __initdata;

static int __init enable_crash_mem_map(char *arg)
{
	/*
	 * Proper parameter parsing is done by reserve_crashkernel(). We only
We only 4992687275aSCatalin Marinas * need to know if the linear map has to avoid block mappings so that 5002687275aSCatalin Marinas * the crashkernel reservations can be unmapped later. 5012687275aSCatalin Marinas */ 5022687275aSCatalin Marinas crash_mem_map = true; 5032687275aSCatalin Marinas 5042687275aSCatalin Marinas return 0; 5052687275aSCatalin Marinas } 5062687275aSCatalin Marinas early_param("crashkernel", enable_crash_mem_map); 5072687275aSCatalin Marinas 50820a004e7SWill Deacon static void __init map_mem(pgd_t *pgdp) 509c1cc1552SCatalin Marinas { 51087143f40SArd Biesheuvel static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN); 511e2a073ddSArd Biesheuvel phys_addr_t kernel_start = __pa_symbol(_stext); 51298d2e153STakahiro Akashi phys_addr_t kernel_end = __pa_symbol(__init_begin); 513b10d6bcaSMike Rapoport phys_addr_t start, end; 51487143f40SArd Biesheuvel int flags = NO_EXEC_MAPPINGS; 515b10d6bcaSMike Rapoport u64 i; 51698d2e153STakahiro Akashi 51787143f40SArd Biesheuvel /* 51887143f40SArd Biesheuvel * Setting hierarchical PXNTable attributes on table entries covering 51987143f40SArd Biesheuvel * the linear region is only possible if it is guaranteed that no table 52087143f40SArd Biesheuvel * entries at any level are being shared between the linear region and 52187143f40SArd Biesheuvel * the vmalloc region. Check whether this is true for the PGD level, in 52287143f40SArd Biesheuvel * which case it is guaranteed to be true for all other levels as well. 52387143f40SArd Biesheuvel */ 52487143f40SArd Biesheuvel BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end)); 52587143f40SArd Biesheuvel 52603149563SVijay Balakrishna if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE)) 52787143f40SArd Biesheuvel flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; 52898d2e153STakahiro Akashi 52998d2e153STakahiro Akashi /* 53098d2e153STakahiro Akashi * Take care not to create a writable alias for the 53198d2e153STakahiro Akashi * read-only text and rodata sections of the kernel image. 53298d2e153STakahiro Akashi * So temporarily mark them as NOMAP to skip mappings in 53398d2e153STakahiro Akashi * the following for-loop 53498d2e153STakahiro Akashi */ 53598d2e153STakahiro Akashi memblock_mark_nomap(kernel_start, kernel_end - kernel_start); 536f6bc87c3SSteve Capper 53703149563SVijay Balakrishna #ifdef CONFIG_KEXEC_CORE 53803149563SVijay Balakrishna if (crash_mem_map) { 53903149563SVijay Balakrishna if (IS_ENABLED(CONFIG_ZONE_DMA) || 54003149563SVijay Balakrishna IS_ENABLED(CONFIG_ZONE_DMA32)) 54103149563SVijay Balakrishna flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; 54203149563SVijay Balakrishna else if (crashk_res.end) 54303149563SVijay Balakrishna memblock_mark_nomap(crashk_res.start, 54403149563SVijay Balakrishna resource_size(&crashk_res)); 54503149563SVijay Balakrishna } 54603149563SVijay Balakrishna #endif 54703149563SVijay Balakrishna 548c1cc1552SCatalin Marinas /* map all the memory banks */ 549b10d6bcaSMike Rapoport for_each_mem_range(i, &start, &end) { 550c1cc1552SCatalin Marinas if (start >= end) 551c1cc1552SCatalin Marinas break; 5520178dc76SCatalin Marinas /* 5530178dc76SCatalin Marinas * The linear map must allow allocation tags reading/writing 5540178dc76SCatalin Marinas * if MTE is present. Otherwise, it has the same attributes as 5550178dc76SCatalin Marinas * PAGE_KERNEL. 
		 */
		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
			       flags);
	}

	/*
	 * Map the linear alias of the [_stext, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
#ifdef CONFIG_KEXEC_CORE
	if (crash_mem_map &&
	    !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) {
		if (crashk_res.end) {
			__map_memblock(pgdp, crashk_res.start,
				       crashk_res.end + 1,
				       PAGE_KERNEL,
				       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
			memblock_clear_nomap(crashk_res.start,
					     resource_size(&crashk_res));
		}
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	int i;

	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
			     entry_tramp_text_size(), prot,
			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);

	/* Map both the text and data into the kernel page table */
	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, prot);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open coded check for BTI, only for use to determine configuration
 * for early mappings for before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_phys_free(__pa_symbol(init_pg_dir),
			   __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	addr = arch_kasan_reset_tag(addr);
	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return 0;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}

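/*
 * Memory hot-remove support: the unmap_hotplug_*() helpers below tear down
 * the kernel mappings for a removed range (optionally freeing the mapped
 * pages), and the free_empty_*() helpers then free any page-table pages
 * that are left completely empty.
 */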
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte),
						PAGE_SIZE, altmap);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped, struct vmem_altmap *altmap)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	/*
	 * altmap can only be used as vmemmap mapping backing memory.
	 * In case the backing memory itself is not being freed, then
	 * altmap is irrelevant. Warn about this inconsistency when
	 * encountered.
	 */
	WARN_ON(!free_mapped && altmap);

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(READ_ONCE(ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 3)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
1110bbd6ec60SAnshuman Khandual */ 1111bbd6ec60SAnshuman Khandual pudp = pud_offset(p4dp, 0UL); 1112bbd6ec60SAnshuman Khandual for (i = 0; i < PTRS_PER_PUD; i++) { 1113bbd6ec60SAnshuman Khandual if (!pud_none(READ_ONCE(pudp[i]))) 1114bbd6ec60SAnshuman Khandual return; 1115bbd6ec60SAnshuman Khandual } 1116bbd6ec60SAnshuman Khandual 1117bbd6ec60SAnshuman Khandual p4d_clear(p4dp); 1118bbd6ec60SAnshuman Khandual __flush_tlb_kernel_pgtable(start); 1119bbd6ec60SAnshuman Khandual free_hotplug_pgtable_page(virt_to_page(pudp)); 1120bbd6ec60SAnshuman Khandual } 1121bbd6ec60SAnshuman Khandual 1122bbd6ec60SAnshuman Khandual static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr, 1123bbd6ec60SAnshuman Khandual unsigned long end, unsigned long floor, 1124bbd6ec60SAnshuman Khandual unsigned long ceiling) 1125bbd6ec60SAnshuman Khandual { 1126bbd6ec60SAnshuman Khandual unsigned long next; 1127bbd6ec60SAnshuman Khandual p4d_t *p4dp, p4d; 1128bbd6ec60SAnshuman Khandual 1129bbd6ec60SAnshuman Khandual do { 1130bbd6ec60SAnshuman Khandual next = p4d_addr_end(addr, end); 1131bbd6ec60SAnshuman Khandual p4dp = p4d_offset(pgdp, addr); 1132bbd6ec60SAnshuman Khandual p4d = READ_ONCE(*p4dp); 1133bbd6ec60SAnshuman Khandual if (p4d_none(p4d)) 1134bbd6ec60SAnshuman Khandual continue; 1135bbd6ec60SAnshuman Khandual 1136bbd6ec60SAnshuman Khandual WARN_ON(!p4d_present(p4d)); 1137bbd6ec60SAnshuman Khandual free_empty_pud_table(p4dp, addr, next, floor, ceiling); 1138bbd6ec60SAnshuman Khandual } while (addr = next, addr < end); 1139bbd6ec60SAnshuman Khandual } 1140bbd6ec60SAnshuman Khandual 1141bbd6ec60SAnshuman Khandual static void free_empty_tables(unsigned long addr, unsigned long end, 1142bbd6ec60SAnshuman Khandual unsigned long floor, unsigned long ceiling) 1143bbd6ec60SAnshuman Khandual { 1144bbd6ec60SAnshuman Khandual unsigned long next; 1145bbd6ec60SAnshuman Khandual pgd_t *pgdp, pgd; 1146bbd6ec60SAnshuman Khandual 1147bbd6ec60SAnshuman Khandual do { 1148bbd6ec60SAnshuman Khandual next = pgd_addr_end(addr, end); 1149bbd6ec60SAnshuman Khandual pgdp = pgd_offset_k(addr); 1150bbd6ec60SAnshuman Khandual pgd = READ_ONCE(*pgdp); 1151bbd6ec60SAnshuman Khandual if (pgd_none(pgd)) 1152bbd6ec60SAnshuman Khandual continue; 1153bbd6ec60SAnshuman Khandual 1154bbd6ec60SAnshuman Khandual WARN_ON(!pgd_present(pgd)); 1155bbd6ec60SAnshuman Khandual free_empty_p4d_table(pgdp, addr, next, floor, ceiling); 1156bbd6ec60SAnshuman Khandual } while (addr = next, addr < end); 1157bbd6ec60SAnshuman Khandual } 1158bbd6ec60SAnshuman Khandual #endif 1159bbd6ec60SAnshuman Khandual 11602062d44dSAnshuman Khandual #if !ARM64_KERNEL_USES_PMD_MAPS 11617b73d978SChristoph Hellwig int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, 11627b73d978SChristoph Hellwig struct vmem_altmap *altmap) 1163c1cc1552SCatalin Marinas { 1164edb739eeSAnshuman Khandual WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); 1165eee07935SAnshuman Khandual return vmemmap_populate_basepages(start, end, node, altmap); 1166c1cc1552SCatalin Marinas } 11672062d44dSAnshuman Khandual #else /* !ARM64_KERNEL_USES_PMD_MAPS */ 11687b73d978SChristoph Hellwig int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, 11697b73d978SChristoph Hellwig struct vmem_altmap *altmap) 1170c1cc1552SCatalin Marinas { 11710aad818bSJohannes Weiner unsigned long addr = start; 1172c1cc1552SCatalin Marinas unsigned long next; 117320a004e7SWill Deacon pgd_t *pgdp; 1174e9f63768SMike Rapoport p4d_t *p4dp; 117520a004e7SWill Deacon pud_t *pudp; 
117620a004e7SWill Deacon pmd_t *pmdp; 1177c1cc1552SCatalin Marinas 1178edb739eeSAnshuman Khandual WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); 1179c1cc1552SCatalin Marinas do { 1180c1cc1552SCatalin Marinas next = pmd_addr_end(addr, end); 1181c1cc1552SCatalin Marinas 118220a004e7SWill Deacon pgdp = vmemmap_pgd_populate(addr, node); 118320a004e7SWill Deacon if (!pgdp) 1184c1cc1552SCatalin Marinas return -ENOMEM; 1185c1cc1552SCatalin Marinas 1186e9f63768SMike Rapoport p4dp = vmemmap_p4d_populate(pgdp, addr, node); 1187e9f63768SMike Rapoport if (!p4dp) 1188e9f63768SMike Rapoport return -ENOMEM; 1189e9f63768SMike Rapoport 1190e9f63768SMike Rapoport pudp = vmemmap_pud_populate(p4dp, addr, node); 119120a004e7SWill Deacon if (!pudp) 1192c1cc1552SCatalin Marinas return -ENOMEM; 1193c1cc1552SCatalin Marinas 119420a004e7SWill Deacon pmdp = pmd_offset(pudp, addr); 119520a004e7SWill Deacon if (pmd_none(READ_ONCE(*pmdp))) { 1196c1cc1552SCatalin Marinas void *p = NULL; 1197c1cc1552SCatalin Marinas 1198eee07935SAnshuman Khandual p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); 11999f84f39fSSudarshan Rajagopalan if (!p) { 12009f84f39fSSudarshan Rajagopalan if (vmemmap_populate_basepages(addr, next, node, altmap)) 1201c1cc1552SCatalin Marinas return -ENOMEM; 12029f84f39fSSudarshan Rajagopalan continue; 12039f84f39fSSudarshan Rajagopalan } 1204c1cc1552SCatalin Marinas 120520a004e7SWill Deacon pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); 1206c1cc1552SCatalin Marinas } else 120720a004e7SWill Deacon vmemmap_verify((pte_t *)pmdp, node, addr, next); 1208c1cc1552SCatalin Marinas } while (addr = next, addr != end); 1209c1cc1552SCatalin Marinas 1210c1cc1552SCatalin Marinas return 0; 1211c1cc1552SCatalin Marinas } 12122062d44dSAnshuman Khandual #endif /* !ARM64_KERNEL_USES_PMD_MAPS */ 121340221c73SAnshuman Khandual 121440221c73SAnshuman Khandual #ifdef CONFIG_MEMORY_HOTPLUG 121524b6d416SChristoph Hellwig void vmemmap_free(unsigned long start, unsigned long end, 121624b6d416SChristoph Hellwig struct vmem_altmap *altmap) 12170197518cSTang Chen { 1218bbd6ec60SAnshuman Khandual WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); 1219bbd6ec60SAnshuman Khandual 1220eee07935SAnshuman Khandual unmap_hotplug_range(start, end, true, altmap); 1221bbd6ec60SAnshuman Khandual free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); 12220197518cSTang Chen } 122340221c73SAnshuman Khandual #endif /* CONFIG_MEMORY_HOTPLUG */ 1224af86e597SLaura Abbott 1225af86e597SLaura Abbott static inline pud_t *fixmap_pud(unsigned long addr) 1226af86e597SLaura Abbott { 122720a004e7SWill Deacon pgd_t *pgdp = pgd_offset_k(addr); 1228e9f63768SMike Rapoport p4d_t *p4dp = p4d_offset(pgdp, addr); 1229e9f63768SMike Rapoport p4d_t p4d = READ_ONCE(*p4dp); 1230af86e597SLaura Abbott 1231e9f63768SMike Rapoport BUG_ON(p4d_none(p4d) || p4d_bad(p4d)); 1232af86e597SLaura Abbott 1233e9f63768SMike Rapoport return pud_offset_kimg(p4dp, addr); 1234af86e597SLaura Abbott } 1235af86e597SLaura Abbott 1236af86e597SLaura Abbott static inline pmd_t *fixmap_pmd(unsigned long addr) 1237af86e597SLaura Abbott { 123820a004e7SWill Deacon pud_t *pudp = fixmap_pud(addr); 123920a004e7SWill Deacon pud_t pud = READ_ONCE(*pudp); 1240af86e597SLaura Abbott 124120a004e7SWill Deacon BUG_ON(pud_none(pud) || pud_bad(pud)); 1242af86e597SLaura Abbott 124320a004e7SWill Deacon return pmd_offset_kimg(pudp, addr); 1244af86e597SLaura Abbott } 1245af86e597SLaura Abbott 1246af86e597SLaura Abbott static inline pte_t *fixmap_pte(unsigned long addr) 1247af86e597SLaura 
Abbott { 1248157962f5SArd Biesheuvel return &bm_pte[pte_index(addr)]; 1249af86e597SLaura Abbott } 1250af86e597SLaura Abbott 12512077be67SLaura Abbott /* 12522077be67SLaura Abbott * The p*d_populate functions call virt_to_phys implicitly so they can't be used 12532077be67SLaura Abbott * directly on kernel symbols (bm_p*d). This function is called too early to use 12542077be67SLaura Abbott * lm_alias so __p*d_populate functions must be used to populate with the 12552077be67SLaura Abbott * physical address from __pa_symbol. 12562077be67SLaura Abbott */ 1257af86e597SLaura Abbott void __init early_fixmap_init(void) 1258af86e597SLaura Abbott { 1259e9f63768SMike Rapoport pgd_t *pgdp; 1260e9f63768SMike Rapoport p4d_t *p4dp, p4d; 126120a004e7SWill Deacon pud_t *pudp; 126220a004e7SWill Deacon pmd_t *pmdp; 1263af86e597SLaura Abbott unsigned long addr = FIXADDR_START; 1264af86e597SLaura Abbott 126520a004e7SWill Deacon pgdp = pgd_offset_k(addr); 1266e9f63768SMike Rapoport p4dp = p4d_offset(pgdp, addr); 1267e9f63768SMike Rapoport p4d = READ_ONCE(*p4dp); 1268f80fb3a3SArd Biesheuvel if (CONFIG_PGTABLE_LEVELS > 3 && 1269e9f63768SMike Rapoport !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) { 1270f9040773SArd Biesheuvel /* 1271f9040773SArd Biesheuvel * We only end up here if the kernel mapping and the fixmap 1272f9040773SArd Biesheuvel * share the top level pgd entry, which should only happen on 1273f9040773SArd Biesheuvel * 16k/4 levels configurations. 1274f9040773SArd Biesheuvel */ 1275f9040773SArd Biesheuvel BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); 1276e9f63768SMike Rapoport pudp = pud_offset_kimg(p4dp, addr); 1277f9040773SArd Biesheuvel } else { 1278e9f63768SMike Rapoport if (p4d_none(p4d)) 1279c1fd78a7SArd Biesheuvel __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE); 128020a004e7SWill Deacon pudp = fixmap_pud(addr); 1281f9040773SArd Biesheuvel } 128220a004e7SWill Deacon if (pud_none(READ_ONCE(*pudp))) 1283c1fd78a7SArd Biesheuvel __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE); 128420a004e7SWill Deacon pmdp = fixmap_pmd(addr); 128520a004e7SWill Deacon __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE); 1286af86e597SLaura Abbott 1287af86e597SLaura Abbott /* 1288af86e597SLaura Abbott * The boot-ioremap range spans multiple pmds, for which 1289157962f5SArd Biesheuvel * we are not prepared: 1290af86e597SLaura Abbott */ 1291af86e597SLaura Abbott BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) 1292af86e597SLaura Abbott != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); 1293af86e597SLaura Abbott 129420a004e7SWill Deacon if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) 129520a004e7SWill Deacon || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { 1296af86e597SLaura Abbott WARN_ON(1); 129720a004e7SWill Deacon pr_warn("pmdp %p != %p, %p\n", 129820a004e7SWill Deacon pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), 1299af86e597SLaura Abbott fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); 1300af86e597SLaura Abbott pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", 1301af86e597SLaura Abbott fix_to_virt(FIX_BTMAP_BEGIN)); 1302af86e597SLaura Abbott pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", 1303af86e597SLaura Abbott fix_to_virt(FIX_BTMAP_END)); 1304af86e597SLaura Abbott 1305af86e597SLaura Abbott pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); 1306af86e597SLaura Abbott pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); 1307af86e597SLaura Abbott } 1308af86e597SLaura Abbott } 1309af86e597SLaura Abbott 131018b4b276SJames Morse /* 131118b4b276SJames Morse * Unusually, this 
is also called in IRQ context (ghes_iounmap_irq) so if we 131218b4b276SJames Morse * ever need to use IPIs for TLB broadcasting, then we're in trouble here. 131318b4b276SJames Morse */ 1314af86e597SLaura Abbott void __set_fixmap(enum fixed_addresses idx, 1315af86e597SLaura Abbott phys_addr_t phys, pgprot_t flags) 1316af86e597SLaura Abbott { 1317af86e597SLaura Abbott unsigned long addr = __fix_to_virt(idx); 131820a004e7SWill Deacon pte_t *ptep; 1319af86e597SLaura Abbott 1320b63dbef9SMark Rutland BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); 1321af86e597SLaura Abbott 132220a004e7SWill Deacon ptep = fixmap_pte(addr); 1323af86e597SLaura Abbott 1324af86e597SLaura Abbott if (pgprot_val(flags)) { 132520a004e7SWill Deacon set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); 1326af86e597SLaura Abbott } else { 132720a004e7SWill Deacon pte_clear(&init_mm, addr, ptep); 1328af86e597SLaura Abbott flush_tlb_kernel_range(addr, addr+PAGE_SIZE); 1329af86e597SLaura Abbott } 1330af86e597SLaura Abbott } 133161bd93ceSArd Biesheuvel 1332e112b032SHsin-Yi Wang void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) 133361bd93ceSArd Biesheuvel { 133461bd93ceSArd Biesheuvel const u64 dt_virt_base = __fix_to_virt(FIX_FDT); 1335f80fb3a3SArd Biesheuvel int offset; 133661bd93ceSArd Biesheuvel void *dt_virt; 133761bd93ceSArd Biesheuvel 133861bd93ceSArd Biesheuvel /* 133961bd93ceSArd Biesheuvel * Check whether the physical FDT address is set and meets the minimum 134061bd93ceSArd Biesheuvel * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be 134104a84810SArd Biesheuvel * at least 8 bytes so that we can always access the magic and size 134204a84810SArd Biesheuvel * fields of the FDT header after mapping the first chunk, double check 134304a84810SArd Biesheuvel * here if that is indeed the case. 134461bd93ceSArd Biesheuvel */ 134561bd93ceSArd Biesheuvel BUILD_BUG_ON(MIN_FDT_ALIGN < 8); 134661bd93ceSArd Biesheuvel if (!dt_phys || dt_phys % MIN_FDT_ALIGN) 134761bd93ceSArd Biesheuvel return NULL; 134861bd93ceSArd Biesheuvel 134961bd93ceSArd Biesheuvel /* 135061bd93ceSArd Biesheuvel * Make sure that the FDT region can be mapped without the need to 135161bd93ceSArd Biesheuvel * allocate additional translation table pages, so that it is safe 1352132233a7SLaura Abbott * to call create_mapping_noalloc() this early. 135361bd93ceSArd Biesheuvel * 135461bd93ceSArd Biesheuvel * On 64k pages, the FDT will be mapped using PTEs, so we need to 135561bd93ceSArd Biesheuvel * be in the same PMD as the rest of the fixmap. 135661bd93ceSArd Biesheuvel * On 4k pages, we'll use section mappings for the FDT so we only 135761bd93ceSArd Biesheuvel * have to be in the same PUD. 135861bd93ceSArd Biesheuvel */ 135961bd93ceSArd Biesheuvel BUILD_BUG_ON(dt_virt_base % SZ_2M); 136061bd93ceSArd Biesheuvel 1361b433dce0SSuzuki K. Poulose BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT != 1362b433dce0SSuzuki K. Poulose __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT); 136361bd93ceSArd Biesheuvel 1364b433dce0SSuzuki K. 
Poulose offset = dt_phys % SWAPPER_BLOCK_SIZE; 136561bd93ceSArd Biesheuvel dt_virt = (void *)dt_virt_base + offset; 136661bd93ceSArd Biesheuvel 136761bd93ceSArd Biesheuvel /* map the first chunk so we can read the size from the header */ 1368132233a7SLaura Abbott create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), 1369132233a7SLaura Abbott dt_virt_base, SWAPPER_BLOCK_SIZE, prot); 137061bd93ceSArd Biesheuvel 137104a84810SArd Biesheuvel if (fdt_magic(dt_virt) != FDT_MAGIC) 137261bd93ceSArd Biesheuvel return NULL; 137361bd93ceSArd Biesheuvel 1374f80fb3a3SArd Biesheuvel *size = fdt_totalsize(dt_virt); 1375f80fb3a3SArd Biesheuvel if (*size > MAX_FDT_SIZE) 137661bd93ceSArd Biesheuvel return NULL; 137761bd93ceSArd Biesheuvel 1378f80fb3a3SArd Biesheuvel if (offset + *size > SWAPPER_BLOCK_SIZE) 1379132233a7SLaura Abbott create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, 1380f80fb3a3SArd Biesheuvel round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot); 1381f80fb3a3SArd Biesheuvel 1382f80fb3a3SArd Biesheuvel return dt_virt; 1383f80fb3a3SArd Biesheuvel } 1384f80fb3a3SArd Biesheuvel 138520a004e7SWill Deacon int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) 1386324420bfSArd Biesheuvel { 1387f7f0097aSAnshuman Khandual pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot)); 138815122ee2SWill Deacon 138982034c23SLaura Abbott /* Only allow permission changes for now */ 139082034c23SLaura Abbott if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)), 139182034c23SLaura Abbott pud_val(new_pud))) 139215122ee2SWill Deacon return 0; 139315122ee2SWill Deacon 139487dedf7cSAnshuman Khandual VM_BUG_ON(phys & ~PUD_MASK); 139582034c23SLaura Abbott set_pud(pudp, new_pud); 1396324420bfSArd Biesheuvel return 1; 1397324420bfSArd Biesheuvel } 1398324420bfSArd Biesheuvel 139920a004e7SWill Deacon int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) 1400324420bfSArd Biesheuvel { 1401f7f0097aSAnshuman Khandual pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot)); 140215122ee2SWill Deacon 140382034c23SLaura Abbott /* Only allow permission changes for now */ 140482034c23SLaura Abbott if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)), 140582034c23SLaura Abbott pmd_val(new_pmd))) 140615122ee2SWill Deacon return 0; 140715122ee2SWill Deacon 140887dedf7cSAnshuman Khandual VM_BUG_ON(phys & ~PMD_MASK); 140982034c23SLaura Abbott set_pmd(pmdp, new_pmd); 1410324420bfSArd Biesheuvel return 1; 1411324420bfSArd Biesheuvel } 1412324420bfSArd Biesheuvel 1413d8a71905SJonathan Marek int pud_clear_huge(pud_t *pudp) 1414d8a71905SJonathan Marek { 1415d8a71905SJonathan Marek if (!pud_sect(READ_ONCE(*pudp))) 1416d8a71905SJonathan Marek return 0; 1417d8a71905SJonathan Marek pud_clear(pudp); 1418d8a71905SJonathan Marek return 1; 1419d8a71905SJonathan Marek } 1420d8a71905SJonathan Marek 142120a004e7SWill Deacon int pmd_clear_huge(pmd_t *pmdp) 1422324420bfSArd Biesheuvel { 142320a004e7SWill Deacon if (!pmd_sect(READ_ONCE(*pmdp))) 1424324420bfSArd Biesheuvel return 0; 142520a004e7SWill Deacon pmd_clear(pmdp); 1426324420bfSArd Biesheuvel return 1; 1427324420bfSArd Biesheuvel } 1428b6bdb751SToshi Kani 1429ec28bb9cSChintan Pandya int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) 1430b6bdb751SToshi Kani { 1431ec28bb9cSChintan Pandya pte_t *table; 1432ec28bb9cSChintan Pandya pmd_t pmd; 1433ec28bb9cSChintan Pandya 1434ec28bb9cSChintan Pandya pmd = READ_ONCE(*pmdp); 1435ec28bb9cSChintan Pandya 1436fac880c7SMark Rutland if (!pmd_table(pmd)) { 14379c006972SWill Deacon 
VM_WARN_ON(1); 1438ec28bb9cSChintan Pandya return 1; 1439b6bdb751SToshi Kani } 1440b6bdb751SToshi Kani 1441ec28bb9cSChintan Pandya table = pte_offset_kernel(pmdp, addr); 1442ec28bb9cSChintan Pandya pmd_clear(pmdp); 1443ec28bb9cSChintan Pandya __flush_tlb_kernel_pgtable(addr); 1444ec28bb9cSChintan Pandya pte_free_kernel(NULL, table); 1445ec28bb9cSChintan Pandya return 1; 1446ec28bb9cSChintan Pandya } 1447ec28bb9cSChintan Pandya 1448ec28bb9cSChintan Pandya int pud_free_pmd_page(pud_t *pudp, unsigned long addr) 1449b6bdb751SToshi Kani { 1450ec28bb9cSChintan Pandya pmd_t *table; 1451ec28bb9cSChintan Pandya pmd_t *pmdp; 1452ec28bb9cSChintan Pandya pud_t pud; 1453ec28bb9cSChintan Pandya unsigned long next, end; 1454ec28bb9cSChintan Pandya 1455ec28bb9cSChintan Pandya pud = READ_ONCE(*pudp); 1456ec28bb9cSChintan Pandya 1457fac880c7SMark Rutland if (!pud_table(pud)) { 14589c006972SWill Deacon VM_WARN_ON(1); 1459ec28bb9cSChintan Pandya return 1; 1460ec28bb9cSChintan Pandya } 1461ec28bb9cSChintan Pandya 1462ec28bb9cSChintan Pandya table = pmd_offset(pudp, addr); 1463ec28bb9cSChintan Pandya pmdp = table; 1464ec28bb9cSChintan Pandya next = addr; 1465ec28bb9cSChintan Pandya end = addr + PUD_SIZE; 1466ec28bb9cSChintan Pandya do { 1467ec28bb9cSChintan Pandya pmd_free_pte_page(pmdp, next); 1468ec28bb9cSChintan Pandya } while (pmdp++, next += PMD_SIZE, next != end); 1469ec28bb9cSChintan Pandya 1470ec28bb9cSChintan Pandya pud_clear(pudp); 1471ec28bb9cSChintan Pandya __flush_tlb_kernel_pgtable(addr); 1472ec28bb9cSChintan Pandya pmd_free(NULL, table); 1473ec28bb9cSChintan Pandya return 1; 1474b6bdb751SToshi Kani } 14754ab21506SRobin Murphy 14764ab21506SRobin Murphy #ifdef CONFIG_MEMORY_HOTPLUG 1477bbd6ec60SAnshuman Khandual static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) 1478bbd6ec60SAnshuman Khandual { 1479bbd6ec60SAnshuman Khandual unsigned long end = start + size; 1480bbd6ec60SAnshuman Khandual 1481bbd6ec60SAnshuman Khandual WARN_ON(pgdir != init_mm.pgd); 1482bbd6ec60SAnshuman Khandual WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END)); 1483bbd6ec60SAnshuman Khandual 1484eee07935SAnshuman Khandual unmap_hotplug_range(start, end, false, NULL); 1485bbd6ec60SAnshuman Khandual free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); 1486bbd6ec60SAnshuman Khandual } 1487bbd6ec60SAnshuman Khandual 148803aaf83fSAnshuman Khandual struct range arch_get_mappable_range(void) 148958284a90SAnshuman Khandual { 149003aaf83fSAnshuman Khandual struct range mhp_range; 1491ee7febceSPavel Tatashin u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual)); 1492ee7febceSPavel Tatashin u64 end_linear_pa = __pa(PAGE_END - 1); 1493ee7febceSPavel Tatashin 1494ee7febceSPavel Tatashin if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { 1495ee7febceSPavel Tatashin /* 1496ee7febceSPavel Tatashin * Check for a wrap, it is possible because of randomized linear 1497ee7febceSPavel Tatashin * mapping the start physical address is actually bigger than 1498ee7febceSPavel Tatashin * the end physical address. In this case set start to zero 1499ee7febceSPavel Tatashin * because [0, end_linear_pa] range must still be able to cover 1500ee7febceSPavel Tatashin * all addressable physical addresses. 
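 *
 * (Editor's note, not part of the original source: the range computed
 * here is what mhp_range_allowed() validates hot-add requests against,
 * see the VM_BUG_ON() in arch_add_memory() below.)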
1501ee7febceSPavel Tatashin */ 1502ee7febceSPavel Tatashin if (start_linear_pa > end_linear_pa) 1503ee7febceSPavel Tatashin start_linear_pa = 0; 1504ee7febceSPavel Tatashin } 1505ee7febceSPavel Tatashin 1506ee7febceSPavel Tatashin WARN_ON(start_linear_pa > end_linear_pa); 150703aaf83fSAnshuman Khandual 150858284a90SAnshuman Khandual /* 150958284a90SAnshuman Khandual * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)] 151058284a90SAnshuman Khandual * accommodating both its ends but excluding PAGE_END. Max physical 151158284a90SAnshuman Khandual * range which can be mapped inside this linear mapping range, must 151258284a90SAnshuman Khandual * also be derived from its end points. 151358284a90SAnshuman Khandual */ 1514ee7febceSPavel Tatashin mhp_range.start = start_linear_pa; 1515ee7febceSPavel Tatashin mhp_range.end = end_linear_pa; 1516ee7febceSPavel Tatashin 151703aaf83fSAnshuman Khandual return mhp_range; 151858284a90SAnshuman Khandual } 151958284a90SAnshuman Khandual 1520940519f0SMichal Hocko int arch_add_memory(int nid, u64 start, u64 size, 1521f5637d3bSLogan Gunthorpe struct mhp_params *params) 15224ab21506SRobin Murphy { 152387143f40SArd Biesheuvel int ret, flags = NO_EXEC_MAPPINGS; 15244ab21506SRobin Murphy 152503aaf83fSAnshuman Khandual VM_BUG_ON(!mhp_range_allowed(start, size, true)); 1526840b2398SMarco Elver 1527840b2398SMarco Elver /* 1528840b2398SMarco Elver * KFENCE requires linear map to be mapped at page granularity, so that 1529840b2398SMarco Elver * it is possible to protect/unprotect single pages in the KFENCE pool. 1530840b2398SMarco Elver */ 15316d47c23bSMike Rapoport if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE)) 153287143f40SArd Biesheuvel flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; 15334ab21506SRobin Murphy 15344ab21506SRobin Murphy __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), 1535bfeb022fSLogan Gunthorpe size, params->pgprot, __pgd_pgtable_alloc, 1536bfeb022fSLogan Gunthorpe flags); 15374ab21506SRobin Murphy 153816993c0fSDan Williams memblock_clear_nomap(start, size); 153916993c0fSDan Williams 1540bbd6ec60SAnshuman Khandual ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, 1541f5637d3bSLogan Gunthorpe params); 1542bbd6ec60SAnshuman Khandual if (ret) 1543bbd6ec60SAnshuman Khandual __remove_pgd_mapping(swapper_pg_dir, 1544bbd6ec60SAnshuman Khandual __phys_to_virt(start), size); 15458fac67caSSudarshan Rajagopalan else { 15468fac67caSSudarshan Rajagopalan max_pfn = PFN_UP(start + size); 15478fac67caSSudarshan Rajagopalan max_low_pfn = max_pfn; 15488fac67caSSudarshan Rajagopalan } 15498fac67caSSudarshan Rajagopalan 1550bbd6ec60SAnshuman Khandual return ret; 15514ab21506SRobin Murphy } 1552bbd6ec60SAnshuman Khandual 155365a2aa5fSDavid Hildenbrand void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) 155422eb6346SDavid Hildenbrand { 155522eb6346SDavid Hildenbrand unsigned long start_pfn = start >> PAGE_SHIFT; 155622eb6346SDavid Hildenbrand unsigned long nr_pages = size >> PAGE_SHIFT; 155722eb6346SDavid Hildenbrand 1558feee6b29SDavid Hildenbrand __remove_pages(start_pfn, nr_pages, altmap); 1559bbd6ec60SAnshuman Khandual __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); 156022eb6346SDavid Hildenbrand } 1561bbd6ec60SAnshuman Khandual 1562bbd6ec60SAnshuman Khandual /* 1563bbd6ec60SAnshuman Khandual * This memory hotplug notifier helps prevent boot memory from being 1564bbd6ec60SAnshuman Khandual * inadvertently removed as it blocks pfn range offlining process in 
1565bbd6ec60SAnshuman Khandual * __offline_pages(). Hence this prevents both offlining as well as 1566bbd6ec60SAnshuman Khandual * removal process for boot memory which is initially always online. 1567bbd6ec60SAnshuman Khandual * In future if and when boot memory could be removed, this notifier 1568bbd6ec60SAnshuman Khandual * should be dropped and free_hotplug_page_range() should handle any 1569bbd6ec60SAnshuman Khandual * reserved pages allocated during boot. 1570bbd6ec60SAnshuman Khandual */ 1571bbd6ec60SAnshuman Khandual static int prevent_bootmem_remove_notifier(struct notifier_block *nb, 1572bbd6ec60SAnshuman Khandual unsigned long action, void *data) 1573bbd6ec60SAnshuman Khandual { 1574bbd6ec60SAnshuman Khandual struct mem_section *ms; 1575bbd6ec60SAnshuman Khandual struct memory_notify *arg = data; 1576bbd6ec60SAnshuman Khandual unsigned long end_pfn = arg->start_pfn + arg->nr_pages; 1577bbd6ec60SAnshuman Khandual unsigned long pfn = arg->start_pfn; 1578bbd6ec60SAnshuman Khandual 15799fb3d4a3SAnshuman Khandual if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE)) 1580bbd6ec60SAnshuman Khandual return NOTIFY_OK; 1581bbd6ec60SAnshuman Khandual 1582bbd6ec60SAnshuman Khandual for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 15839fb3d4a3SAnshuman Khandual unsigned long start = PFN_PHYS(pfn); 15849fb3d4a3SAnshuman Khandual unsigned long end = start + (1UL << PA_SECTION_SHIFT); 15859fb3d4a3SAnshuman Khandual 1586bbd6ec60SAnshuman Khandual ms = __pfn_to_section(pfn); 15879fb3d4a3SAnshuman Khandual if (!early_section(ms)) 15889fb3d4a3SAnshuman Khandual continue; 15899fb3d4a3SAnshuman Khandual 15909fb3d4a3SAnshuman Khandual if (action == MEM_GOING_OFFLINE) { 15919fb3d4a3SAnshuman Khandual /* 15929fb3d4a3SAnshuman Khandual * Boot memory removal is not supported. Prevent 15939fb3d4a3SAnshuman Khandual * it via blocking any attempted offline request 15949fb3d4a3SAnshuman Khandual * for the boot memory and just report it. 15959fb3d4a3SAnshuman Khandual */ 15969fb3d4a3SAnshuman Khandual pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end); 1597bbd6ec60SAnshuman Khandual return NOTIFY_BAD; 15989fb3d4a3SAnshuman Khandual } else if (action == MEM_OFFLINE) { 15999fb3d4a3SAnshuman Khandual /* 16009fb3d4a3SAnshuman Khandual * This should have never happened. Boot memory 16019fb3d4a3SAnshuman Khandual * offlining should have been prevented by this 16029fb3d4a3SAnshuman Khandual * very notifier. Probably some memory removal 16039fb3d4a3SAnshuman Khandual * procedure might have changed which would then 16049fb3d4a3SAnshuman Khandual * require further debug. 16059fb3d4a3SAnshuman Khandual */ 16069fb3d4a3SAnshuman Khandual pr_err("Boot memory [%lx %lx] offlined\n", start, end); 16079fb3d4a3SAnshuman Khandual 16089fb3d4a3SAnshuman Khandual /* 16099fb3d4a3SAnshuman Khandual * Core memory hotplug does not process a return 16109fb3d4a3SAnshuman Khandual * code from the notifier for MEM_OFFLINE events. 16119fb3d4a3SAnshuman Khandual * The error condition has been reported. Return 16129fb3d4a3SAnshuman Khandual * from here as if ignored. 
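 *
 * (Editor's note, not part of the original source: NOTIFY_DONE means
 * "no opinion" to the notifier chain, whereas the MEM_GOING_OFFLINE
 * branch above returns NOTIFY_BAD, which the hotplug core converts
 * into an error and uses to abort the offline request.)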
16139fb3d4a3SAnshuman Khandual */ 16149fb3d4a3SAnshuman Khandual return NOTIFY_DONE; 16159fb3d4a3SAnshuman Khandual } 1616bbd6ec60SAnshuman Khandual } 1617bbd6ec60SAnshuman Khandual return NOTIFY_OK; 1618bbd6ec60SAnshuman Khandual } 1619bbd6ec60SAnshuman Khandual 1620bbd6ec60SAnshuman Khandual static struct notifier_block prevent_bootmem_remove_nb = { 1621bbd6ec60SAnshuman Khandual .notifier_call = prevent_bootmem_remove_notifier, 1622bbd6ec60SAnshuman Khandual }; 1623bbd6ec60SAnshuman Khandual 1624fdd99a41SAnshuman Khandual /* 1625fdd99a41SAnshuman Khandual * This ensures that boot memory sections on the platform are online 1626fdd99a41SAnshuman Khandual * from early boot. Memory sections could not be prevented from being 1627fdd99a41SAnshuman Khandual * offlined, unless for some reason they are not online to begin with. 1628fdd99a41SAnshuman Khandual * This helps validate the basic assumption on which the above memory 1629fdd99a41SAnshuman Khandual * event notifier works to prevent boot memory section offlining and 1630fdd99a41SAnshuman Khandual * its possible removal. 1631fdd99a41SAnshuman Khandual */ 1632fdd99a41SAnshuman Khandual static void validate_bootmem_online(void) 1633fdd99a41SAnshuman Khandual { 1634fdd99a41SAnshuman Khandual phys_addr_t start, end, addr; 1635fdd99a41SAnshuman Khandual struct mem_section *ms; 1636fdd99a41SAnshuman Khandual u64 i; 1637fdd99a41SAnshuman Khandual 1638fdd99a41SAnshuman Khandual /* 1639fdd99a41SAnshuman Khandual * Scanning across all memblock might be expensive 1640fdd99a41SAnshuman Khandual * on some big memory systems. Hence enable this 1641fdd99a41SAnshuman Khandual * validation only with DEBUG_VM. 1642fdd99a41SAnshuman Khandual */ 1643fdd99a41SAnshuman Khandual if (!IS_ENABLED(CONFIG_DEBUG_VM)) 1644fdd99a41SAnshuman Khandual return; 1645fdd99a41SAnshuman Khandual 1646fdd99a41SAnshuman Khandual for_each_mem_range(i, &start, &end) { 1647fdd99a41SAnshuman Khandual for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) { 1648fdd99a41SAnshuman Khandual ms = __pfn_to_section(PHYS_PFN(addr)); 1649fdd99a41SAnshuman Khandual 1650fdd99a41SAnshuman Khandual /* 1651fdd99a41SAnshuman Khandual * All memory ranges in the system at this point 1652fdd99a41SAnshuman Khandual * should have been marked as early sections. 1653fdd99a41SAnshuman Khandual */ 1654fdd99a41SAnshuman Khandual WARN_ON(!early_section(ms)); 1655fdd99a41SAnshuman Khandual 1656fdd99a41SAnshuman Khandual /* 1657fdd99a41SAnshuman Khandual * Memory notifier mechanism here to prevent boot 1658fdd99a41SAnshuman Khandual * memory offlining depends on the fact that each 1659fdd99a41SAnshuman Khandual * early section memory on the system is initially 1660fdd99a41SAnshuman Khandual * online. Otherwise a given memory section which 1661fdd99a41SAnshuman Khandual * is already offline will be overlooked and can 1662fdd99a41SAnshuman Khandual * be removed completely. Call out such sections. 
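 *
 * (Editor's note, not part of the original source: early_section()
 * tests the SECTION_IS_EARLY flag, which is only set for sections
 * created by sparse_init() at boot, so it is what distinguishes boot
 * memory from later hotplugged memory both here and in the notifier
 * above.)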
1663fdd99a41SAnshuman Khandual */ 1664fdd99a41SAnshuman Khandual if (!online_section(ms)) 1665fdd99a41SAnshuman Khandual pr_err("Boot memory [%llx %llx] is offline, can be removed\n", 1666fdd99a41SAnshuman Khandual addr, addr + (1UL << PA_SECTION_SHIFT)); 1667fdd99a41SAnshuman Khandual } 1668fdd99a41SAnshuman Khandual } 1669fdd99a41SAnshuman Khandual } 1670fdd99a41SAnshuman Khandual 1671bbd6ec60SAnshuman Khandual static int __init prevent_bootmem_remove_init(void) 1672bbd6ec60SAnshuman Khandual { 1673cb45babeSAnshuman Khandual int ret = 0; 1674cb45babeSAnshuman Khandual 1675cb45babeSAnshuman Khandual if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 1676cb45babeSAnshuman Khandual return ret; 1677cb45babeSAnshuman Khandual 1678fdd99a41SAnshuman Khandual validate_bootmem_online(); 1679cb45babeSAnshuman Khandual ret = register_memory_notifier(&prevent_bootmem_remove_nb); 1680cb45babeSAnshuman Khandual if (ret) 1681cb45babeSAnshuman Khandual pr_err("%s: Notifier registration failed %d\n", __func__, ret); 1682cb45babeSAnshuman Khandual 1683cb45babeSAnshuman Khandual return ret; 1684bbd6ec60SAnshuman Khandual } 1685cb45babeSAnshuman Khandual early_initcall(prevent_bootmem_remove_init); 168622eb6346SDavid Hildenbrand #endif 1687
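/*
 * (Editor's sketch, not part of the original file: a memory hot-add
 * request is expected to reach the code above roughly as
 *
 *     add_memory(nid, start, size, MHP_NONE)
 *       add_memory_resource()
 *         arch_add_memory(nid, start, size, &params)
 *           __create_pgd_mapping()   // extend the linear map
 *           __add_pages()            // create memmap and sections
 *
 * while arch_remove_memory() undoes it via __remove_pages() followed by
 * __remove_pgd_mapping(). The names outside this file belong to the core
 * memory hotplug code and may differ between kernel versions.)
 */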