// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/kfence.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)
#define NO_EXEC_MAPPINGS	BIT(2)	/* assumes FEAT_HPDS is not used */
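
/*
 * For orientation: these flags restrict how __create_pgd_mapping() below may
 * build a mapping. NO_BLOCK_MAPPINGS forces page-granular leaf entries,
 * NO_CONT_MAPPINGS suppresses the contiguous-range hint bit, and
 * NO_EXEC_MAPPINGS additionally sets the hierarchical PXNTable attribute in
 * the intermediate table descriptors it creates.
 */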
int idmap_t0sz __ro_after_init;

#if VA_BITS > 48
u64 vabits_actual __ro_after_init = VA_BITS_MIN;
EXPORT_SYMBOL(vabits_actual);
#endif

u64 kimage_vaddr __ro_after_init = (u64)&_text;
EXPORT_SYMBOL(kimage_vaddr);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

u32 __boot_cpu_mode[] = { BOOT_CPU_MODE_EL2, BOOT_CPU_MODE_EL1 };

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
long __section(".mmuoff.data.write") __early_cpu_boot_status;

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static DEFINE_SPINLOCK(swapper_pgdir_lock);
static DEFINE_MUTEX(fixmap_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_is_map_memory(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0,
					 MEMBLOCK_ALLOC_NOLEAKTRACE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (!pte_valid(__pte(old)) || !pte_valid(__pte(new)))
		return true;

	/* A live entry's pfn should not change */
	if (pte_pfn(__pte(old)) != pte_pfn(__pte(new)))
		return false;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * mismatched attribute aliases perspective.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}
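
/*
 * For illustration only (not used by the code below): under the rules above,
 * changing a live PAGE_KERNEL entry to PAGE_KERNEL_RO is considered safe,
 * since only PTE_WRITE/PTE_RDONLY (which are in the permission mask) differ.
 * Changing the output address, toggling PTE_CONT, or switching between
 * Device and Normal memory types is rejected and would require a full
 * break-before-make sequence instead.
 */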
static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
		phys_addr_t pte_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pmdval |= PMD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, pmdval);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
		phys_addr_t pmd_phys;

		if (flags & NO_EXEC_MAPPINGS)
			pudval |= PUD_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, pudval);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
		phys_addr_t pud_phys;

		if (flags & NO_EXEC_MAPPINGS)
			p4dval |= P4D_TABLE_PXN;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, p4dval);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (pud_sect_supported() &&
		   ((addr | next | phys) & ~PUD_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}
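
/*
 * The helpers above implement a top-down walk: alloc_init_pud() installs
 * block entries where the range and flags allow it, and otherwise descends
 * through alloc_init_cont_pmd()/init_pmd() down to init_pte(), allocating
 * intermediate tables with the supplied pgtable_alloc() callback. Each level
 * accesses its table through the corresponding fixmap slot
 * (pud/pmd/pte_set_fixmap_offset()), which is what allows the walk to run
 * before the linear map exists, e.g. when invoked from paging_init() with
 * early_pgtable_alloc().
 */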
static void __create_pgd_mapping_locked(pgd_t *pgdir, phys_addr_t phys,
					unsigned long virt, phys_addr_t size,
					pgprot_t prot,
					phys_addr_t (*pgtable_alloc)(int),
					int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	mutex_lock(&fixmap_lock);
	__create_pgd_mapping_locked(pgdir, phys, virt, size, prot,
				    pgtable_alloc, flags);
	mutex_unlock(&fixmap_lock);
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
extern __alias(__create_pgd_mapping_locked)
void create_kpti_ng_temp_pgd(pgd_t *pgdir, phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     phys_addr_t (*pgtable_alloc)(int), int flags);
#endif

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);
	struct ptdesc *ptdesc = page_ptdesc(phys_to_page(pa));

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pagetable_pmd_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pagetable_pte_ctor(ptdesc));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pagetable_pmd_ctor(ptdesc));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
				   phys_addr_t size, pgprot_t prot)
{
	if (virt < PAGE_OFFSET) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < PAGE_OFFSET) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
			    (unsigned long)__init_begin - (unsigned long)_stext,
			    PAGE_KERNEL_RO);
}
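
/*
 * Hypothetical examples of how the helpers above are intended to be used
 * (the identifiers below are placeholders, not real callers):
 *
 *	// Early fixed mapping that must not allocate new table levels,
 *	// e.g. mapping firmware-provided data before the allocators are up:
 *	create_mapping_noalloc(phys, virt, size, PAGE_KERNEL_RO);
 *
 *	// Mapping into a non-init mm (the EFI runtime services mm is one
 *	// such user), optionally forcing page granularity:
 *	create_pgd_mapping(mm, phys, virt, size, PAGE_KERNEL, true);
 *
 *	// Permission change on an existing live mapping; this variant also
 *	// flushes the TLB for the affected range:
 *	update_mapping_prot(phys, virt, size, PAGE_KERNEL_RO);
 */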
#ifdef CONFIG_KFENCE

bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;

/* early_param() will be parsed before map_mem() below. */
static int __init parse_kfence_early_init(char *arg)
{
	int val;

	if (get_option(&arg, &val))
		kfence_early_init = !!val;
	return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);

static phys_addr_t __init arm64_kfence_alloc_pool(void)
{
	phys_addr_t kfence_pool;

	if (!kfence_early_init)
		return 0;

	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	if (!kfence_pool) {
		pr_err("failed to allocate kfence pool\n");
		kfence_early_init = false;
		return 0;
	}

	/* Temporarily mark as NOMAP. */
	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);

	return kfence_pool;
}

static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp)
{
	if (!kfence_pool)
		return;

	/* KFENCE pool needs page-level mapping. */
	__map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
		       pgprot_tagged(PAGE_KERNEL),
		       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
	__kfence_pool = phys_to_virt(kfence_pool);
}
#else /* CONFIG_KFENCE */

static inline phys_addr_t arm64_kfence_alloc_pool(void) { return 0; }
static inline void arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) { }

#endif /* CONFIG_KFENCE */
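
/*
 * map_mem() below creates the linear (direct) mapping for all memblock
 * memory. Roughly: the kernel text/rodata interval and the early KFENCE pool
 * are temporarily marked NOMAP so the main loop skips them, every remaining
 * range is mapped with pgprot_tagged(PAGE_KERNEL) so that MTE allocation
 * tags stay accessible where present, and the skipped regions are then
 * mapped separately with the stricter attributes they require.
 */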
static void __init map_mem(pgd_t *pgdp)
{
	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
	phys_addr_t kernel_start = __pa_symbol(_stext);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	phys_addr_t early_kfence_pool;
	int flags = NO_EXEC_MAPPINGS;
	u64 i;

	/*
	 * Setting hierarchical PXNTable attributes on table entries covering
	 * the linear region is only possible if it is guaranteed that no table
	 * entries at any level are being shared between the linear region and
	 * the vmalloc region. Check whether this is true for the PGD level, in
	 * which case it is guaranteed to be true for all other levels as well.
	 */
	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));

	early_kfence_pool = arm64_kfence_alloc_pool();

	if (can_set_direct_map())
		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		/*
		 * The linear map must allow allocation tags reading/writing
		 * if MTE is present. Otherwise, it has the same attributes as
		 * PAGE_KERNEL.
		 */
		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
			       flags);
	}

	/*
	 * Map the linear alias of the [_stext, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
	arm64_kfence_map_pool(early_kfence_pool, pgdp);
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

static pgprot_t kernel_exec_prot(void)
{
	return rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	int i;

	pgprot_t prot = kernel_exec_prot();
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS,
			     entry_tramp_text_size(), prot,
			     __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS);

	/* Map both the text and data into the kernel page table */
	for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++)
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, prot);

	if (IS_ENABLED(CONFIG_RELOCATABLE))
		__set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i,
			     pa_start + i * PAGE_SIZE, PAGE_KERNEL_RO);

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open coded check for BTI, only for use to determine configuration
 * for early mappings before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = __read_sysreg_by_encoding(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_EL1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = kernel_exec_prot();

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _stext, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	fixmap_copy(pgdp);
	kasan_copy_shadow(pgdp);
}
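
/*
 * create_idmap() below builds the identity map used while the MMU is being
 * switched on or off: the __idmap_text region is mapped at a virtual address
 * equal to its physical address so the code changing TTBR/SCTLR can keep
 * executing across the transition. If idmap_t0sz indicates that the kernel's
 * physical placement needs more VA bits than VA_BITS_MIN provides, an extra
 * root table level is spliced in first. With KPTI enabled, the G-to-nG
 * conversion flag also gets a small read-write window in the same tables.
 */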
static void __init create_idmap(void)
{
	u64 start = __pa_symbol(__idmap_text_start);
	u64 size = __pa_symbol(__idmap_text_end) - start;
	pgd_t *pgd = idmap_pg_dir;
	u64 pgd_phys;

	/* check if we need an additional level of translation */
	if (VA_BITS < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {
		pgd_phys = early_pgtable_alloc(PAGE_SHIFT);
		set_pgd(&idmap_pg_dir[start >> VA_BITS],
			__pgd(pgd_phys | P4D_TYPE_TABLE));
		pgd = __va(pgd_phys);
	}
	__create_pgd_mapping(pgd, start, start, size, PAGE_KERNEL_ROX,
			     early_pgtable_alloc, 0);

	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
		extern u32 __idmap_kpti_flag;
		u64 pa = __pa_symbol(&__idmap_kpti_flag);

		/*
		 * The KPTI G-to-nG conversion code needs a read-write mapping
		 * of its synchronization flag in the ID map.
		 */
		__create_pgd_mapping(pgd, pa, pa, sizeof(u32), PAGE_KERNEL,
				     early_pgtable_alloc, 0);
	}
}

void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
	extern pgd_t init_idmap_pg_dir[];

	idmap_t0sz = 63UL - __fls(__pa_symbol(_end) | GENMASK(VA_BITS_MIN - 1, 0));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), init_idmap_pg_dir);
	init_mm.pgd = swapper_pg_dir;

	memblock_phys_free(__pa_symbol(init_pg_dir),
			   __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();

	create_idmap();
}
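
/*
 * Memory hotplug teardown below happens in two passes over the same range:
 * unmap_hotplug_range() clears the leaf entries (optionally freeing the
 * mapped pages, unless they came from a vmem_altmap), and free_empty_tables()
 * then frees any page table pages that have become completely empty, subject
 * to the floor/ceiling limits.
 */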
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte),
						PAGE_SIZE, altmap);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped, struct vmem_altmap *altmap)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	/*
	 * altmap can only be used as vmemmap mapping backing memory.
	 * In case the backing memory itself is not being freed, then
	 * altmap is irrelevant. Warn about this inconsistency when
	 * encountered.
	 */
	WARN_ON(!free_mapped && altmap);

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}
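
/*
 * Note on the floor/ceiling rule used by the free_empty_*() helpers below:
 * a table page is only released when the whole aligned region it covers
 * (PMD_MASK, PUD_MASK or PGDIR_MASK granularity) lies within [floor, ceiling)
 * *and* every entry in it is empty. If the aligned region extends past floor
 * or ceiling, an adjacent region that is still mapped could share the same
 * table, so it is kept even though [start, end) itself is now empty.
 */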
static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(READ_ONCE(ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 3)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlap with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}

static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif

void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
}

int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
				unsigned long addr, unsigned long next)
{
	vmemmap_verify((pte_t *)pmdp, node, addr, next);
	return 1;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

        if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
                return vmemmap_populate_basepages(start, end, node, altmap);
        else
                return vmemmap_populate_hugepages(start, end, node, altmap);
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
                  struct vmem_altmap *altmap)
{
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

        unmap_hotplug_range(start, end, true, altmap);
        free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
        pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

        /* Only allow permission changes for now */
        if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
                                   pud_val(new_pud)))
                return 0;

        VM_BUG_ON(phys & ~PUD_MASK);
        set_pud(pudp, new_pud);
        return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
        pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

        /* Only allow permission changes for now */
        if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
                                   pmd_val(new_pmd)))
                return 0;

        VM_BUG_ON(phys & ~PMD_MASK);
        set_pmd(pmdp, new_pmd);
        return 1;
}

int pud_clear_huge(pud_t *pudp)
{
        if (!pud_sect(READ_ONCE(*pudp)))
                return 0;
        pud_clear(pudp);
        return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
        if (!pmd_sect(READ_ONCE(*pmdp)))
                return 0;
        pmd_clear(pmdp);
        return 1;
}

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
        pte_t *table;
        pmd_t pmd;

        pmd = READ_ONCE(*pmdp);

        if (!pmd_table(pmd)) {
                VM_WARN_ON(1);
                return 1;
        }

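        /*
         * Clear the PMD entry and flush the page-table walk cache before the
         * PTE table is freed, so that a concurrent walker can never see a
         * table page that has already been released.
         */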
        table = pte_offset_kernel(pmdp, addr);
        pmd_clear(pmdp);
        __flush_tlb_kernel_pgtable(addr);
        pte_free_kernel(NULL, table);
        return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
        pmd_t *table;
        pmd_t *pmdp;
        pud_t pud;
        unsigned long next, end;

        pud = READ_ONCE(*pudp);

        if (!pud_table(pud)) {
                VM_WARN_ON(1);
                return 1;
        }

        table = pmd_offset(pudp, addr);
        pmdp = table;
        next = addr;
        end = addr + PUD_SIZE;
        do {
                pmd_free_pte_page(pmdp, next);
        } while (pmdp++, next += PMD_SIZE, next != end);

        pud_clear(pudp);
        __flush_tlb_kernel_pgtable(addr);
        pmd_free(NULL, table);
        return 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
        unsigned long end = start + size;

        WARN_ON(pgdir != init_mm.pgd);
        WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

        unmap_hotplug_range(start, end, false, NULL);
        free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

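/*
 * Report the physical address range that the linear map is able to cover.
 * The core memory hotplug code uses this to reject hot-add requests that
 * fall outside of the linear map.
 */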
struct range arch_get_mappable_range(void)
{
        struct range mhp_range;
        u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
        u64 end_linear_pa = __pa(PAGE_END - 1);

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                /*
                 * Check for a wrap: because of the randomized linear mapping,
                 * the start physical address can actually be bigger than the
                 * end physical address. In this case set start to zero,
                 * because the [0, end_linear_pa] range must still be able to
                 * cover all addressable physical addresses.
                 */
                if (start_linear_pa > end_linear_pa)
                        start_linear_pa = 0;
        }

        WARN_ON(start_linear_pa > end_linear_pa);

        /*
         * The linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)],
         * accommodating both of its ends but excluding PAGE_END. The maximum
         * physical range which can be mapped inside this linear mapping range
         * must also be derived from its end points.
         */
        mhp_range.start = start_linear_pa;
        mhp_range.end =  end_linear_pa;

        return mhp_range;
}

int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
{
        int ret, flags = NO_EXEC_MAPPINGS;

        VM_BUG_ON(!mhp_range_allowed(start, size, true));

        if (can_set_direct_map())
                flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

        __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
                             size, params->pgprot, __pgd_pgtable_alloc,
                             flags);

        memblock_clear_nomap(start, size);

        ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
                          params);
        if (ret)
                __remove_pgd_mapping(swapper_pg_dir,
                                     __phys_to_virt(start), size);
        else {
                max_pfn = PFN_UP(start + size);
                max_low_pfn = max_pfn;
        }

        return ret;
}

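/*
 * Tear down both the struct pages created by __add_pages() and the linear
 * map entries that were set up in arch_add_memory().
 */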
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
        __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed, as it blocks the pfn range offlining process in
 * __offline_pages(). Hence this prevents both the offlining and the removal
 * of boot memory, which is initially always online. In the future, if and
 * when boot memory can be removed, this notifier should be dropped and
 * free_hotplug_page_range() should handle any reserved pages allocated
 * during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
                                           unsigned long action, void *data)
{
        struct mem_section *ms;
        struct memory_notify *arg = data;
        unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
        unsigned long pfn = arg->start_pfn;

        if ((action != MEM_GOING_OFFLINE) && (action != MEM_OFFLINE))
                return NOTIFY_OK;

        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long start = PFN_PHYS(pfn);
                unsigned long end = start + (1UL << PA_SECTION_SHIFT);

                ms = __pfn_to_section(pfn);
                if (!early_section(ms))
                        continue;

                if (action == MEM_GOING_OFFLINE) {
                        /*
                         * Boot memory removal is not supported. Prevent it by
                         * blocking any attempted offline request for boot
                         * memory and just report it.
                         */
                        pr_warn("Boot memory [%lx %lx] offlining attempted\n", start, end);
                        return NOTIFY_BAD;
                } else if (action == MEM_OFFLINE) {
                        /*
                         * This should never have happened. Boot memory
                         * offlining should have been prevented by this very
                         * notifier. Some memory removal procedure has
                         * probably changed and would then require further
                         * debugging.
                         */
                        pr_err("Boot memory [%lx %lx] offlined\n", start, end);

                        /*
                         * Core memory hotplug does not process a return code
                         * from the notifier for MEM_OFFLINE events. The error
                         * condition has been reported. Return from here as if
                         * it were ignored.
                         */
                        return NOTIFY_DONE;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
        .notifier_call = prevent_bootmem_remove_notifier,
};

/*
 * This ensures that boot memory sections on the platform are online from
 * early boot. A memory section cannot be prevented from being offlined if,
 * for some reason, it is not online to begin with. This helps validate the
 * basic assumption on which the above memory event notifier works to
 * prevent boot memory section offlining and its possible removal.
 */
static void validate_bootmem_online(void)
{
        phys_addr_t start, end, addr;
        struct mem_section *ms;
        u64 i;

        /*
         * Scanning across all of memblock might be expensive on some big
         * memory systems. Hence enable this validation only with DEBUG_VM.
         */
        if (!IS_ENABLED(CONFIG_DEBUG_VM))
                return;

        for_each_mem_range(i, &start, &end) {
                for (addr = start; addr < end; addr += (1UL << PA_SECTION_SHIFT)) {
                        ms = __pfn_to_section(PHYS_PFN(addr));

                        /*
                         * All memory ranges in the system at this point
                         * should have been marked as early sections.
                         */
                        WARN_ON(!early_section(ms));

                        /*
                         * The memory notifier mechanism used here to prevent
                         * boot memory offlining depends on each early memory
                         * section on the system being initially online.
                         * Otherwise a given memory section which is already
                         * offline will be overlooked and can be removed
                         * completely. Call out such sections.
                         */
                        if (!online_section(ms))
                                pr_err("Boot memory [%llx %llx] is offline, can be removed\n",
                                        addr, addr + (1UL << PA_SECTION_SHIFT));
                }
        }
}

static int __init prevent_bootmem_remove_init(void)
{
        int ret = 0;

        if (!IS_ENABLED(CONFIG_MEMORY_HOTREMOVE))
                return ret;

        validate_bootmem_online();
        ret = register_memory_notifier(&prevent_bootmem_remove_nb);
        if (ret)
                pr_err("%s: Notifier registration failed %d\n", __func__, ret);

        return ret;
}
early_initcall(prevent_bootmem_remove_init);
#endif

pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
        if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
            cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
                /*
                 * Break-before-make (BBM) is required for all user space
                 * mappings when the permission changes from executable to
                 * non-executable on CPUs affected by erratum #2645198.
                 */
                if (pte_user_exec(READ_ONCE(*ptep)))
                        return ptep_clear_flush(vma, addr, ptep);
        }
        return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
                             pte_t old_pte, pte_t pte)
{
        set_pte_at(vma->vm_mm, addr, ptep, pte);
}