/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}
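/*
 * Editor's note (illustrative, not part of the original file): the helper
 * below encodes the arm64 break-before-make rule. As a worked example, a
 * live PAGE_KERNEL mapping may be downgraded to PAGE_KERNEL_RO, since the
 * two differ only in PTE_WRITE/PTE_RDONLY, which are covered by the mask
 * below; changing the memory type (attribute index) of a live entry is
 * rejected, as that would require unmapping and TLB invalidation first.
 */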
static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	return ((old ^ new) & ~mask) == 0;
}

static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *pte;

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		pte_t old_pte = *pte;

		set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));

		phys += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void),
				int flags)
{
	unsigned long next;

	BUG_ON(pmd_sect(*pmd));
	if (pmd_none(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmd, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}
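/*
 * Editor's note (illustrative): for the contiguous-hint check above, with a
 * 4K granule CONT_PTE_MASK covers a naturally aligned 64KB run of 16 PTEs,
 * so PTE_CONT is only set when addr, next and phys are all 64KB aligned and
 * the range covers the whole run; otherwise the plain prot value is used.
 */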
static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void), int flags)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		pmd_t old_pmd = *pmd;

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmd, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      pmd_val(*pmd)));
		} else {
			alloc_init_cont_pte(pmd, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != pmd_val(*pmd));
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void), int flags)
{
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(*pud));
	if (pud_none(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pud_bad(*pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}
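/*
 * Editor's note (illustrative): the PAGE_SHIFT == 12 test above restricts
 * 1GB level-1 blocks to the 4K granule, where PUD_SIZE is 1GB; this mirrors
 * arch_ioremap_pud_supported() at the bottom of this file, which notes that
 * only the 4K granule supports level 1 block mappings. Other granules fall
 * through to PMD-level (or smaller) mappings.
 */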
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   int flags)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		pud_t old_pud = *pud;

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block.
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pud, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      pud_val(*pud)));
		} else {
			alloc_init_cont_pmd(pud, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != pud_val(*pud));
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(void),
				 int flags)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

static phys_addr_t pgd_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
		BUG();

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata.
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}
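/*
 * Editor's note (illustrative): the kernel image ends up mapped twice -- via
 * the kimage range it executes from, and via the linear map like any other
 * RAM. lm_alias(_text) above resolves to the linear-map alias, so only that
 * alias is made read-only here; the executable kimage mapping, set up in
 * map_kernel() further down, keeps its own permissions.
 */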
static void __init map_mem(pgd_t *pgd)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop.
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgd, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgd, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * Mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP | vm_flags;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	return strtobool(arg, &rodata_enabled);
}
early_param("rodata", parse_rodata);
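/*
 * Editor's note (illustrative): with the early parameter above, booting with
 * "rodata=off" on the kernel command line leaves rodata_enabled false, so
 * map_kernel() below maps the text segments with PAGE_KERNEL_EXEC (writable)
 * instead of PAGE_KERNEL_ROX -- the debugger escape hatch described in the
 * comment inside map_kernel().
 */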
/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
			*pgd_offset_k(FIXADDR_START));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgd);
}
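/*
 * Editor's note (illustrative): the net effect of map_kernel() is one vmap
 * region per segment -- .text and .init.text executable (read-only unless
 * rodata=off), .rodata mapped writable here and made read-only later by
 * mark_rodata_ro(), and .init.data/.data writable and non-executable.
 */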
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PGD_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
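/*
 * Editor's note (illustrative): the sign-extension check at the top of
 * kern_addr_valid() relies on kernel addresses having all bits above VA_BITS
 * set; e.g. with VA_BITS == 48, (long)0xffff000000000000 shifted right
 * arithmetically by 48 yields -1, while user or non-canonical addresses
 * do not.
 */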
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset_kimg(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset_kimg(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud = pud_offset_kimg(pgd, addr);
	} else {
		if (pgd_none(*pgd))
			__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pud = fixmap_pud(addr);
	}
	if (pud_none(*pud))
		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmd = fixmap_pmd(addr);
	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	    || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
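	/*
	 * Editor's note (illustrative): the mapping below proceeds in
	 * SWAPPER_BLOCK_SIZE chunks (a 2MB section with 4K pages, a single
	 * page otherwise): map one block first, read fdt_totalsize() from
	 * the header, then extend the mapping only if the blob crosses the
	 * first block boundary.
	 */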
	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	void *dt_virt;
	int size;

	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
	if (!dt_virt)
		return NULL;

	memblock_reserve(dt_phys, size);
	return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}

int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PUD_MASK);
	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_sect(*pud))
		return 0;
	pud_clear(pud);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_sect(*pmd))
		return 0;
	pmd_clear(pmd);
	return 1;
}
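/*
 * Editor's note (illustrative): the arch_ioremap_*_supported() and
 * p?d_{set,clear}_huge() helpers above back the generic huge-ioremap path
 * (lib/ioremap.c under CONFIG_HAVE_ARCH_HUGE_VMAP), which uses them to
 * install, and on some paths tear down, section mappings for suitably
 * sized and aligned ioremap() requests.
 */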