/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
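
/*
 * Statically allocated page tables used to map the fixmap region. They are
 * wired up by early_fixmap_init(), before the normal page table allocators
 * are available.
 */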
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}
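
/*
 * Check whether a live page table entry can be changed from 'old' to 'new'
 * without a break-before-make sequence.
 */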
static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
}
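
/*
 * Populate the PTEs covering [addr, end) with 'prot', allocating a table of
 * PTEs via pgtable_alloc() if the PMD entry is empty.
 */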
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	BUG_ON(pmd_sect(*pmd));
	if (pmd_none(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		pte_t old_pte = *pte;

		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));

	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   bool page_mappings_only)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	BUG_ON(pud_sect(*pud));
	if (pud_none(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		pmd_t old_pmd = *pmd;

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    !page_mappings_only) {
			pmd_set_huge(pmd, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      pmd_val(*pmd)));
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != pmd_val(*pmd));
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}
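
/*
 * Populate the PUD entries covering [addr, end), allocating a PMD table where
 * none exists. A 1GB block mapping is used when use_1G_block() allows it and
 * page mappings were not explicitly requested.
 */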
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   bool page_mappings_only)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		pud_t old_pud = *pud;

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) && !page_mappings_only) {
			pud_set_huge(pud, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      pud_val(*pud)));
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc, page_mappings_only);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != pud_val(*pud));
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(void),
				 bool page_mappings_only)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
			       page_mappings_only);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
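
/*
 * Allocate a zeroed page to back a page table, running pgtable_page_ctor()
 * so the page is initialised like any other page table page.
 */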
static phys_addr_t pgd_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
		BUG();

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	BUG_ON(mm == &init_mm);

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, page_mappings_only);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     NULL, debug_pagealloc_enabled());

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}
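
/*
 * Create the linear mapping for a single memblock region. The linear alias
 * of the kernel's [_text, __init_begin) interval is mapped separately so
 * that its permissions can be restricted later.
 */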
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 */

	/* No overlap with the kernel text/rodata */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());
		return;
	}

	/*
	 * This block overlaps the kernel text/rodata mappings.
	 * Map the portion(s) which don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 */
	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
			     kernel_end - kernel_start, PAGE_KERNEL,
			     early_pgtable_alloc, debug_pagealloc_enabled());
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

static void __init map_mem(pgd_t *pgd)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end);
	}
}
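
/*
 * Remap the kernel's .text as read-only/executable and .rodata as
 * read-only/non-executable once nothing needs to write to them any more.
 */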
void mark_rodata_ro(void)
{
	unsigned long section_size;

	section_size = (unsigned long)_etext - (unsigned long)_text;
	update_mapping_prot(__pa_symbol(_text), (unsigned long)_text,
			    section_size, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, debug_pagealloc_enabled());

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;

	map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
	map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
	map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
			   &vmlinux_init);
	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);

	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
			*pgd_offset_k(FIXADDR_START));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PGD_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
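/*
 * When the swapper uses section maps, the vmemmap is likewise populated with
 * PMD-sized section mappings; otherwise it is populated with base pages.
 */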
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset_kimg(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset_kimg(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud = pud_offset_kimg(pgd, addr);
	} else {
		if (pgd_none(*pgd))
			__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pud = fixmap_pud(addr);
	}
	if (pud_none(*pud))
		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmd = fixmap_pmd(addr);
	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
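
/*
 * Map the FDT blob through the fixmap so that it can be parsed before the
 * linear mapping is available. Returns the virtual address of the FDT (with
 * *size set to its total size), or NULL if the physical address is missing,
 * insufficiently aligned, or the blob is too large.
 */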
void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);
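
	/*
	 * The FDT is only guaranteed MIN_FDT_ALIGN alignment, so it may start
	 * at an arbitrary offset into the naturally aligned block mapped
	 * below.
	 */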
	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	void *dt_virt;
	int size;

	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
	if (!dt_virt)
		return NULL;

	memblock_reserve(dt_phys, size);
	return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}

int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PUD_MASK);
	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_sect(*pud))
		return 0;
	pud_clear(pud);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_sect(*pmd))
		return 0;
	pmd_clear(pmd);
	return 1;
}