/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
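/*
 * Statically allocated page tables used to bootstrap the fixmap region;
 * bm_pmd and bm_pud are only referenced on configurations where the fixmap
 * spans those table levels, hence __maybe_unused.
 */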
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}
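/*
 * Install ptes covering [addr, end), allocating a new pte table via
 * pgtable_alloc() if the pmd is empty or currently holds a section mapping
 * that must be split.
 */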
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys;

		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

/*
 * remap a PUD into pmds
 */
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
	/*
	 * If debug_pagealloc is enabled we must map the linear map
	 * using pages. However, other mappings created by
	 * create_mapping_noalloc() must use sections in some cases. Allow
	 * sections to be used in those cases, where no pgtable_alloc
	 * function is provided.
	 */
	return !pgtable_alloc || !debug_pagealloc_enabled();
}
#else
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
	return true;
}
#endif
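/*
 * Install pmd entries covering [addr, end), using section mappings where
 * the range and physical address are suitably aligned, and falling back to
 * ptes otherwise.
 */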
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys;

		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * Need to have the 1G of mappings continue to be
			 * present.
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    block_mappings_allowed(pgtable_alloc)) {
			pmd_t old_pmd = *pmd;

			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);

					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	/* Only the 4k granule supports level 1 (1GB) block mappings. */
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}
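/*
 * Install pud entries covering [addr, end), using 1GB block mappings where
 * possible and recursing into alloc_init_pmd() otherwise.
 */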
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;

		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For the 4K granule only, attempt to put down a 1GB block.
		 */
		if (use_1G_block(addr, next, phys) &&
		    block_mappings_allowed(pgtable_alloc)) {
			pud_t old_pud = *pud;

			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);

					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping of 'size' bytes at physical address 'phys' to the virtual
 * address 'virt'.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
		     phys_addr_t size, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical addresses don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);

	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}
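/*
 * Create or update a mapping in the given pgdir; the alloc callback
 * provides new table pages, and may be NULL for callers that must not
 * allocate (see create_mapping_noalloc() below).
 */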
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL);
}

/* Create a mapping in the given mm's page tables, allocating tables as needed. */
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

/* Create or modify a kernel mapping after the memory allocators are up. */
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}
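/*
 * Map a single memblock region into the linear mapping, giving the linear
 * alias of the kernel text more restrictive permissions than ordinary RAM.
 */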
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	unsigned long kernel_start = __pa(_stext);
	unsigned long kernel_end = __pa(_etext);

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 */

	/* No overlap with the kernel text */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc);
		return;
	}

	/*
	 * This block overlaps the kernel text mapping.
	 * Map the portion(s) which don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc);
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc);

	/*
	 * Map the linear alias of the [_stext, _etext) interval as
	 * read-only/non-executable. This makes the contents of the
	 * region accessible to subsystems such as hibernate, but
	 * protects it from inadvertent modification or execution.
	 */
	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
			     kernel_end - kernel_start, PAGE_KERNEL_RO,
			     early_pgtable_alloc);
}

static void __init map_mem(pgd_t *pgd)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end);
	}
}
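/*
 * Remap the kernel text as read-only (but still executable); a no-op
 * unless DEBUG_RODATA is enabled.
 */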
void mark_rodata_ro(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
		return;

	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_ROX);
}

void fixup_init(void)
{
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}
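/*
 * Map a contiguous chunk of the kernel image with the given protection and
 * register it as an early vm area, so the region is reserved against
 * future vmalloc allocations.
 */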
static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
				    pgprot_t prot, struct vm_struct *vma)
{
	phys_addr_t pa_start = __pa(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc);

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;

	map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
	map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
			 &vmlinux_init);
	map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);

	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
			*pgd_offset_k(FIXADDR_START));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
			__pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);

	bootmem_init();
}
/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
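/*
 * Populate the backing for the struct page array (vmemmap): with section
 * maps the backing is PMD-sized blocks, otherwise individual base pages.
 */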
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset_kimg(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset_kimg(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
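/*
 * Wire up the static bm_* tables so that fixmap slots (and thus early
 * ioremap and the FDT mapping) are usable before paging_init().
 */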
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	if (CONFIG_PGTABLE_LEVELS > 3 && !pgd_none(*pgd)) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud = pud_offset_kimg(pgd, addr);
	} else {
		pgd_populate(&init_mm, pgd, bm_pud);
		pud = fixmap_pud(addr);
	}
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = fixmap_pmd(addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Install (or, with an empty pgprot, remove) the pte for a single fixmap
 * slot.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
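/*
 * Map the FDT through the fixmap so that it can be parsed before the
 * linear mapping exists. Returns the virtual address of the FDT, or NULL
 * if the physical address is missing or misaligned, the header is
 * invalid, or the blob is too large.
 */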
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. We rely on MIN_FDT_ALIGN being at least 8
	 * bytes so that we can always access the size field of the FDT header
	 * after mapping the first chunk; double check here that this is
	 * indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
				       dt_virt_base,
				       round_up(offset + size, SWAPPER_BLOCK_SIZE),
				       prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}
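/*
 * Huge-mapping support hooks for ioremap: PUD (1GB) blocks are only
 * possible with the 4k granule, while PMD-sized blocks work on all
 * granules.
 */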
int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}

int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PUD_MASK);
	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_sect(*pud))
		return 0;
	pud_clear(pud);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_sect(*pmd))
		return 0;
	pmd_clear(pmd);
	return 1;
}