// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 *  Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, returns 0.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (__map_without_ltlbs)
		return 0;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap.
 * Return 0 if not mapped.
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (__map_without_ltlbs)
		return 0;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		/* An 8M page spans two PGD/PMD entries, so populate both. */
		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/* The PTE should never be already present */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));

	return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
}
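
/*
 * A minimal sketch, not part of the upstream file: a hypothetical __init
 * self-check that v_block_mapped() and p_block_mapped() invert each other
 * over the IMMR fixmap window.  Guarded by #if 0 so it is never built;
 * the helper name and the idea of calling it during early boot are
 * assumptions, not existing kernel API.
 */
#if 0
static void __init check_block_mapped_roundtrip(void)
{
	unsigned long va;

	for (va = VIRT_IMMR_BASE; va < VIRT_IMMR_BASE + IMMR_SIZE; va += PAGE_SIZE) {
		phys_addr_t pa = v_block_mapped(va);

		/* Every VA inside the IMMR window must map, and round-trip. */
		WARN_ON(pa != PHYS_IMMR_BASE + (va - VIRT_IMMR_BASE));
		WARN_ON(p_block_mapped(pa) != va);
	}
}
#endif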
static bool immr_is_mapped __initdata;

void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

/*
 * Map the [offset, top) range: 512K pages up to the first 8M boundary,
 * 8M pages through the middle of the range, then 512K pages for the
 * tail.  Both bounds must be 512K aligned.
 */
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
				pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	/*
	 * v already includes PAGE_OFFSET, so flush the remapped range itself;
	 * adding PAGE_OFFSET to v again would flush a bogus range beyond the
	 * kernel mapping and leave stale TLB entries behind.
	 */
	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);
}

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	if (__map_without_ltlbs)
		return 0;

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled_or_kfence()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}

void mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
	mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);

	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}

int pud_clear_huge(pud_t *pud)
{
	return 0;
}

int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
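
/*
 * A minimal sketch, never built (guarded by #if 0) and not part of the
 * upstream file: the head/middle/tail carving done by mmu_mapin_ram_chunk()
 * rewritten as a pure computation, so the loop bounds can be checked in
 * isolation.  E.g. for [0x1200000, 0x2000000) it counts twelve 512K pages
 * up to the 8M boundary at 0x1800000, then a single 8M page.  The helper
 * name is hypothetical.
 */
#if 0
static unsigned long __init count_chunk_pages(unsigned long offset, unsigned long top,
					      unsigned long *n512k, unsigned long *n8m)
{
	unsigned long p = offset;

	*n512k = 0;
	*n8m = 0;

	/* Head: 512K steps until p reaches an 8M boundary. */
	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K)
		(*n512k)++;
	/* Middle: whole 8M pages. */
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)
		(*n8m)++;
	/* Tail: remaining 512K pages. */
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)
		(*n512k)++;

	return p;	/* == top when both bounds are 512K aligned */
}
#endif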