// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>


#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

int __weak ioremap_range(unsigned long ea, phys_addr_t pa, unsigned long size,
			 pgprot_t prot, int nid)
{
	unsigned long i;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int err = map_kernel_page(ea + i, pa + i, prot);
		if (err) {
			if (slab_is_available())
				unmap_kernel_range(ea, size);
			else
				WARN_ON_ONCE(1); /* Should clean up */
			return err;
		}
	}

	return 0;
}
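
/*
 * Illustrative sketch (not part of this file): a platform or MMU
 * backend can supply a strong ioremap_range() to override the weak
 * page-by-page default above, typically by handing the whole range to
 * ioremap_page_range() once the vmalloc allocator is up. The function
 * name below is hypothetical; ioremap_page_range() and
 * unmap_kernel_range() are real kernel APIs:
 *
 *	int my_ioremap_range(unsigned long ea, phys_addr_t pa,
 *			     unsigned long size, pgprot_t prot, int nid)
 *	{
 *		int err;
 *
 *		if (!slab_is_available())
 *			return -EBUSY;
 *
 *		err = ioremap_page_range(ea, ea + size, pa, prot);
 *		if (err)
 *			unmap_kernel_range(ea, size);
 *		return err;
 *	}
 */
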
/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
	/* We don't support the 4K PFN hack with ioremap */
	if (pgprot_val(prot) & H_PAGE_4K_PFN)
		return NULL;

	if ((ea + size) >= (void *)IOREMAP_END) {
		pr_warn("Outside the supported range\n");
		return NULL;
	}

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	if (ioremap_range((unsigned long)ea, pa, size, prot, NUMA_NO_NODE))
		return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
			       pgprot_t prot, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot; the vmalloc area then hands out
	 * addresses from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, prot);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}

void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, prot, caller);
	return __ioremap_caller(addr, size, prot, caller);
}
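
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * driver maps a device region, accesses it with the MMIO accessors,
 * then unmaps it. bar_start, bar_len and CTRL_REG are hypothetical:
 *
 *	void __iomem *regs = ioremap(bar_start, bar_len);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + CTRL_REG);
 *	val = readl(regs + CTRL_REG);
 *	iounmap(regs);
 */
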
void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size,
			   unsigned long flags)
{
	pte_t pte = __pte(flags);
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	/*
	 * Force kernel mapping.
	 */
	pte = pte_mkprivileged(pte);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller);
	return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}


/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *)((unsigned long __force)
			PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		pr_warn("Attempt to iounmap early bolted mapping at 0x%p\n",
			addr);
		return;
	}
	vunmap(addr);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_is_leaf(pgd)) {
		VM_WARN_ON(!pgd_huge(pgd));
		return pte_page(pgd_pte(pgd));
	}
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_is_leaf(pud)) {
		VM_WARN_ON(!pud_huge(pud));
		return pte_page(pud_pte(pud));
	}
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For a hugepage we have the pfn in the pmd; we use PTE_RPN_SHIFT bits
 * for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_is_leaf(pmd)) {
		VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
		return pte_page(pmd_pte(pmd));
	}
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif
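
/*
 * Call-flow note (informational): mark_initmem_nx() is expected to be
 * called from the arch free_initmem() path, while the generic kernel
 * invokes mark_rodata_ro() afterwards, late in boot. That ordering is
 * why mark_rodata_ro() above can assume initmem is already
 * non-executable when ptdump_check_wx() runs.
 */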