// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
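/*
 * Teardown path for hot-unplugged memory: drop the struct pages first,
 * then tear down the linear-mapping HPTEs (and any lingering vmalloc
 * aliases) covering the range, and finally give the hash page table a
 * chance to shrink back to a size appropriate for the remaining memory.
 */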
void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
	int ret;

	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code. 32-bit only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];
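/*
 * Worked example (a sketch, assuming ARCH_ZONE_DMA_BITS == 31 and 4K
 * pages): the ZONE_DMA cap computed in paging_init() below is
 * ((1UL << 31) - 1) >> 12 == 0x7ffff, i.e. the last full page below
 * the 2GB boundary. Every PFN above that lands in ZONE_NORMAL (or in
 * ZONE_HIGHMEM on 32-bit highmem configurations).
 */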
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}
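/*
 * Usage sketch for the cache-flush helpers below (illustrative only):
 * a caller that writes data into a page destined for user space is
 * expected to do
 *
 *	memcpy(page_address(page), buf, len);
 *	flush_dcache_page(page);
 *
 * On this architecture flush_dcache_page() merely clears PG_arch_1;
 * the actual d/i-cache flush is deferred until the page is handed to
 * a user process.
 */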
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 *  - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 *  - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
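/*
 * Note on flush_icache_user_range() above: it is what keeps software
 * breakpoints working. When ptrace patches a trap instruction into
 * another process's text page (via copy_to_user_page()), the range is
 * flushed so the target CPU does not execute stale instructions still
 * sitting in its i-cache.
 */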
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long trap;
	bool is_exec;

	if (radix_enabled()) {
		prefetch((void *)address);
		return;
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:	/* data storage interrupt (DSI): load/store access */
		is_exec = false;
		break;
	case 0x400:	/* instruction storage interrupt (ISI): execute access */
		is_exec = true;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_BOOK3S */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);
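/*
 * A sketch (hypothetical callback name, not part of this file) of how a
 * driver such as EHEA consumes the export above:
 *
 *	static int note_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
 *			       void *arg)
 *	{
 *		... register [start_pfn, start_pfn + nr_pages) with the device ...
 *		return 0;
 *	}
 *
 *	walk_system_ram_range(0, max_pfn, NULL, note_ram_cb);
 */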