// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>
#include <asm/kasan.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#endif

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
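
/*
 * Usage sketch (illustrative only, not built): a character driver's mmap
 * handler can route its protection through phys_mem_access_prot() so that
 * non-RAM physical ranges get mapped non-cached, much as the generic
 * /dev/mem path does.  example_mmap() itself is hypothetical.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, size,
						 vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
			       vma->vm_page_prot);
}
#endif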

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	__remove_pages(start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif
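
/*
 * Flow sketch (illustrative only, not built): generic memory hot-add lands
 * in arch_add_memory() above; the node, address and size below are
 * hypothetical.  Hot-removing the same range goes through
 * arch_remove_memory(), which flushes the linear mapping in
 * FLUSH_CHUNK_SIZE pieces so the cond_resched() between chunks keeps
 * scheduling latency bounded.
 */
#if 0
	struct mhp_restrictions restrictions = {};
	int rc;

	/* hot-add 1GiB at the 16GiB mark on node 0 */
	rc = arch_add_memory(0, 16ULL * SZ_1G, SZ_1G, &restrictions);
#endif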

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit zone_dma_bits to inform
 * the generic DMA mapping code. 32-bit-only devices (if not handled by an
 * IOMMU anyway) will take a first dip into ZONE_NORMAL and get otherwise
 * served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
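
/*
 * Worked example (illustrative): with 4KiB pages (PAGE_SHIFT = 12) and
 * zone_dma_bits = 31, ZONE_DMA is capped at 1UL << (31 - 12) = 0x80000
 * PFNs, i.e. the first 2GiB of RAM (1GiB in the PPC32 case of 30 bits),
 * clamped to max_low_pfn on smaller machines.  So a hypothetical 1GiB
 * PPC32 box gets ZONE_DMA and ZONE_NORMAL both ending at PFN 0x40000,
 * leaving ZONE_HIGHMEM (when configured) to cover up to max_pfn.
 */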

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU-up
	 * functions... do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache
 *        will be flushed)
 *
 * Return: true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
	/*
	 * For a snooping icache, we still need a dummy icbi to purge all the
	 * prefetched instructions from the ifetch buffers. We also need a sync
	 * before the icbi to order the actual stores to memory that might
	 * have modified instructions with the icbi.
	 */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		mb(); /* sync */
		allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		icbi((void *)addr);
		prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		mb(); /* sync */
		isync();
		return true;
	}

	return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an
 *                             address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);

	mb(); /* sync */
	isync();
}

/**
 * flush_icache_range() - Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (flush_coherent_icache(start))
		return;

	clean_dcache_range(start, stop);

	if (IS_ENABLED(CONFIG_44x)) {
		/*
		 * Flash invalidate on 44x because we are passed kmapped
		 * addresses and this doesn't work for userspace pages due to
		 * the virtually tagged icache.
		 */
		iccci((void *)start);
		mb(); /* sync */
		isync();
	} else
		invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);
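
/*
 * Usage sketch (illustrative only, not built): a code-patching style
 * caller must make its store visible to instruction fetch before the new
 * instruction may execute, exactly the clean-dcache-then-invalidate-icache
 * sequence flush_icache_range() provides.  'ip' and 'new_insn' below are
 * hypothetical.
 */
#if 0
	*(u32 *)ip = new_insn;
	flush_icache_range(ip, ip + sizeof(u32));
#endif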

#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long nb = PAGE_SIZE / bytes;
	unsigned long addr = physaddr & PAGE_MASK;
	unsigned long msr, msr0;
	unsigned long loop1 = addr, loop2 = addr;

	msr0 = mfmsr();
	msr = msr0 & ~MSR_DR;
	/*
	 * This must remain as ASM to prevent potential memory accesses
	 * while the data MMU is disabled
	 */
	asm volatile(
		"   mtctr %2;\n"
		"   mtmsr %3;\n"
		"   isync;\n"
		"0: dcbst 0, %0;\n"
		"   addi  %0, %0, %4;\n"
		"   bdnz  0b;\n"
		"   sync;\n"
		"   mtctr %2;\n"
		"1: icbi  0, %1;\n"
		"   addi  %1, %1, %4;\n"
		"   bdnz  1b;\n"
		"   sync;\n"
		"   mtmsr %5;\n"
		"   isync;\n"
		: "+&r" (loop1), "+&r" (loop2)
		: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
		: "ctr", "memory");
}
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* No highmem on 8xx or 64-bit, so no need to kmap */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);

		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

		if (flush_coherent_icache(addr))
			return;
		flush_dcache_icache_phys(addr);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
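
/*
 * Usage sketch (illustrative only, not built): a page that had
 * instructions copied into it is made i-cache clean before being handed
 * out as executable.  'page', 'insns' and 'len' below are hypothetical.
 */
#if 0
	memcpy(kmap(page), insns, len);
	kunmap(page);
	flush_dcache_icache_page(page);
#endif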

/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
	unsigned long addr = (unsigned long)p;

	if (flush_coherent_icache(addr))
		return;

	clean_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * We don't flush the icache on 44x. Those have a virtual icache and we
	 * don't have access to the virtual address here (it's not the page
	 * vaddr but where it's mapped in user space). The flushing of the
	 * icache on these is handled elsewhere, when a change in the address
	 * space occurs, before returning to user space.
	 */

	if (cpu_has_feature(MMU_FTR_TYPE_44x))
		return;

	invalidate_icache_range(addr, addr + PAGE_SIZE);
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		phys_addr_t base = reg->base;
		phys_addr_t size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it,
 * for the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is
 * removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);
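
/*
 * Usage sketch (illustrative only, not built): a driver-side walk over
 * System RAM, in the style of the EHEA busmap setup that motivates the
 * export above.  The callback and initcall below are hypothetical.
 */
#if 0
static int example_ram_walk_cb(unsigned long start_pfn,
			       unsigned long nr_pages, void *arg)
{
	pr_info("System RAM: pfn 0x%lx, %lu pages\n", start_pfn, nr_pages);
	return 0;
}

static int __init example_ram_walk(void)
{
	/* visit every System RAM range below max_pfn */
	return walk_system_ram_range(0, max_pfn, NULL, example_ram_walk_cb);
}
#endif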