// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>
#include <linux/dma-direct.h>
#include <linux/kprobes.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

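/*
 * Flush large ranges in 1GB chunks so that cond_resched() can run between
 * chunks and a multi-gigabyte flush does not hog the CPU.
 */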
#define FLUSH_CHUNK_SIZE SZ_1G
/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long i;

	for (i = start; i < stop; i += chunk) {
		flush_dcache_range(i, min(stop, i + chunk));
		cond_resched();
	}
}

int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	return __add_pages(nid, start_pfn, nr_pages, params);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	__remove_pages(start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);

	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code.  32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

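/*
 * Called from mm_init(): hand all of memblock's free memory to the buddy
 * allocator and, on 32-bit, print the kernel virtual memory layout.
 */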
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	if (is_secure_guest())
		svm_swiotlb_init();
	else
		swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

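/*
 * Free the memory used only during boot (the init sections).  The init text
 * is marked non-executable first so nothing can run from the freed range.
 */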
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/**
 * flush_coherent_icache() - if a CPU has a coherent icache, flush it
 * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
 * Return true if the cache was flushed, false otherwise
 */
static inline bool flush_coherent_icache(unsigned long addr)
{
	/*
	 * For a snooping icache, we still need a dummy icbi to purge all the
	 * prefetched instructions from the ifetch buffers. We also need a sync
	 * before the icbi to order the actual stores to memory that might
	 * have modified instructions with the icbi.
	 */
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
		mb(); /* sync */
		allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		icbi((void *)addr);
		prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
		mb(); /* sync */
		isync();
		return true;
	}

	return false;
}

/**
 * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static void invalidate_icache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_icache_shift();
	unsigned long bytes = l1_icache_bytes();
	char *addr = (char *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		icbi(addr);

	mb(); /* sync */
	isync();
}

/**
 * flush_icache_range: Write any modified data cache blocks out to memory
 * and invalidate the corresponding blocks in the instruction cache
 *
 * Generic code will call this after writing memory, before executing from it.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (flush_coherent_icache(start))
		return;

	clean_dcache_range(start, stop);

	if (IS_ENABLED(CONFIG_44x)) {
		/*
		 * Flash invalidate on 44x because we are passed kmapped
		 * addresses and this doesn't work for userspace pages due to
		 * the virtually tagged icache.
		 */
		iccci((void *)start);
		mb(); /* sync */
		isync();
	} else
		invalidate_icache_range(start, stop);
}
EXPORT_SYMBOL(flush_icache_range);

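/*
 * On 32-bit CPUs with highmem a page may have no kernel virtual mapping, so
 * it has to be flushed by physical address with the data MMU disabled.  8xx
 * and 64-bit always have the page mapped and use __flush_dcache_icache() on
 * the virtual address instead.
 */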
#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
/**
 * flush_dcache_icache_phys() - Flush a page by its physical address
 * @physaddr: the physical address of the page
 */
static void flush_dcache_icache_phys(unsigned long physaddr)
{
	unsigned long bytes = l1_dcache_bytes();
	unsigned long nb = PAGE_SIZE / bytes;
	unsigned long addr = physaddr & PAGE_MASK;
	unsigned long msr, msr0;
	unsigned long loop1 = addr, loop2 = addr;

	msr0 = mfmsr();
	msr = msr0 & ~MSR_DR;
	/*
	 * This must remain as ASM to prevent potential memory accesses
	 * while the data MMU is disabled
	 */
	asm volatile(
		"   mtctr %2;\n"
		"   mtmsr %3;\n"
		"   isync;\n"
		"0: dcbst   0, %0;\n"
		"   addi    %0, %0, %4;\n"
		"   bdnz    0b;\n"
		"   sync;\n"
		"   mtctr %2;\n"
		"1: icbi    0, %1;\n"
		"   addi    %1, %1, %4;\n"
		"   bdnz    1b;\n"
		"   sync;\n"
		"   mtmsr %5;\n"
		"   isync;\n"
		: "+&r" (loop1), "+&r" (loop2)
		: "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
		: "ctr", "memory");
}
NOKPROBE_SYMBOL(flush_dcache_icache_phys)
#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;

		if (flush_coherent_icache(addr))
			return;
		flush_dcache_icache_phys(addr);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

/**
 * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 * @p: the address of the page to flush
 */
void __flush_dcache_icache(void *p)
{
	unsigned long addr = (unsigned long)p;

	if (flush_coherent_icache(addr))
		return;

	clean_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * We don't flush the icache on 44x. Those have a virtual icache and we
	 * don't have access to the virtual address here (it's not the page
	 * vaddr but where it's mapped in user space). The flushing of the
	 * icache on these is handled elsewhere, when a change in the address
	 * space occurs, before returning to user space.
	 */

	if (cpu_has_feature(MMU_FTR_TYPE_44x))
		return;

	invalidate_icache_range(addr, addr + PAGE_SIZE);
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

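/*
 * Flush the icache for a user page the kernel has just written to, e.g. via
 * copy_to_user_page() when ptrace plants a breakpoint.  The page is
 * temporarily mapped with kmap() and flushed through that kernel mapping.
 */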
void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			    unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);