/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
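
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a driver's mmap() implementation would typically filter its protection
 * through this hook before remapping, along the lines of
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
 *						 vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn, size,
 *			       vma->vm_page_prot);
 *
 * so that mappings of non-RAM pfns (e.g. MMIO) come back non-cached.
 */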

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size);
	if (rc) {
		pr_warning(
			"Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range. PPC64 does not maintain the memory layout in
 * /proc/iomem. Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes, and invoke the callback
 * for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0UL
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}
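
/*
 * Worked example (illustrative numbers only): with 4K pages, a 32-bit
 * HIGHMEM platform whose lowmem ends at 768MB calls
 *
 *	limit_zone_pfn(ZONE_NORMAL, 0x30000000 >> PAGE_SHIFT);
 *
 * which clamps max_zone_pfns[] for ZONE_NORMAL and every more
 * restrictive zone below it to pfn 0x30000, while ZONE_HIGHMEM keeps
 * its ~0UL default until paging_init() clamps it to the top of RAM
 * and freezes the limits.
 */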

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();
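
	/*
	 * On CONFIG_HIGHMEM kernels free_all_bootmem() only released the
	 * lowmem pages; the block below hands everything above
	 * lowmem_end_addr to the buddy allocator one pfn at a time,
	 * skipping ranges memblock has marked reserved.
	 */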
#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the CPU-up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info(" * 0x%08lx..0x%08lx : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* No need to kmap: neither 8xx nor 64-bit supports highmem,
	 * so the page is always directly addressable */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 *	- Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);
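
/*
 * A note on the PG_arch_1 convention used by the flush helpers above
 * (a reading of the code, not normative documentation): the bit means
 * "i-cache is clean for this page".  flush_dcache_page() clears it when
 * the kernel writes to a page; the fault/HPTE-insertion paths elsewhere
 * in the arch code test the bit, do the real flush_dcache_icache_page()
 * before the page is first mapped executable, and set it again.
 */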

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 *	- Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access, trap;

	if (radix_enabled())
		return;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/*
	 * We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:	/* data access fault */
		access = 0UL;
		break;
	case 0x400:	/* instruction access fault */
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain
 * the PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */