/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
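/*
 * Illustrative sketch, compiled out (not part of the original file):
 * roughly how a /dev/mem-style character driver consults
 * phys_mem_access_prot() before remapping physical pages to userspace,
 * so RAM stays cacheable while everything else is mapped non-cached.
 * "example_mmap" is hypothetical.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	/* vm_pgoff carries the target pfn for physical mappings */
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size, vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
#endif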
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size);
	if (rc) {
		pr_warning(
			"Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0, for_device);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback for each contiguous
 * region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
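/*
 * Illustrative sketch, compiled out (not part of the original file):
 * counting the RAM pages in a pfn range via the callback contract above.
 * The callback receives a start pfn and a page count per contiguous
 * region; a non-zero return stops the walk.  "count_ram_pages" and
 * "example_count_ram" are hypothetical.
 */
#if 0
static int count_ram_pages(unsigned long start_pfn, unsigned long nr_pages,
			   void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;	/* keep walking */
}

static unsigned long example_count_ram(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_pages);
	return total;
}
#endif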
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0UL
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	enum zone_type top_zone = ZONE_NORMAL;
	int i;

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
#endif

	for (i = top_zone; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}
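/*
 * Illustrative sketch, compiled out (not part of the original file):
 * a platform whose DMA masters can only address the low 2GB might clamp
 * its DMA zone before paging_init(), then map a device's pfn limit back
 * to a zone when choosing allocation flags.  "example_platform_setup"
 * and "example_gfp_flags" are hypothetical, and the sketch assumes the
 * kernel is configured with a ZONE_DMA.
 */
#if 0
static void __init example_platform_setup(void)
{
	/* restrict ZONE_DMA (and anything below it) to the low 2GB */
	limit_zone_pfn(ZONE_DMA, 1UL << (31 - PAGE_SHIFT));
}

static gfp_t example_gfp_flags(u64 dev_pfn_limit)
{
	/* use GFP_DMA only if no less restrictive zone fits the limit */
	return dma_pfn_limit_to_zone(dev_pfn_limit) == ZONE_DMA ? GFP_DMA : 0;
}
#endif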
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	enum zone_type top_zone;

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#else
	top_zone = ZONE_NORMAL;
#endif

	limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info(" * 0x%08lx..0x%08lx : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 *   - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 *   - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
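/*
 * Illustrative sketch, compiled out (not part of the original file):
 * flush_icache_user_range() is the hook used after the kernel writes
 * instructions into a user page, e.g. when ptrace plants a breakpoint;
 * this mirrors the copy_to_user_page() contract, where "dst" is a kernel
 * mapping of the user page.  "example_poke_text" is hypothetical.
 */
#if 0
static void example_poke_text(struct vm_area_struct *vma, struct page *page,
			      unsigned long uaddr, void *dst,
			      const void *src, int len)
{
	memcpy(dst, src, len);				/* patch the text */
	flush_icache_user_range(vma, page, uaddr, len);	/* make it visible */
}
#endif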
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)		/* instruction storage fault */
		access |= _PAGE_EXEC;
	else if (trap != 0x300)		/* only preload after data storage faults */
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid.  The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
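/*
 * Illustrative sketch, compiled out (not part of the original file):
 * with CONFIG_STRICT_DEVMEM, the /dev/mem read/write/mmap paths gate each
 * page of a request on devmem_is_allowed(), roughly like this.
 * "example_range_is_allowed" is hypothetical.
 */
#if 0
static int example_range_is_allowed(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long end = pfn + nr_pages;

	for (; pfn < end; pfn++)
		if (!devmem_is_allowed(pfn))
			return 0;	/* deny the whole access */
	return 1;
}
#endif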