/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif
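
/*
 * Hot-plug a memory range: create a linear mapping for the new range,
 * then hand its pages over to the core VM via __add_pages().
 * Returns -EINVAL if the linear mapping cannot be created.
 */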
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	return __remove_pages(zone, start_pfn, nr_pages);
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem; instead it is kept in memblock.memory structures.  Walk
 * through the memory regions, skip the holes and invoke the callback
 * for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
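
/*
 * Example (hypothetical caller, not part of this file): count how many
 * RAM pages lie inside a pfn range.  The callback receives a start pfn
 * and a page count for each contiguous chunk, and returns 0 to keep
 * walking:
 *
 *	static int count_ram(unsigned long start_pfn, unsigned long nr_pages,
 *			     void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram);
 */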

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	struct memblock_region *reg;
	int boot_mapsize;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;
		if (top < lowmem_end_addr)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		else if (reg->base < lowmem_end_addr) {
			unsigned long trunc_size = lowmem_end_addr - reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
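
/*
 * mem_init() hands all remaining boot memory to the page allocator,
 * accounts reserved and highmem pages, and prints the final memory
 * summary (plus, on 32-bit, the kernel virtual memory layout).
 */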
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (memblock_is_reserved(paddr))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}
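
/*
 * Poison and free the pages holding the kernel's __init sections;
 * once boot is complete they are never executed or referenced again.
 */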
void free_initmem(void)
{
	unsigned long addr;

	ppc_md.progress = ppc_printk_progress;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end -
		(unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);
	pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
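/*
 * On powerpc the PG_arch_1 page flag tracks "i-cache clean": it is set
 * once the page has been flushed for execution, and cleared below to
 * force a fresh flush the next time the page is mapped executable.
 */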
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);
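
/*
 * clear_user_page() and copy_user_page() mirror clear_page() and
 * copy_page(), but additionally mark the destination page as not
 * i-cache clean, since userspace may go on to execute from it.
 */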
void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem, but various tools
 * expect it (eg kdump).
 */
static int add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid.  The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	if (page_is_rtas_user_buf(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */