/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
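/*
 * Illustrative sketch (not from this file): the generic /dev/mem mmap
 * path in drivers/char/mem.c applies the protections chosen above in
 * roughly this way; details below are for illustration only.
 *
 *	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *						 vma->vm_end - vma->vm_start,
 *						 vma->vm_page_prot);
 *	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
 *		return -EAGAIN;
 */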
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (!ret && (ppc_md.remove_memory))
		ret = ppc_md.remove_memory(start, size);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range. PPC64 does not maintain the memory layout in /proc/iomem;
 * instead it maintains it in memblock.memory structures. Walk through the
 * memory regions, find holes, and invoke the callback for each contiguous
 * region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
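/*
 * Usage sketch (hypothetical caller, not part of this file): count the
 * RAM pages in a range with a callback matching the signature above.
 * Returning non-zero from the callback stops the walk.
 *
 *	static int count_ram_pages(unsigned long start_pfn,
 *				   unsigned long nr_pages, void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
 */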
/*
 * Initialize the bootmem system and give it all the memory we
 * have available. If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	struct memblock_region *reg;
	int boot_mapsize;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE
	 * (e.g. 512MB of RAM with 4K pages needs 131072 / 8 = 16KB of
	 * bitmap, i.e. 4 pages).  Add 1 additional page in case the address
	 * isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;
		if (top < lowmem_end_addr)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		else if (reg->base < lowmem_end_addr) {
			unsigned long trunc_size = lowmem_end_addr - reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */
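/*
 * Worked example (illustrative numbers): on a 32-bit board with 768MB of
 * RAM, 512MB of lowmem and CONFIG_HIGHMEM, paging_init() above ends up
 * with
 *
 *	max_zone_pfns[ZONE_DMA]     = 0x20000	(512MB >> PAGE_SHIFT)
 *	max_zone_pfns[ZONE_HIGHMEM] = 0x30000	(768MB >> PAGE_SHIFT)
 *
 * assuming 4K pages.
 */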
static void __init register_page_bootmem_info(void)
{
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	register_page_bootmem_info();
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If SMP is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions; do it here for the non-SMP case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
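/*
 * The functions below implement powerpc's lazy i-cache flush scheme.
 * PG_arch_1 serves as an "i-cache clean" bit: flush_dcache_page() clears
 * it when the kernel dirties a page, and the fault/hash paths flush and
 * set it again before the page is mapped executable. A rough sketch of
 * the consumer side (simplified, not from this file):
 *
 *	if (!test_bit(PG_arch_1, &page->flags)) {
 *		flush_dcache_icache_page(page);
 *		set_bit(PG_arch_1, &page->flags);
 *	}
 */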
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
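/*
 * Context note: flush_icache_user_range() above is reached via the
 * generic copy_to_user_page() path, e.g. when ptrace (through
 * access_process_vm) writes a breakpoint into another task's text, so
 * the new instruction is visible to the i-cache before that task runs.
 */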
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the Linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated Linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem, but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-RAM areas as well; these contain the
 * PCI mmio resources as well as potential BIOS/ACPI data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	if (page_is_rtas_user_buf(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
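/*
 * Caller-side sketch (simplified from the generic /dev/mem code, not part
 * of this file): each page of a read or mapping is vetted with something
 * like
 *
 *	if (!devmem_is_allowed(pfn))
 *		return -EPERM;
 *
 * so under CONFIG_STRICT_DEVMEM regular kernel RAM is refused while MMIO
 * and the RTAS user buffer remain accessible.
 */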