/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}
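
/*
 * Illustrative sketch only (the exact call chain lives in the generic
 * hotplug core and varies by kernel version): the two hooks above are
 * reached from the memory hotplug core roughly like this:
 *
 *	nid = memory_add_physaddr_to_nid(start);
 *	ret = arch_add_memory(nid, start, size);
 *
 * i.e. the architecture first extends the kernel linear mapping with
 * create_section_mapping() and then hands the new page range to the
 * core VM via __add_pages().
 */
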
#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn, end_pfn;
	int ret;

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = start_pfn + (size >> PAGE_SHIFT);
	ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
	if (ret)
		goto out;
	/* Arch-specific calls go here - next patch */
out:
	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in lmb.memory structures.  Walk through the
 * memory regions, find the holes, and call back for each contiguous region.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *))
{
	struct lmb_property res;
	unsigned long pfn, len;
	u64 end;
	int ret = -1;

	res.base = (u64) start_pfn << PAGE_SHIFT;
	res.size = (u64) nr_pages << PAGE_SHIFT;

	end = res.base + res.size - 1;
	while ((res.base < end) && (lmb_find(&res) >= 0)) {
		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
		len = (unsigned long)(res.size >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.base += (res.size + 1);
		res.size = (end - res.base + 1);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_memory_resource);
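
/*
 * Example caller (illustrative only; count_pages and the variable names
 * are made up for this sketch, not part of the kernel API).  The
 * callback receives each contiguous region as (start pfn, length in
 * pages, arg):
 *
 *	static int count_pages(unsigned long pfn, unsigned long nr_pages,
 *			       void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;	// non-zero stops the walk
 *	}
 *
 *	unsigned long total = 0;
 *	walk_memory_resource(start_pfn, nr_pages, &total, count_pages);
 */
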
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;
		if (addr < lowmem_end_addr)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
			unsigned long adjusted_size = lowmem_end_addr -
				      lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);

#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}
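
/*
 * Worked example for do_init_bootmem()'s bitmap sizing (illustrative
 * numbers): with 512MB of lowmem and 4KB pages, total_pages = 131072,
 * so the bootmem bitmap needs 131072 bits = 16KB, i.e.
 * bootmem_bootmap_pages() returns 4 pages.
 */
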
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		(unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

	mem_init_done = 1;
}
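
/*
 * The helpers below keep PowerPC's split instruction/data caches
 * coherent with kernel writes to page contents.  The protocol, as
 * visible in the code that follows: PG_arch_1 set on a page means its
 * icache image is known to be clean.  flush_dcache_page() clears the
 * bit when the kernel dirties a page, and update_mmu_cache() flushes
 * and sets the bit before the page is mapped for a user process.
 */
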
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* No need to kmap: highmem does not exist on 8xx or 64-bit,
	 * so page_address() is always valid here.
	 */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero-filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
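
/*
 * Example use of flush_icache_user_range() (illustrative, based on the
 * generic ptrace path): access_process_vm() writes the target page and
 * then invokes copy_to_user_page(), which on powerpc ends up here, so
 * that e.g. a breakpoint instruction poked in by a debugger is visible
 * to instruction fetch before the task resumes.
 */
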
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
		/* On 8xx, cache control instructions (particularly
		 * "dcbst" from flush_dcache_icache) fault as write
		 * operation if there is an unpopulated TLB entry
		 * for the address in question.  To work around that,
		 * we invalidate the TLB here, thus avoiding dcbst
		 * misbehaviour.
		 */
		_tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
		/* The _PAGE_USER test should really be _PAGE_EXEC, but
		 * older glibc versions execute some code from no-exec
		 * pages, which for now we are supporting.  If exec-only
		 * pages are ever implemented, this will have to change.
		 */
		if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
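
/*
 * Note on the trap numbers above: 0x300 and 0x400 are the classic
 * PowerPC exception vectors for the data storage interrupt (DSI) and
 * the instruction storage interrupt (ISI) respectively, so trap ==
 * 0x400 means the fault came from an instruction fetch and __hash_page
 * is asked for execute permission via _PAGE_EXEC.
 */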