/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/lmb.h>
#include <asm/sections.h>
#ifdef CONFIG_PPC64
#include <asm/vdso.h>
#endif

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

extern void hash_preload(struct mm_struct *mm, unsigned long ea,
			 unsigned long access, unsigned long trap);

/*
 * This is called by /dev/mem to determine whether a given address
 * has to be mapped non-cacheable or not.
 */
int page_is_ram(unsigned long pfn)
{
	unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64	/* XXX for now */
	return paddr < __pa(high_memory);
#else
	int i;
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
EXPORT_SYMBOL(page_is_ram);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
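/*
 * Illustrative sketch (not built): a platform can override the generic
 * policy above by installing its own hook through ppc_md from its setup
 * code. The function below is hypothetical; it simply mirrors the
 * default behaviour of marking anything that is not RAM as guarded and
 * non-cacheable.
 */
#if 0
static pgprot_t example_phys_mem_access_prot(struct file *file,
		unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	/* non-RAM (e.g. device memory) must not be cached or speculated */
	if (!page_is_ram(pfn))
		return __pgprot(pgprot_val(vma_prot)
				| _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}

/* and in the platform's setup function: */
/* ppc_md.phys_mem_access_prot = example_phys_mem_access_prot; */
#endif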
#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	free_cold_page(page);
	totalram_pages++;
	num_physpages++;
}

/*
 * This works only for the non-NUMA case. Later, we'll need a lookup
 * to convert from real physical addresses to nid that doesn't use
 * pfn_to_nid().
 */
int __devinit add_memory(u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(0);
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(zone, start_pfn, nr_pages);
}

/*
 * First pass at this code will check to determine if the remove
 * request is within the RMO. Do not allow removal within the RMO.
 */
int __devinit remove_memory(u64 start, u64 size)
{
	struct zone *zone;
	unsigned long start_pfn, end_pfn, nr_pages;

	start_pfn = start >> PAGE_SHIFT;
	nr_pages = size >> PAGE_SHIFT;
	end_pfn = start_pfn + nr_pages;

	printk("%s(): Attempting to remove memory in range "
	       "%lx to %lx\n", __func__, start, start + size);
	/*
	 * check for range within RMO
	 */
	zone = page_zone(pfn_to_page(start_pfn));

	printk("%s(): memory will be removed from "
	       "the %s zone\n", __func__, zone->name);

	/*
	 * not handling removal of memory ranges that
	 * overlap multiple zones yet
	 */
	if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
		goto overlap;

	/* make sure it is NOT in RMO */
	if ((start < lmb.rmo_size) || ((start + size) < lmb.rmo_size)) {
		printk("%s(): range to be removed must NOT be in RMO!\n",
		       __func__);
		goto in_rmo;
	}

	return __remove_pages(zone, start_pfn, nr_pages);

overlap:
	printk("%s(): memory range to be removed overlaps "
	       "multiple zones!!!\n", __func__);
in_rmo:
	return -1;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
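/*
 * Worked example for the pfn arithmetic above (values hypothetical):
 * with 4KB pages (PAGE_SHIFT == 12), hot-adding a 128MB DIMM at
 * physical address 0x80000000 gives
 *
 *	start_pfn = 0x80000000 >> 12 = 0x80000
 *	nr_pages  = 0x08000000 >> 12 = 0x08000	(32768 pages)
 *
 * and a later remove_memory(0x80000000, 0x08000000) would be refused
 * if that range fell below lmb.rmo_size, i.e. inside the RMO.
 */
#if 0
	/* e.g. from a hypothetical platform hotplug handler: */
	int err = add_memory(0x80000000ULL, 0x08000000ULL);
#endif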
void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	unsigned long highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	for_each_pgdat(pgdat) {
		unsigned long flags;
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
	printk("%ld pages of HIGHMEM\n", highmem);
#endif
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available. If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap. Calculate the size
	 * of the bitmap required as (Total Memory) / PAGE_SIZE /
	 * BITS_PER_BYTE. Add 1 additional page in case the address isn't
	 * page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
	BUG_ON(!start);

	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);

	/* Add all physical memory to the bootmem map, marking each area
	 * present.
	 */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base = lmb.memory.region[i].base;
		unsigned long size = lmb_size_bytes(&lmb.memory, i);
#ifdef CONFIG_HIGHMEM
		if (base >= total_lowmem)
			continue;
		if (base + size > total_lowmem)
			size = total_lowmem - base;
#endif
		free_bootmem(base, size);
	}

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i));

	/* XXX need to clip this if using highmem? */
	for (i = 0; i < lmb.memory.cnt; i++)
		memory_present(0, lmb_start_pfn(&lmb.memory, i),
			       lmb_end_pfn(&lmb.memory, i));

	init_bootmem_done = 1;
}
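/*
 * Worked example of the bitmap sizing above (illustrative numbers):
 * with 512MB of lowmem and 4KB pages, total_pages = 512MB / 4KB =
 * 131072, so the bitmap needs 131072 / 8 = 16KB, i.e. four 4KB pages;
 * per the comment above, one more page may be added if the start
 * address isn't page-aligned. bootmem_bootmap_pages() does the
 * rounding for us.
 */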
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long top_of_ram = lmb_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
			(PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
	map_page(KMAP_FIX_BEGIN, 0, 0);	/* XXX gross */
	kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
			(KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_INFO "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);
	/*
	 * All pages are DMA-able so we put them all in the DMA zone.
	 */
	memset(zones_size, 0, sizeof(zones_size));
	memset(zholes_size, 0, sizeof(zholes_size));

	zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
	zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
	zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
	zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
#endif /* CONFIG_HIGHMEM */

	free_area_init_node(0, NODE_DATA(0), zones_size,
			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
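/*
 * Worked example for the zone sizing above (hypothetical layout): if
 * firmware reports two memory regions, 0-768MB and 1GB-1.25GB, then
 * lmb_phys_mem_size() = 1GB and lmb_end_of_DRAM() = 1.25GB, so with
 * 4KB pages ZONE_DMA spans 0x50000 pfns and zholes_size[ZONE_DMA]
 * accounts for the 256MB (0x10000 pfn) hole between the two regions.
 */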
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

	num_physpages = max_pfn;	/* RAM is assumed contiguous */
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %x\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = num_physpages;
	totalram_pages += free_all_bootmem();
#endif
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&__init_begin - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = total_lowmem >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);

			ClearPageReserved(page);
			set_page_count(page, 1);
			__free_page(page);
			totalhigh_pages++;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_INFO "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT - 10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
	       num_physpages << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       bsssize >> 10,
	       initsize >> 10);

	mem_init_done = 1;

#ifdef CONFIG_PPC64
	/* Initialize the vDSO */
	vdso_init();
#endif
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
	void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
	__flush_dcache_icache(start);
	kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx and 64-bit there is no need to kmap since highmem is
	 * not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero-filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);
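/*
 * The PG_arch_1 protocol used above, in brief: the bit set means "the
 * i-cache is clean for this page". flush_dcache_page() and friends only
 * clear the bit; the actual flush is deferred until the page is handed
 * to user space by update_mmu_cache() below. An illustrative sketch of
 * the consumer side (hypothetical helper, not built):
 */
#if 0
static void example_sync_icache(struct page *page)
{
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);		/* make i-cache clean */
		set_bit(PG_arch_1, &page->flags);	/* remember that */
	}
}
#endif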
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;
#endif
	unsigned long pfn = pte_pfn(pte);

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (!PageReserved(page)
		    && !test_bit(PG_arch_1, &page->flags)) {
			if (vma->vm_mm == current->active_mm) {
#ifdef CONFIG_8xx
				/* On 8xx, cache control instructions (particularly
				 * "dcbst" from flush_dcache_icache) fault as write
				 * operations if there is an unpopulated TLB entry
				 * for the address in question. To work around that,
				 * we invalidate the TLB here, thus avoiding dcbst
				 * misbehaviour.
				 */
				_tlbie(address);
#endif
				__flush_dcache_icache((void *) address);
			} else
				flush_dcache_icache_page(page);
			set_bit(PG_arch_1, &page->flags);
		}
	}

#ifdef CONFIG_PPC_STD_MMU
	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}
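/*
 * For reference, the trap numbers tested above are the classic PowerPC
 * exception vectors: 0x300 is a data storage interrupt (data access
 * fault) and 0x400 an instruction storage interrupt (instruction access
 * fault), so _PAGE_EXEC is requested only when the fault came from an
 * instruction fetch. Any other trap value means we did not get here
 * from a fault, and no HPTE is preloaded.
 */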