/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>

#include <asm/mach-types.h>
#include <asm/hardware.h>
#include <asm/setup.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#define TABLE_SIZE	(2 * PTRS_PER_PTE * sizeof(pte_t))

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
extern unsigned long phys_initrd_start;
extern unsigned long phys_initrd_size;

/*
 * The sole use of this is to pass memory configuration
 * data from paging_init to mem_init.
 */
static struct meminfo meminfo __initdata = { 0, };

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;

void show_mem(void)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, node;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for_each_online_node(node) {
		struct page *page, *end;

		page = NODE_MEM_MAP(node);
		end = page + NODE_DATA(node)->node_spanned_pages;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

struct node_info {
	unsigned int start;
	unsigned int end;
	int bootmap_pages;
};

#define O_PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define O_PFN_UP(x)	(PAGE_ALIGN(x) >> PAGE_SHIFT)

/*
 * FIXME: We really want to avoid allocating the bootmap bitmap
 * over the top of the initrd.  Hopefully, this is located towards
 * the start of a bank, so if we allocate the bootmap bitmap at
 * the end, we won't clash.
 */
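/*
 * find_bootmap_pfn() scans the banks belonging to 'node' for the first
 * run of at least 'bootmap_pages' pages starting at or above the end of
 * the kernel image, and returns the pfn at which the bootmem bitmap for
 * that node can be placed.  It BUG()s if no bank has enough room.
 */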
static unsigned int __init
find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
{
	unsigned int start_pfn, bank, bootmap_pfn;

	start_pfn   = O_PFN_UP(__pa(&_end));
	bootmap_pfn = 0;

	for (bank = 0; bank < mi->nr_banks; bank ++) {
		unsigned int start, end;

		if (mi->bank[bank].node != node)
			continue;

		start = mi->bank[bank].start >> PAGE_SHIFT;
		end   = (mi->bank[bank].size +
			 mi->bank[bank].start) >> PAGE_SHIFT;

		if (end < start_pfn)
			continue;

		if (start < start_pfn)
			start = start_pfn;

		if (end <= start)
			continue;

		if (end - start >= bootmap_pages) {
			bootmap_pfn = start;
			break;
		}
	}

	if (bootmap_pfn == 0)
		BUG();

	return bootmap_pfn;
}

/*
 * Scan the memory info structure and pull out:
 *  - the end of memory
 *  - the number of nodes
 *  - the pfn range of each node
 *  - the number of bootmem bitmap pages
 */
static unsigned int __init
find_memend_and_nodes(struct meminfo *mi, struct node_info *np)
{
	unsigned int i, bootmem_pages = 0, memend_pfn = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		np[i].start = -1U;
		np[i].end = 0;
		np[i].bootmap_pages = 0;
	}

	for (i = 0; i < mi->nr_banks; i++) {
		unsigned long start, end;
		int node;

		if (mi->bank[i].size == 0) {
			/*
			 * Mark this bank with an invalid node number
			 */
			mi->bank[i].node = -1;
			continue;
		}

		node = mi->bank[i].node;

		/*
		 * Make sure we haven't exceeded the maximum number of nodes
		 * that we have in this configuration.  If we have, we're in
		 * trouble.  (maybe we ought to limit, instead of bugging?)
		 */
		if (node >= MAX_NUMNODES)
			BUG();
		node_set_online(node);

		/*
		 * Get the start and end pfns for this bank
		 */
		start = mi->bank[i].start >> PAGE_SHIFT;
		end   = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT;

		if (np[node].start > start)
			np[node].start = start;

		if (np[node].end < end)
			np[node].end = end;

		if (memend_pfn < end)
			memend_pfn = end;
	}

	/*
	 * Calculate the number of pages we require to
	 * store the bootmem bitmaps.
	 */
	for_each_online_node(i) {
		if (np[i].end == 0)
			continue;

		np[i].bootmap_pages = bootmem_bootmap_pages(np[i].end -
							    np[i].start);
		bootmem_pages += np[i].bootmap_pages;
	}

	high_memory = __va(memend_pfn << PAGE_SHIFT);

	/*
	 * This doesn't seem to be used by the Linux memory
	 * manager any more.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number
	 * of _pages_ in the system, not the maximum PFN.
	 */
	max_low_pfn = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);
	max_pfn     = memend_pfn - O_PFN_DOWN(PHYS_OFFSET);

	return bootmem_pages;
}
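
/*
 * check_initrd() returns the node holding the initrd image, -1 if an
 * initrd was supplied but lies outside every memory bank (in which
 * case the initrd is disabled), or -2 if no initrd is configured or
 * none was supplied.
 */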
static int __init check_initrd(struct meminfo *mi)
{
	int initrd_node = -2;
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long end = phys_initrd_start + phys_initrd_size;

	/*
	 * Make sure that the initrd is within a valid area of
	 * memory.
	 */
	if (phys_initrd_size) {
		unsigned int i;

		initrd_node = -1;

		for (i = 0; i < mi->nr_banks; i++) {
			unsigned long bank_end;

			bank_end = mi->bank[i].start + mi->bank[i].size;

			if (mi->bank[i].start <= phys_initrd_start &&
			    end <= bank_end)
				initrd_node = mi->bank[i].node;
		}
	}

	if (initrd_node == -1) {
		printk(KERN_ERR "initrd (0x%08lx - 0x%08lx) extends beyond "
		       "physical memory - disabling initrd\n",
		       phys_initrd_start, end);
		phys_initrd_start = phys_initrd_size = 0;
	}
#endif

	return initrd_node;
}
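
/*
 * reserve_node_zero() marks, in node 0's bootmem map, the regions that
 * are already in use before the page allocator is up: the kernel image
 * (just its data for an XIP kernel), the kernel page directory
 * (swapper_pg_dir), the bootmem bitmap itself, and any machine-specific
 * low-memory carve-outs (screen or DMA memory).
 */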

/*
 * Reserve the various regions of node 0
 */
static __init void reserve_node_zero(unsigned int bootmap_pfn, unsigned int bootmap_pages)
{
	pg_data_t *pgdat = NODE_DATA(0);
	unsigned long res_size = 0;

	/*
	 * Register the kernel text and data with bootmem.
	 * Note that this can only be in node 0.
	 */
#ifdef CONFIG_XIP_KERNEL
	reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
#else
	reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
			     PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * And don't forget to reserve the allocator bitmap,
	 * which will be freed later.
	 */
	reserve_bootmem_node(pgdat, bootmap_pfn << PAGE_SHIFT,
			     bootmap_pages << PAGE_SHIFT);

	/*
	 * Hmm... This should go elsewhere, but we really really need to
	 * stop things allocating the low memory; ideally we need a better
	 * implementation of GFP_DMA which does not assume that DMA-able
	 * memory starts at zero.
	 */
	if (machine_is_integrator() || machine_is_cintegrator())
		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

	/*
	 * These should likewise go elsewhere.  They pre-reserve the
	 * screen memory region at the start of main system memory.
	 */
	if (machine_is_edb7211())
		res_size = 0x00020000;
	if (machine_is_p720t())
		res_size = 0x00014000;

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
	if (res_size)
		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
}

/*
 * Register all available RAM in this node with the bootmem allocator.
 */
static inline void free_bootmem_node_bank(int node, struct meminfo *mi)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int bank;

	for (bank = 0; bank < mi->nr_banks; bank++)
		if (mi->bank[bank].node == node)
			free_bootmem_node(pgdat, mi->bank[bank].start,
					  mi->bank[bank].size);
}

/*
 * Initialise the bootmem allocator for all nodes.  This is called
 * early during the architecture specific initialisation.
 */
static void __init bootmem_init(struct meminfo *mi)
{
	struct node_info node_info[MAX_NUMNODES], *np = node_info;
	unsigned int bootmap_pages, bootmap_pfn, map_pg;
	int node, initrd_node;

	bootmap_pages = find_memend_and_nodes(mi, np);
	bootmap_pfn   = find_bootmap_pfn(0, mi, bootmap_pages);
	initrd_node   = check_initrd(mi);

	map_pg = bootmap_pfn;

	/*
	 * Initialise the bootmem nodes.
	 *
	 * What we really want to do is:
	 *
	 *   unmap_all_regions_except_kernel();
	 *   for_each_node_in_reverse_order(node) {
	 *     map_node(node);
	 *     allocate_bootmem_map(node);
	 *     init_bootmem_node(node);
	 *     free_bootmem_node(node);
	 *   }
	 *
	 * but this is a 2.5-type change.  For now, we just set
	 * the nodes up in reverse order.
	 *
	 * (we could also do with rolling bootmem_init and paging_init
	 * into one generic "memory_init" type function).
	 */
	np += num_online_nodes() - 1;
	for (node = num_online_nodes() - 1; node >= 0; node--, np--) {
		/*
		 * If there are no pages in this node, ignore it.
		 * Note that node 0 must always have some pages.
		 */
		if (np->end == 0 || !node_online(node)) {
			if (node == 0)
				BUG();
			continue;
		}

		/*
		 * Initialise the bootmem allocator.
		 */
		init_bootmem_node(NODE_DATA(node), map_pg, np->start, np->end);
		free_bootmem_node_bank(node, mi);
		map_pg += np->bootmap_pages;

		/*
		 * If this is node 0, we need to reserve some areas ASAP -
		 * we may use bootmem on node 0 to setup the other nodes.
		 */
		if (node == 0)
			reserve_node_zero(bootmap_pfn, bootmap_pages);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size && initrd_node >= 0) {
		reserve_bootmem_node(NODE_DATA(initrd_node), phys_initrd_start,
				     phys_initrd_size);
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	BUG_ON(map_pg != bootmap_pfn + bootmap_pages);
}
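
/*
 * bootmem_init() above is called only from paging_init() below; the
 * per-node bootmem maps it sets up serve all early allocations and are
 * handed back to the page allocator by mem_init().
 */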

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	void *zero_page;
	int node;

	bootmem_init(mi);

	memcpy(&meminfo, mi, sizeof(meminfo));

	/*
	 * allocate the zero page.  Note that we count on this going ok.
	 */
	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);

	/*
	 * initialise the page tables.
	 */
	memtable_init(mi);
	if (mdesc->map_io)
		mdesc->map_io();
	flush_tlb_all();

	/*
	 * initialise the zones within each node
	 */
	for_each_online_node(node) {
		unsigned long zone_size[MAX_NR_ZONES];
		unsigned long zhole_size[MAX_NR_ZONES];
		struct bootmem_data *bdata;
		pg_data_t *pgdat;
		int i;

		/*
		 * Initialise the zone size information.
		 */
		for (i = 0; i < MAX_NR_ZONES; i++) {
			zone_size[i]  = 0;
			zhole_size[i] = 0;
		}

		pgdat = NODE_DATA(node);
		bdata = pgdat->bdata;

		/*
		 * The size of this node has already been determined.
		 * If we need to do anything fancy with the allocation
		 * of this memory to the zones, now is the time to do
		 * it.
		 */
		zone_size[0] = bdata->node_low_pfn -
				(bdata->node_boot_start >> PAGE_SHIFT);

		/*
		 * If this zone has zero size, skip it.
		 */
		if (!zone_size[0])
			continue;

		/*
		 * For each bank in this node, calculate the size of the
		 * holes.  holes = node_size - sum(bank_sizes_in_node)
		 */
		zhole_size[0] = zone_size[0];
		for (i = 0; i < mi->nr_banks; i++) {
			if (mi->bank[i].node != node)
				continue;

			zhole_size[0] -= mi->bank[i].size >> PAGE_SHIFT;
		}

		/*
		 * Adjust the sizes according to any special
		 * requirements for this machine type.
		 */
		arch_adjust_zones(node, zone_size, zhole_size);

		free_area_init_node(node, pgdat, zone_size,
			bdata->node_boot_start >> PAGE_SHIFT, zhole_size);
	}

	/*
	 * finish off the bad pages once
	 * the mem_map is initialised
	 */
	memzero(zero_page, PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
	unsigned int size = (end - addr) >> 10;

	for (; addr < end; addr += PAGE_SIZE) {
		struct page *page = virt_to_page(addr);
		ClearPageReserved(page);
		set_page_count(page, 1);
		free_page(addr);
		totalram_pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned int codepages, datapages, initpages;
	int i, node;

	codepages = &_etext - &_text;
	datapages = &_end - &__data_start;
	initpages = &__init_end - &__init_begin;

#ifndef CONFIG_DISCONTIGMEM
	max_mapnr = virt_to_page(high_memory) - mem_map;
#endif

	/*
	 * We may have non-contiguous memory.
	 */
	if (meminfo.nr_banks != 1)
		create_memmap_holes(&meminfo);

	/* this will put all unused low memory onto the freelists */
	for_each_online_node(node) {
		pg_data_t *pgdat = NODE_DATA(node);

		if (pgdat->node_spanned_pages != 0)
			totalram_pages += free_all_bootmem_node(pgdat);
	}

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
#endif

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");

	num_physpages = 0;
	for (i = 0; i < meminfo.nr_banks; i++) {
		num_physpages += meminfo.bank[i].size >> PAGE_SHIFT;
		printk(" %ldMB", meminfo.bank[i].size >> 20);
	}

	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
		"%dK data, %dK init)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		codepages >> 10, datapages >> 10, initpages >> 10);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	if (!machine_is_integrator() && !machine_is_cintegrator()) {
		free_area((unsigned long)(&__init_begin),
			  (unsigned long)(&__init_end),
			  "init");
	}
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_area(start, end, "initrd");
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif