1 /* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17 #include <linux/stddef.h> 18 #include <linux/mm.h> 19 #include <linux/swap.h> 20 #include <linux/interrupt.h> 21 #include <linux/pagemap.h> 22 #include <linux/bootmem.h> 23 #include <linux/compiler.h> 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/suspend.h> 27 #include <linux/pagevec.h> 28 #include <linux/blkdev.h> 29 #include <linux/slab.h> 30 #include <linux/notifier.h> 31 #include <linux/topology.h> 32 #include <linux/sysctl.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/memory_hotplug.h> 36 #include <linux/nodemask.h> 37 #include <linux/vmalloc.h> 38 #include <linux/mempolicy.h> 39 #include <linux/stop_machine.h> 40 #include <linux/sort.h> 41 #include <linux/pfn.h> 42 #include <linux/backing-dev.h> 43 #include <linux/fault-inject.h> 44 45 #include <asm/tlbflush.h> 46 #include <asm/div64.h> 47 #include "internal.h" 48 49 /* 50 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 51 * initializer cleaner 52 */ 53 nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; 54 EXPORT_SYMBOL(node_online_map); 55 nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 56 EXPORT_SYMBOL(node_possible_map); 57 unsigned long totalram_pages __read_mostly; 58 unsigned long totalreserve_pages __read_mostly; 59 long nr_swap_pages; 60 int percpu_pagelist_fraction; 61 62 static void __free_pages_ok(struct page *page, unsigned int order); 63 64 /* 65 * results with 256, 32 in the lowmem_reserve sysctl: 66 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 67 * 1G machine -> (16M dma, 784M normal, 224M high) 68 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 69 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 70 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 71 * 72 * TBD: should special case ZONE_DMA32 machines here - in those we normally 73 * don't need any ZONE_NORMAL reservation 74 */ 75 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 76 #ifdef CONFIG_ZONE_DMA 77 256, 78 #endif 79 #ifdef CONFIG_ZONE_DMA32 80 256, 81 #endif 82 #ifdef CONFIG_HIGHMEM 83 32 84 #endif 85 }; 86 87 EXPORT_SYMBOL(totalram_pages); 88 89 static char * const zone_names[MAX_NR_ZONES] = { 90 #ifdef CONFIG_ZONE_DMA 91 "DMA", 92 #endif 93 #ifdef CONFIG_ZONE_DMA32 94 "DMA32", 95 #endif 96 "Normal", 97 #ifdef CONFIG_HIGHMEM 98 "HighMem" 99 #endif 100 }; 101 102 int min_free_kbytes = 1024; 103 104 unsigned long __meminitdata nr_kernel_pages; 105 unsigned long __meminitdata nr_all_pages; 106 static unsigned long __meminitdata dma_reserve; 107 108 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 109 /* 110 * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct 111 * ranges of memory (RAM) that may be registered with add_active_range(). 112 * Ranges passed to add_active_range() will be merged if possible 113 * so the number of times add_active_range() can be called is 114 * related to the number of nodes and the number of holes 115 */ 116 #ifdef CONFIG_MAX_ACTIVE_REGIONS 117 /* Allow an architecture to set MAX_ACTIVE_REGIONS to save memory */ 118 #define MAX_ACTIVE_REGIONS CONFIG_MAX_ACTIVE_REGIONS 119 #else 120 #if MAX_NUMNODES >= 32 121 /* If there can be many nodes, allow up to 50 holes per node */ 122 #define MAX_ACTIVE_REGIONS (MAX_NUMNODES*50) 123 #else 124 /* By default, allow up to 256 distinct regions */ 125 #define MAX_ACTIVE_REGIONS 256 126 #endif 127 #endif 128 129 struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS]; 130 int __meminitdata nr_nodemap_entries; 131 unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 132 unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 133 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 134 unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES]; 135 unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES]; 136 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 137 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 138 139 #if MAX_NUMNODES > 1 140 int nr_node_ids __read_mostly = MAX_NUMNODES; 141 EXPORT_SYMBOL(nr_node_ids); 142 #endif 143 144 #ifdef CONFIG_DEBUG_VM 145 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 146 { 147 int ret = 0; 148 unsigned seq; 149 unsigned long pfn = page_to_pfn(page); 150 151 do { 152 seq = zone_span_seqbegin(zone); 153 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 154 ret = 1; 155 else if (pfn < zone->zone_start_pfn) 156 ret = 1; 157 } while (zone_span_seqretry(zone, seq)); 158 159 return ret; 160 } 161 162 static int page_is_consistent(struct zone *zone, struct page *page) 163 { 164 if (!pfn_valid_within(page_to_pfn(page))) 165 return 0; 166 if (zone != page_zone(page)) 167 return 0; 168 169 return 1; 170 } 171 /* 172 * Temporary debugging check for pages not lying within a given zone. 173 */ 174 static int bad_range(struct zone *zone, struct page *page) 175 { 176 if (page_outside_zone_boundaries(zone, page)) 177 return 1; 178 if (!page_is_consistent(zone, page)) 179 return 1; 180 181 return 0; 182 } 183 #else 184 static inline int bad_range(struct zone *zone, struct page *page) 185 { 186 return 0; 187 } 188 #endif 189 190 static void bad_page(struct page *page) 191 { 192 printk(KERN_EMERG "Bad page state in process '%s'\n" 193 KERN_EMERG "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" 194 KERN_EMERG "Trying to fix it up, but a reboot is needed\n" 195 KERN_EMERG "Backtrace:\n", 196 current->comm, page, (int)(2*sizeof(unsigned long)), 197 (unsigned long)page->flags, page->mapping, 198 page_mapcount(page), page_count(page)); 199 dump_stack(); 200 page->flags &= ~(1 << PG_lru | 201 1 << PG_private | 202 1 << PG_locked | 203 1 << PG_active | 204 1 << PG_dirty | 205 1 << PG_reclaim | 206 1 << PG_slab | 207 1 << PG_swapcache | 208 1 << PG_writeback | 209 1 << PG_buddy ); 210 set_page_count(page, 0); 211 reset_page_mapcount(page); 212 page->mapping = NULL; 213 add_taint(TAINT_BAD_PAGE); 214 } 215 216 /* 217 * Higher-order pages are called "compound pages". They are structured thusly: 218 * 219 * The first PAGE_SIZE page is called the "head page". 220 * 221 * The remaining PAGE_SIZE pages are called "tail pages". 222 * 223 * All pages have PG_compound set. All pages have their ->private pointing at 224 * the head page (even the head page has this). 225 * 226 * The first tail page's ->lru.next holds the address of the compound page's 227 * put_page() function. Its ->lru.prev holds the order of allocation. 228 * This usage means that zero-order pages may not be compound. 229 */ 230 231 static void free_compound_page(struct page *page) 232 { 233 __free_pages_ok(page, compound_order(page)); 234 } 235 236 static void prep_compound_page(struct page *page, unsigned long order) 237 { 238 int i; 239 int nr_pages = 1 << order; 240 241 set_compound_page_dtor(page, free_compound_page); 242 set_compound_order(page, order); 243 __SetPageHead(page); 244 for (i = 1; i < nr_pages; i++) { 245 struct page *p = page + i; 246 247 __SetPageTail(p); 248 p->first_page = page; 249 } 250 } 251 252 static void destroy_compound_page(struct page *page, unsigned long order) 253 { 254 int i; 255 int nr_pages = 1 << order; 256 257 if (unlikely(compound_order(page) != order)) 258 bad_page(page); 259 260 if (unlikely(!PageHead(page))) 261 bad_page(page); 262 __ClearPageHead(page); 263 for (i = 1; i < nr_pages; i++) { 264 struct page *p = page + i; 265 266 if (unlikely(!PageTail(p) | 267 (p->first_page != page))) 268 bad_page(page); 269 __ClearPageTail(p); 270 } 271 } 272 273 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 274 { 275 int i; 276 277 VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 278 /* 279 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 280 * and __GFP_HIGHMEM from hard or soft interrupt context. 281 */ 282 VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt()); 283 for (i = 0; i < (1 << order); i++) 284 clear_highpage(page + i); 285 } 286 287 /* 288 * function for dealing with page's order in buddy system. 289 * zone->lock is already acquired when we use these. 290 * So, we don't need atomic page->flags operations here. 291 */ 292 static inline unsigned long page_order(struct page *page) 293 { 294 return page_private(page); 295 } 296 297 static inline void set_page_order(struct page *page, int order) 298 { 299 set_page_private(page, order); 300 __SetPageBuddy(page); 301 } 302 303 static inline void rmv_page_order(struct page *page) 304 { 305 __ClearPageBuddy(page); 306 set_page_private(page, 0); 307 } 308 309 /* 310 * Locate the struct page for both the matching buddy in our 311 * pair (buddy1) and the combined O(n+1) page they form (page). 312 * 313 * 1) Any buddy B1 will have an order O twin B2 which satisfies 314 * the following equation: 315 * B2 = B1 ^ (1 << O) 316 * For example, if the starting buddy (buddy2) is #8 its order 317 * 1 buddy is #10: 318 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 319 * 320 * 2) Any buddy B will have an order O+1 parent P which 321 * satisfies the following equation: 322 * P = B & ~(1 << O) 323 * 324 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER 325 */ 326 static inline struct page * 327 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 328 { 329 unsigned long buddy_idx = page_idx ^ (1 << order); 330 331 return page + (buddy_idx - page_idx); 332 } 333 334 static inline unsigned long 335 __find_combined_index(unsigned long page_idx, unsigned int order) 336 { 337 return (page_idx & ~(1 << order)); 338 } 339 340 /* 341 * This function checks whether a page is free && is the buddy 342 * we can do coalesce a page and its buddy if 343 * (a) the buddy is not in a hole && 344 * (b) the buddy is in the buddy system && 345 * (c) a page and its buddy have the same order && 346 * (d) a page and its buddy are in the same zone. 347 * 348 * For recording whether a page is in the buddy system, we use PG_buddy. 349 * Setting, clearing, and testing PG_buddy is serialized by zone->lock. 350 * 351 * For recording page's order, we use page_private(page). 352 */ 353 static inline int page_is_buddy(struct page *page, struct page *buddy, 354 int order) 355 { 356 if (!pfn_valid_within(page_to_pfn(buddy))) 357 return 0; 358 359 if (page_zone_id(page) != page_zone_id(buddy)) 360 return 0; 361 362 if (PageBuddy(buddy) && page_order(buddy) == order) { 363 BUG_ON(page_count(buddy) != 0); 364 return 1; 365 } 366 return 0; 367 } 368 369 /* 370 * Freeing function for a buddy system allocator. 371 * 372 * The concept of a buddy system is to maintain direct-mapped table 373 * (containing bit values) for memory blocks of various "orders". 374 * The bottom level table contains the map for the smallest allocatable 375 * units of memory (here, pages), and each level above it describes 376 * pairs of units from the levels below, hence, "buddies". 377 * At a high level, all that happens here is marking the table entry 378 * at the bottom level available, and propagating the changes upward 379 * as necessary, plus some accounting needed to play nicely with other 380 * parts of the VM system. 381 * At each level, we keep a list of pages, which are heads of continuous 382 * free pages of length of (1 << order) and marked with PG_buddy. Page's 383 * order is recorded in page_private(page) field. 384 * So when we are allocating or freeing one, we can derive the state of the 385 * other. That is, if we allocate a small block, and both were 386 * free, the remainder of the region must be split into blocks. 387 * If a block is freed, and its buddy is also free, then this 388 * triggers coalescing into a block of larger size. 389 * 390 * -- wli 391 */ 392 393 static inline void __free_one_page(struct page *page, 394 struct zone *zone, unsigned int order) 395 { 396 unsigned long page_idx; 397 int order_size = 1 << order; 398 399 if (unlikely(PageCompound(page))) 400 destroy_compound_page(page, order); 401 402 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 403 404 VM_BUG_ON(page_idx & (order_size - 1)); 405 VM_BUG_ON(bad_range(zone, page)); 406 407 __mod_zone_page_state(zone, NR_FREE_PAGES, order_size); 408 while (order < MAX_ORDER-1) { 409 unsigned long combined_idx; 410 struct free_area *area; 411 struct page *buddy; 412 413 buddy = __page_find_buddy(page, page_idx, order); 414 if (!page_is_buddy(page, buddy, order)) 415 break; /* Move the buddy up one level. */ 416 417 list_del(&buddy->lru); 418 area = zone->free_area + order; 419 area->nr_free--; 420 rmv_page_order(buddy); 421 combined_idx = __find_combined_index(page_idx, order); 422 page = page + (combined_idx - page_idx); 423 page_idx = combined_idx; 424 order++; 425 } 426 set_page_order(page, order); 427 list_add(&page->lru, &zone->free_area[order].free_list); 428 zone->free_area[order].nr_free++; 429 } 430 431 static inline int free_pages_check(struct page *page) 432 { 433 if (unlikely(page_mapcount(page) | 434 (page->mapping != NULL) | 435 (page_count(page) != 0) | 436 (page->flags & ( 437 1 << PG_lru | 438 1 << PG_private | 439 1 << PG_locked | 440 1 << PG_active | 441 1 << PG_slab | 442 1 << PG_swapcache | 443 1 << PG_writeback | 444 1 << PG_reserved | 445 1 << PG_buddy )))) 446 bad_page(page); 447 /* 448 * PageReclaim == PageTail. It is only an error 449 * for PageReclaim to be set if PageCompound is clear. 450 */ 451 if (unlikely(!PageCompound(page) && PageReclaim(page))) 452 bad_page(page); 453 if (PageDirty(page)) 454 __ClearPageDirty(page); 455 /* 456 * For now, we report if PG_reserved was found set, but do not 457 * clear it, and do not free the page. But we shall soon need 458 * to do more, for when the ZERO_PAGE count wraps negative. 459 */ 460 return PageReserved(page); 461 } 462 463 /* 464 * Frees a list of pages. 465 * Assumes all pages on list are in same zone, and of same order. 466 * count is the number of pages to free. 467 * 468 * If the zone was previously in an "all pages pinned" state then look to 469 * see if this freeing clears that state. 470 * 471 * And clear the zone's pages_scanned counter, to hold off the "all pages are 472 * pinned" detection logic. 473 */ 474 static void free_pages_bulk(struct zone *zone, int count, 475 struct list_head *list, int order) 476 { 477 spin_lock(&zone->lock); 478 zone->all_unreclaimable = 0; 479 zone->pages_scanned = 0; 480 while (count--) { 481 struct page *page; 482 483 VM_BUG_ON(list_empty(list)); 484 page = list_entry(list->prev, struct page, lru); 485 /* have to delete it as __free_one_page list manipulates */ 486 list_del(&page->lru); 487 __free_one_page(page, zone, order); 488 } 489 spin_unlock(&zone->lock); 490 } 491 492 static void free_one_page(struct zone *zone, struct page *page, int order) 493 { 494 spin_lock(&zone->lock); 495 zone->all_unreclaimable = 0; 496 zone->pages_scanned = 0; 497 __free_one_page(page, zone, order); 498 spin_unlock(&zone->lock); 499 } 500 501 static void __free_pages_ok(struct page *page, unsigned int order) 502 { 503 unsigned long flags; 504 int i; 505 int reserved = 0; 506 507 for (i = 0 ; i < (1 << order) ; ++i) 508 reserved += free_pages_check(page + i); 509 if (reserved) 510 return; 511 512 if (!PageHighMem(page)) 513 debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order); 514 arch_free_page(page, order); 515 kernel_map_pages(page, 1 << order, 0); 516 517 local_irq_save(flags); 518 __count_vm_events(PGFREE, 1 << order); 519 free_one_page(page_zone(page), page, order); 520 local_irq_restore(flags); 521 } 522 523 /* 524 * permit the bootmem allocator to evade page validation on high-order frees 525 */ 526 void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) 527 { 528 if (order == 0) { 529 __ClearPageReserved(page); 530 set_page_count(page, 0); 531 set_page_refcounted(page); 532 __free_page(page); 533 } else { 534 int loop; 535 536 prefetchw(page); 537 for (loop = 0; loop < BITS_PER_LONG; loop++) { 538 struct page *p = &page[loop]; 539 540 if (loop + 1 < BITS_PER_LONG) 541 prefetchw(p + 1); 542 __ClearPageReserved(p); 543 set_page_count(p, 0); 544 } 545 546 set_page_refcounted(page); 547 __free_pages(page, order); 548 } 549 } 550 551 552 /* 553 * The order of subdivision here is critical for the IO subsystem. 554 * Please do not alter this order without good reasons and regression 555 * testing. Specifically, as large blocks of memory are subdivided, 556 * the order in which smaller blocks are delivered depends on the order 557 * they're subdivided in this function. This is the primary factor 558 * influencing the order in which pages are delivered to the IO 559 * subsystem according to empirical testing, and this is also justified 560 * by considering the behavior of a buddy system containing a single 561 * large block of memory acted on by a series of small allocations. 562 * This behavior is a critical factor in sglist merging's success. 563 * 564 * -- wli 565 */ 566 static inline void expand(struct zone *zone, struct page *page, 567 int low, int high, struct free_area *area) 568 { 569 unsigned long size = 1 << high; 570 571 while (high > low) { 572 area--; 573 high--; 574 size >>= 1; 575 VM_BUG_ON(bad_range(zone, &page[size])); 576 list_add(&page[size].lru, &area->free_list); 577 area->nr_free++; 578 set_page_order(&page[size], high); 579 } 580 } 581 582 /* 583 * This page is about to be returned from the page allocator 584 */ 585 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 586 { 587 if (unlikely(page_mapcount(page) | 588 (page->mapping != NULL) | 589 (page_count(page) != 0) | 590 (page->flags & ( 591 1 << PG_lru | 592 1 << PG_private | 593 1 << PG_locked | 594 1 << PG_active | 595 1 << PG_dirty | 596 1 << PG_reclaim | 597 1 << PG_slab | 598 1 << PG_swapcache | 599 1 << PG_writeback | 600 1 << PG_reserved | 601 1 << PG_buddy )))) 602 bad_page(page); 603 604 /* 605 * For now, we report if PG_reserved was found set, but do not 606 * clear it, and do not allocate the page: as a safety net. 607 */ 608 if (PageReserved(page)) 609 return 1; 610 611 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 612 1 << PG_referenced | 1 << PG_arch_1 | 613 1 << PG_owner_priv_1 | 1 << PG_mappedtodisk); 614 set_page_private(page, 0); 615 set_page_refcounted(page); 616 617 arch_alloc_page(page, order); 618 kernel_map_pages(page, 1 << order, 1); 619 620 if (gfp_flags & __GFP_ZERO) 621 prep_zero_page(page, order, gfp_flags); 622 623 if (order && (gfp_flags & __GFP_COMP)) 624 prep_compound_page(page, order); 625 626 return 0; 627 } 628 629 /* 630 * Do the hard work of removing an element from the buddy allocator. 631 * Call me with the zone->lock already held. 632 */ 633 static struct page *__rmqueue(struct zone *zone, unsigned int order) 634 { 635 struct free_area * area; 636 unsigned int current_order; 637 struct page *page; 638 639 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 640 area = zone->free_area + current_order; 641 if (list_empty(&area->free_list)) 642 continue; 643 644 page = list_entry(area->free_list.next, struct page, lru); 645 list_del(&page->lru); 646 rmv_page_order(page); 647 area->nr_free--; 648 __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); 649 expand(zone, page, order, current_order, area); 650 return page; 651 } 652 653 return NULL; 654 } 655 656 /* 657 * Obtain a specified number of elements from the buddy allocator, all under 658 * a single hold of the lock, for efficiency. Add them to the supplied list. 659 * Returns the number of new pages which were placed at *list. 660 */ 661 static int rmqueue_bulk(struct zone *zone, unsigned int order, 662 unsigned long count, struct list_head *list) 663 { 664 int i; 665 666 spin_lock(&zone->lock); 667 for (i = 0; i < count; ++i) { 668 struct page *page = __rmqueue(zone, order); 669 if (unlikely(page == NULL)) 670 break; 671 list_add_tail(&page->lru, list); 672 } 673 spin_unlock(&zone->lock); 674 return i; 675 } 676 677 #ifdef CONFIG_NUMA 678 /* 679 * Called from the vmstat counter updater to drain pagesets of this 680 * currently executing processor on remote nodes after they have 681 * expired. 682 * 683 * Note that this function must be called with the thread pinned to 684 * a single processor. 685 */ 686 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 687 { 688 unsigned long flags; 689 int to_drain; 690 691 local_irq_save(flags); 692 if (pcp->count >= pcp->batch) 693 to_drain = pcp->batch; 694 else 695 to_drain = pcp->count; 696 free_pages_bulk(zone, to_drain, &pcp->list, 0); 697 pcp->count -= to_drain; 698 local_irq_restore(flags); 699 } 700 #endif 701 702 static void __drain_pages(unsigned int cpu) 703 { 704 unsigned long flags; 705 struct zone *zone; 706 int i; 707 708 for_each_zone(zone) { 709 struct per_cpu_pageset *pset; 710 711 if (!populated_zone(zone)) 712 continue; 713 714 pset = zone_pcp(zone, cpu); 715 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 716 struct per_cpu_pages *pcp; 717 718 pcp = &pset->pcp[i]; 719 local_irq_save(flags); 720 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 721 pcp->count = 0; 722 local_irq_restore(flags); 723 } 724 } 725 } 726 727 #ifdef CONFIG_PM 728 729 void mark_free_pages(struct zone *zone) 730 { 731 unsigned long pfn, max_zone_pfn; 732 unsigned long flags; 733 int order; 734 struct list_head *curr; 735 736 if (!zone->spanned_pages) 737 return; 738 739 spin_lock_irqsave(&zone->lock, flags); 740 741 max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; 742 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 743 if (pfn_valid(pfn)) { 744 struct page *page = pfn_to_page(pfn); 745 746 if (!swsusp_page_is_forbidden(page)) 747 swsusp_unset_page_free(page); 748 } 749 750 for (order = MAX_ORDER - 1; order >= 0; --order) 751 list_for_each(curr, &zone->free_area[order].free_list) { 752 unsigned long i; 753 754 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 755 for (i = 0; i < (1UL << order); i++) 756 swsusp_set_page_free(pfn_to_page(pfn + i)); 757 } 758 759 spin_unlock_irqrestore(&zone->lock, flags); 760 } 761 762 /* 763 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 764 */ 765 void drain_local_pages(void) 766 { 767 unsigned long flags; 768 769 local_irq_save(flags); 770 __drain_pages(smp_processor_id()); 771 local_irq_restore(flags); 772 } 773 #endif /* CONFIG_PM */ 774 775 /* 776 * Free a 0-order page 777 */ 778 static void fastcall free_hot_cold_page(struct page *page, int cold) 779 { 780 struct zone *zone = page_zone(page); 781 struct per_cpu_pages *pcp; 782 unsigned long flags; 783 784 if (PageAnon(page)) 785 page->mapping = NULL; 786 if (free_pages_check(page)) 787 return; 788 789 if (!PageHighMem(page)) 790 debug_check_no_locks_freed(page_address(page), PAGE_SIZE); 791 arch_free_page(page, 0); 792 kernel_map_pages(page, 1, 0); 793 794 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 795 local_irq_save(flags); 796 __count_vm_event(PGFREE); 797 list_add(&page->lru, &pcp->list); 798 pcp->count++; 799 if (pcp->count >= pcp->high) { 800 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 801 pcp->count -= pcp->batch; 802 } 803 local_irq_restore(flags); 804 put_cpu(); 805 } 806 807 void fastcall free_hot_page(struct page *page) 808 { 809 free_hot_cold_page(page, 0); 810 } 811 812 void fastcall free_cold_page(struct page *page) 813 { 814 free_hot_cold_page(page, 1); 815 } 816 817 /* 818 * split_page takes a non-compound higher-order page, and splits it into 819 * n (1<<order) sub-pages: page[0..n] 820 * Each sub-page must be freed individually. 821 * 822 * Note: this is probably too low level an operation for use in drivers. 823 * Please consult with lkml before using this in your driver. 824 */ 825 void split_page(struct page *page, unsigned int order) 826 { 827 int i; 828 829 VM_BUG_ON(PageCompound(page)); 830 VM_BUG_ON(!page_count(page)); 831 for (i = 1; i < (1 << order); i++) 832 set_page_refcounted(page + i); 833 } 834 835 /* 836 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 837 * we cheat by calling it from here, in the order > 0 path. Saves a branch 838 * or two. 839 */ 840 static struct page *buffered_rmqueue(struct zonelist *zonelist, 841 struct zone *zone, int order, gfp_t gfp_flags) 842 { 843 unsigned long flags; 844 struct page *page; 845 int cold = !!(gfp_flags & __GFP_COLD); 846 int cpu; 847 848 again: 849 cpu = get_cpu(); 850 if (likely(order == 0)) { 851 struct per_cpu_pages *pcp; 852 853 pcp = &zone_pcp(zone, cpu)->pcp[cold]; 854 local_irq_save(flags); 855 if (!pcp->count) { 856 pcp->count = rmqueue_bulk(zone, 0, 857 pcp->batch, &pcp->list); 858 if (unlikely(!pcp->count)) 859 goto failed; 860 } 861 page = list_entry(pcp->list.next, struct page, lru); 862 list_del(&page->lru); 863 pcp->count--; 864 } else { 865 spin_lock_irqsave(&zone->lock, flags); 866 page = __rmqueue(zone, order); 867 spin_unlock(&zone->lock); 868 if (!page) 869 goto failed; 870 } 871 872 __count_zone_vm_events(PGALLOC, zone, 1 << order); 873 zone_statistics(zonelist, zone); 874 local_irq_restore(flags); 875 put_cpu(); 876 877 VM_BUG_ON(bad_range(zone, page)); 878 if (prep_new_page(page, order, gfp_flags)) 879 goto again; 880 return page; 881 882 failed: 883 local_irq_restore(flags); 884 put_cpu(); 885 return NULL; 886 } 887 888 #define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 889 #define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 890 #define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 891 #define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 892 #define ALLOC_HARDER 0x10 /* try to alloc harder */ 893 #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 894 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 895 896 #ifdef CONFIG_FAIL_PAGE_ALLOC 897 898 static struct fail_page_alloc_attr { 899 struct fault_attr attr; 900 901 u32 ignore_gfp_highmem; 902 u32 ignore_gfp_wait; 903 904 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 905 906 struct dentry *ignore_gfp_highmem_file; 907 struct dentry *ignore_gfp_wait_file; 908 909 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 910 911 } fail_page_alloc = { 912 .attr = FAULT_ATTR_INITIALIZER, 913 .ignore_gfp_wait = 1, 914 .ignore_gfp_highmem = 1, 915 }; 916 917 static int __init setup_fail_page_alloc(char *str) 918 { 919 return setup_fault_attr(&fail_page_alloc.attr, str); 920 } 921 __setup("fail_page_alloc=", setup_fail_page_alloc); 922 923 static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 924 { 925 if (gfp_mask & __GFP_NOFAIL) 926 return 0; 927 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 928 return 0; 929 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 930 return 0; 931 932 return should_fail(&fail_page_alloc.attr, 1 << order); 933 } 934 935 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 936 937 static int __init fail_page_alloc_debugfs(void) 938 { 939 mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 940 struct dentry *dir; 941 int err; 942 943 err = init_fault_attr_dentries(&fail_page_alloc.attr, 944 "fail_page_alloc"); 945 if (err) 946 return err; 947 dir = fail_page_alloc.attr.dentries.dir; 948 949 fail_page_alloc.ignore_gfp_wait_file = 950 debugfs_create_bool("ignore-gfp-wait", mode, dir, 951 &fail_page_alloc.ignore_gfp_wait); 952 953 fail_page_alloc.ignore_gfp_highmem_file = 954 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 955 &fail_page_alloc.ignore_gfp_highmem); 956 957 if (!fail_page_alloc.ignore_gfp_wait_file || 958 !fail_page_alloc.ignore_gfp_highmem_file) { 959 err = -ENOMEM; 960 debugfs_remove(fail_page_alloc.ignore_gfp_wait_file); 961 debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file); 962 cleanup_fault_attr_dentries(&fail_page_alloc.attr); 963 } 964 965 return err; 966 } 967 968 late_initcall(fail_page_alloc_debugfs); 969 970 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 971 972 #else /* CONFIG_FAIL_PAGE_ALLOC */ 973 974 static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 975 { 976 return 0; 977 } 978 979 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 980 981 /* 982 * Return 1 if free pages are above 'mark'. This takes into account the order 983 * of the allocation. 984 */ 985 int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 986 int classzone_idx, int alloc_flags) 987 { 988 /* free_pages my go negative - that's OK */ 989 long min = mark; 990 long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1; 991 int o; 992 993 if (alloc_flags & ALLOC_HIGH) 994 min -= min / 2; 995 if (alloc_flags & ALLOC_HARDER) 996 min -= min / 4; 997 998 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 999 return 0; 1000 for (o = 0; o < order; o++) { 1001 /* At the next order, this order's pages become unavailable */ 1002 free_pages -= z->free_area[o].nr_free << o; 1003 1004 /* Require fewer higher order pages to be free */ 1005 min >>= 1; 1006 1007 if (free_pages <= min) 1008 return 0; 1009 } 1010 return 1; 1011 } 1012 1013 #ifdef CONFIG_NUMA 1014 /* 1015 * zlc_setup - Setup for "zonelist cache". Uses cached zone data to 1016 * skip over zones that are not allowed by the cpuset, or that have 1017 * been recently (in last second) found to be nearly full. See further 1018 * comments in mmzone.h. Reduces cache footprint of zonelist scans 1019 * that have to skip over alot of full or unallowed zones. 1020 * 1021 * If the zonelist cache is present in the passed in zonelist, then 1022 * returns a pointer to the allowed node mask (either the current 1023 * tasks mems_allowed, or node_online_map.) 1024 * 1025 * If the zonelist cache is not available for this zonelist, does 1026 * nothing and returns NULL. 1027 * 1028 * If the fullzones BITMAP in the zonelist cache is stale (more than 1029 * a second since last zap'd) then we zap it out (clear its bits.) 1030 * 1031 * We hold off even calling zlc_setup, until after we've checked the 1032 * first zone in the zonelist, on the theory that most allocations will 1033 * be satisfied from that first zone, so best to examine that zone as 1034 * quickly as we can. 1035 */ 1036 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1037 { 1038 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1039 nodemask_t *allowednodes; /* zonelist_cache approximation */ 1040 1041 zlc = zonelist->zlcache_ptr; 1042 if (!zlc) 1043 return NULL; 1044 1045 if (jiffies - zlc->last_full_zap > 1 * HZ) { 1046 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1047 zlc->last_full_zap = jiffies; 1048 } 1049 1050 allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ? 1051 &cpuset_current_mems_allowed : 1052 &node_online_map; 1053 return allowednodes; 1054 } 1055 1056 /* 1057 * Given 'z' scanning a zonelist, run a couple of quick checks to see 1058 * if it is worth looking at further for free memory: 1059 * 1) Check that the zone isn't thought to be full (doesn't have its 1060 * bit set in the zonelist_cache fullzones BITMAP). 1061 * 2) Check that the zones node (obtained from the zonelist_cache 1062 * z_to_n[] mapping) is allowed in the passed in allowednodes mask. 1063 * Return true (non-zero) if zone is worth looking at further, or 1064 * else return false (zero) if it is not. 1065 * 1066 * This check -ignores- the distinction between various watermarks, 1067 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is 1068 * found to be full for any variation of these watermarks, it will 1069 * be considered full for up to one second by all requests, unless 1070 * we are so low on memory on all allowed nodes that we are forced 1071 * into the second scan of the zonelist. 1072 * 1073 * In the second scan we ignore this zonelist cache and exactly 1074 * apply the watermarks to all zones, even it is slower to do so. 1075 * We are low on memory in the second scan, and should leave no stone 1076 * unturned looking for a free page. 1077 */ 1078 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, 1079 nodemask_t *allowednodes) 1080 { 1081 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1082 int i; /* index of *z in zonelist zones */ 1083 int n; /* node that zone *z is on */ 1084 1085 zlc = zonelist->zlcache_ptr; 1086 if (!zlc) 1087 return 1; 1088 1089 i = z - zonelist->zones; 1090 n = zlc->z_to_n[i]; 1091 1092 /* This zone is worth trying if it is allowed but not full */ 1093 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones); 1094 } 1095 1096 /* 1097 * Given 'z' scanning a zonelist, set the corresponding bit in 1098 * zlc->fullzones, so that subsequent attempts to allocate a page 1099 * from that zone don't waste time re-examining it. 1100 */ 1101 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) 1102 { 1103 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1104 int i; /* index of *z in zonelist zones */ 1105 1106 zlc = zonelist->zlcache_ptr; 1107 if (!zlc) 1108 return; 1109 1110 i = z - zonelist->zones; 1111 1112 set_bit(i, zlc->fullzones); 1113 } 1114 1115 #else /* CONFIG_NUMA */ 1116 1117 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1118 { 1119 return NULL; 1120 } 1121 1122 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z, 1123 nodemask_t *allowednodes) 1124 { 1125 return 1; 1126 } 1127 1128 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z) 1129 { 1130 } 1131 #endif /* CONFIG_NUMA */ 1132 1133 /* 1134 * get_page_from_freelist goes through the zonelist trying to allocate 1135 * a page. 1136 */ 1137 static struct page * 1138 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 1139 struct zonelist *zonelist, int alloc_flags) 1140 { 1141 struct zone **z; 1142 struct page *page = NULL; 1143 int classzone_idx = zone_idx(zonelist->zones[0]); 1144 struct zone *zone; 1145 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1146 int zlc_active = 0; /* set if using zonelist_cache */ 1147 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1148 1149 zonelist_scan: 1150 /* 1151 * Scan zonelist, looking for a zone with enough free. 1152 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1153 */ 1154 z = zonelist->zones; 1155 1156 do { 1157 if (NUMA_BUILD && zlc_active && 1158 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1159 continue; 1160 zone = *z; 1161 if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) && 1162 zone->zone_pgdat != zonelist->zones[0]->zone_pgdat)) 1163 break; 1164 if ((alloc_flags & ALLOC_CPUSET) && 1165 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1166 goto try_next_zone; 1167 1168 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 1169 unsigned long mark; 1170 if (alloc_flags & ALLOC_WMARK_MIN) 1171 mark = zone->pages_min; 1172 else if (alloc_flags & ALLOC_WMARK_LOW) 1173 mark = zone->pages_low; 1174 else 1175 mark = zone->pages_high; 1176 if (!zone_watermark_ok(zone, order, mark, 1177 classzone_idx, alloc_flags)) { 1178 if (!zone_reclaim_mode || 1179 !zone_reclaim(zone, gfp_mask, order)) 1180 goto this_zone_full; 1181 } 1182 } 1183 1184 page = buffered_rmqueue(zonelist, zone, order, gfp_mask); 1185 if (page) 1186 break; 1187 this_zone_full: 1188 if (NUMA_BUILD) 1189 zlc_mark_zone_full(zonelist, z); 1190 try_next_zone: 1191 if (NUMA_BUILD && !did_zlc_setup) { 1192 /* we do zlc_setup after the first zone is tried */ 1193 allowednodes = zlc_setup(zonelist, alloc_flags); 1194 zlc_active = 1; 1195 did_zlc_setup = 1; 1196 } 1197 } while (*(++z) != NULL); 1198 1199 if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) { 1200 /* Disable zlc cache for second zonelist scan */ 1201 zlc_active = 0; 1202 goto zonelist_scan; 1203 } 1204 return page; 1205 } 1206 1207 /* 1208 * This is the 'heart' of the zoned buddy allocator. 1209 */ 1210 struct page * fastcall 1211 __alloc_pages(gfp_t gfp_mask, unsigned int order, 1212 struct zonelist *zonelist) 1213 { 1214 const gfp_t wait = gfp_mask & __GFP_WAIT; 1215 struct zone **z; 1216 struct page *page; 1217 struct reclaim_state reclaim_state; 1218 struct task_struct *p = current; 1219 int do_retry; 1220 int alloc_flags; 1221 int did_some_progress; 1222 1223 might_sleep_if(wait); 1224 1225 if (should_fail_alloc_page(gfp_mask, order)) 1226 return NULL; 1227 1228 restart: 1229 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 1230 1231 if (unlikely(*z == NULL)) { 1232 /* Should this ever happen?? */ 1233 return NULL; 1234 } 1235 1236 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1237 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 1238 if (page) 1239 goto got_pg; 1240 1241 /* 1242 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 1243 * __GFP_NOWARN set) should not cause reclaim since the subsystem 1244 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 1245 * using a larger set of nodes after it has established that the 1246 * allowed per node queues are empty and that nodes are 1247 * over allocated. 1248 */ 1249 if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 1250 goto nopage; 1251 1252 for (z = zonelist->zones; *z; z++) 1253 wakeup_kswapd(*z, order); 1254 1255 /* 1256 * OK, we're below the kswapd watermark and have kicked background 1257 * reclaim. Now things get more complex, so set up alloc_flags according 1258 * to how we want to proceed. 1259 * 1260 * The caller may dip into page reserves a bit more if the caller 1261 * cannot run direct reclaim, or if the caller has realtime scheduling 1262 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 1263 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 1264 */ 1265 alloc_flags = ALLOC_WMARK_MIN; 1266 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 1267 alloc_flags |= ALLOC_HARDER; 1268 if (gfp_mask & __GFP_HIGH) 1269 alloc_flags |= ALLOC_HIGH; 1270 if (wait) 1271 alloc_flags |= ALLOC_CPUSET; 1272 1273 /* 1274 * Go through the zonelist again. Let __GFP_HIGH and allocations 1275 * coming from realtime tasks go deeper into reserves. 1276 * 1277 * This is the last chance, in general, before the goto nopage. 1278 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 1279 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 1280 */ 1281 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 1282 if (page) 1283 goto got_pg; 1284 1285 /* This allocation should allow future memory freeing. */ 1286 1287 rebalance: 1288 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 1289 && !in_interrupt()) { 1290 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 1291 nofail_alloc: 1292 /* go through the zonelist yet again, ignoring mins */ 1293 page = get_page_from_freelist(gfp_mask, order, 1294 zonelist, ALLOC_NO_WATERMARKS); 1295 if (page) 1296 goto got_pg; 1297 if (gfp_mask & __GFP_NOFAIL) { 1298 congestion_wait(WRITE, HZ/50); 1299 goto nofail_alloc; 1300 } 1301 } 1302 goto nopage; 1303 } 1304 1305 /* Atomic allocations - we can't balance anything */ 1306 if (!wait) 1307 goto nopage; 1308 1309 cond_resched(); 1310 1311 /* We now go into synchronous reclaim */ 1312 cpuset_memory_pressure_bump(); 1313 p->flags |= PF_MEMALLOC; 1314 reclaim_state.reclaimed_slab = 0; 1315 p->reclaim_state = &reclaim_state; 1316 1317 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 1318 1319 p->reclaim_state = NULL; 1320 p->flags &= ~PF_MEMALLOC; 1321 1322 cond_resched(); 1323 1324 if (likely(did_some_progress)) { 1325 page = get_page_from_freelist(gfp_mask, order, 1326 zonelist, alloc_flags); 1327 if (page) 1328 goto got_pg; 1329 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1330 /* 1331 * Go through the zonelist yet one more time, keep 1332 * very high watermark here, this is only to catch 1333 * a parallel oom killing, we must fail if we're still 1334 * under heavy pressure. 1335 */ 1336 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1337 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1338 if (page) 1339 goto got_pg; 1340 1341 out_of_memory(zonelist, gfp_mask, order); 1342 goto restart; 1343 } 1344 1345 /* 1346 * Don't let big-order allocations loop unless the caller explicitly 1347 * requests that. Wait for some write requests to complete then retry. 1348 * 1349 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 1350 * <= 3, but that may not be true in other implementations. 1351 */ 1352 do_retry = 0; 1353 if (!(gfp_mask & __GFP_NORETRY)) { 1354 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1355 do_retry = 1; 1356 if (gfp_mask & __GFP_NOFAIL) 1357 do_retry = 1; 1358 } 1359 if (do_retry) { 1360 congestion_wait(WRITE, HZ/50); 1361 goto rebalance; 1362 } 1363 1364 nopage: 1365 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1366 printk(KERN_WARNING "%s: page allocation failure." 1367 " order:%d, mode:0x%x\n", 1368 p->comm, order, gfp_mask); 1369 dump_stack(); 1370 show_mem(); 1371 } 1372 got_pg: 1373 return page; 1374 } 1375 1376 EXPORT_SYMBOL(__alloc_pages); 1377 1378 /* 1379 * Common helper functions. 1380 */ 1381 fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1382 { 1383 struct page * page; 1384 page = alloc_pages(gfp_mask, order); 1385 if (!page) 1386 return 0; 1387 return (unsigned long) page_address(page); 1388 } 1389 1390 EXPORT_SYMBOL(__get_free_pages); 1391 1392 fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1393 { 1394 struct page * page; 1395 1396 /* 1397 * get_zeroed_page() returns a 32-bit address, which cannot represent 1398 * a highmem page 1399 */ 1400 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1401 1402 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1403 if (page) 1404 return (unsigned long) page_address(page); 1405 return 0; 1406 } 1407 1408 EXPORT_SYMBOL(get_zeroed_page); 1409 1410 void __pagevec_free(struct pagevec *pvec) 1411 { 1412 int i = pagevec_count(pvec); 1413 1414 while (--i >= 0) 1415 free_hot_cold_page(pvec->pages[i], pvec->cold); 1416 } 1417 1418 fastcall void __free_pages(struct page *page, unsigned int order) 1419 { 1420 if (put_page_testzero(page)) { 1421 if (order == 0) 1422 free_hot_page(page); 1423 else 1424 __free_pages_ok(page, order); 1425 } 1426 } 1427 1428 EXPORT_SYMBOL(__free_pages); 1429 1430 fastcall void free_pages(unsigned long addr, unsigned int order) 1431 { 1432 if (addr != 0) { 1433 VM_BUG_ON(!virt_addr_valid((void *)addr)); 1434 __free_pages(virt_to_page((void *)addr), order); 1435 } 1436 } 1437 1438 EXPORT_SYMBOL(free_pages); 1439 1440 static unsigned int nr_free_zone_pages(int offset) 1441 { 1442 /* Just pick one node, since fallback list is circular */ 1443 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1444 unsigned int sum = 0; 1445 1446 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1447 struct zone **zonep = zonelist->zones; 1448 struct zone *zone; 1449 1450 for (zone = *zonep++; zone; zone = *zonep++) { 1451 unsigned long size = zone->present_pages; 1452 unsigned long high = zone->pages_high; 1453 if (size > high) 1454 sum += size - high; 1455 } 1456 1457 return sum; 1458 } 1459 1460 /* 1461 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1462 */ 1463 unsigned int nr_free_buffer_pages(void) 1464 { 1465 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1466 } 1467 1468 /* 1469 * Amount of free RAM allocatable within all zones 1470 */ 1471 unsigned int nr_free_pagecache_pages(void) 1472 { 1473 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1474 } 1475 1476 static inline void show_node(struct zone *zone) 1477 { 1478 if (NUMA_BUILD) 1479 printk("Node %d ", zone_to_nid(zone)); 1480 } 1481 1482 void si_meminfo(struct sysinfo *val) 1483 { 1484 val->totalram = totalram_pages; 1485 val->sharedram = 0; 1486 val->freeram = global_page_state(NR_FREE_PAGES); 1487 val->bufferram = nr_blockdev_pages(); 1488 val->totalhigh = totalhigh_pages; 1489 val->freehigh = nr_free_highpages(); 1490 val->mem_unit = PAGE_SIZE; 1491 } 1492 1493 EXPORT_SYMBOL(si_meminfo); 1494 1495 #ifdef CONFIG_NUMA 1496 void si_meminfo_node(struct sysinfo *val, int nid) 1497 { 1498 pg_data_t *pgdat = NODE_DATA(nid); 1499 1500 val->totalram = pgdat->node_present_pages; 1501 val->freeram = node_page_state(nid, NR_FREE_PAGES); 1502 #ifdef CONFIG_HIGHMEM 1503 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1504 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 1505 NR_FREE_PAGES); 1506 #else 1507 val->totalhigh = 0; 1508 val->freehigh = 0; 1509 #endif 1510 val->mem_unit = PAGE_SIZE; 1511 } 1512 #endif 1513 1514 #define K(x) ((x) << (PAGE_SHIFT-10)) 1515 1516 /* 1517 * Show free area list (used inside shift_scroll-lock stuff) 1518 * We also calculate the percentage fragmentation. We do this by counting the 1519 * memory on each free list with the exception of the first item on the list. 1520 */ 1521 void show_free_areas(void) 1522 { 1523 int cpu; 1524 struct zone *zone; 1525 1526 for_each_zone(zone) { 1527 if (!populated_zone(zone)) 1528 continue; 1529 1530 show_node(zone); 1531 printk("%s per-cpu:\n", zone->name); 1532 1533 for_each_online_cpu(cpu) { 1534 struct per_cpu_pageset *pageset; 1535 1536 pageset = zone_pcp(zone, cpu); 1537 1538 printk("CPU %4d: Hot: hi:%5d, btch:%4d usd:%4d " 1539 "Cold: hi:%5d, btch:%4d usd:%4d\n", 1540 cpu, pageset->pcp[0].high, 1541 pageset->pcp[0].batch, pageset->pcp[0].count, 1542 pageset->pcp[1].high, pageset->pcp[1].batch, 1543 pageset->pcp[1].count); 1544 } 1545 } 1546 1547 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n" 1548 " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n", 1549 global_page_state(NR_ACTIVE), 1550 global_page_state(NR_INACTIVE), 1551 global_page_state(NR_FILE_DIRTY), 1552 global_page_state(NR_WRITEBACK), 1553 global_page_state(NR_UNSTABLE_NFS), 1554 global_page_state(NR_FREE_PAGES), 1555 global_page_state(NR_SLAB_RECLAIMABLE) + 1556 global_page_state(NR_SLAB_UNRECLAIMABLE), 1557 global_page_state(NR_FILE_MAPPED), 1558 global_page_state(NR_PAGETABLE), 1559 global_page_state(NR_BOUNCE)); 1560 1561 for_each_zone(zone) { 1562 int i; 1563 1564 if (!populated_zone(zone)) 1565 continue; 1566 1567 show_node(zone); 1568 printk("%s" 1569 " free:%lukB" 1570 " min:%lukB" 1571 " low:%lukB" 1572 " high:%lukB" 1573 " active:%lukB" 1574 " inactive:%lukB" 1575 " present:%lukB" 1576 " pages_scanned:%lu" 1577 " all_unreclaimable? %s" 1578 "\n", 1579 zone->name, 1580 K(zone_page_state(zone, NR_FREE_PAGES)), 1581 K(zone->pages_min), 1582 K(zone->pages_low), 1583 K(zone->pages_high), 1584 K(zone_page_state(zone, NR_ACTIVE)), 1585 K(zone_page_state(zone, NR_INACTIVE)), 1586 K(zone->present_pages), 1587 zone->pages_scanned, 1588 (zone->all_unreclaimable ? "yes" : "no") 1589 ); 1590 printk("lowmem_reserve[]:"); 1591 for (i = 0; i < MAX_NR_ZONES; i++) 1592 printk(" %lu", zone->lowmem_reserve[i]); 1593 printk("\n"); 1594 } 1595 1596 for_each_zone(zone) { 1597 unsigned long nr[MAX_ORDER], flags, order, total = 0; 1598 1599 if (!populated_zone(zone)) 1600 continue; 1601 1602 show_node(zone); 1603 printk("%s: ", zone->name); 1604 1605 spin_lock_irqsave(&zone->lock, flags); 1606 for (order = 0; order < MAX_ORDER; order++) { 1607 nr[order] = zone->free_area[order].nr_free; 1608 total += nr[order] << order; 1609 } 1610 spin_unlock_irqrestore(&zone->lock, flags); 1611 for (order = 0; order < MAX_ORDER; order++) 1612 printk("%lu*%lukB ", nr[order], K(1UL) << order); 1613 printk("= %lukB\n", K(total)); 1614 } 1615 1616 show_swap_cache_info(); 1617 } 1618 1619 /* 1620 * Builds allocation fallback zone lists. 1621 * 1622 * Add all populated zones of a node to the zonelist. 1623 */ 1624 static int __meminit build_zonelists_node(pg_data_t *pgdat, 1625 struct zonelist *zonelist, int nr_zones, enum zone_type zone_type) 1626 { 1627 struct zone *zone; 1628 1629 BUG_ON(zone_type >= MAX_NR_ZONES); 1630 zone_type++; 1631 1632 do { 1633 zone_type--; 1634 zone = pgdat->node_zones + zone_type; 1635 if (populated_zone(zone)) { 1636 zonelist->zones[nr_zones++] = zone; 1637 check_highest_zone(zone_type); 1638 } 1639 1640 } while (zone_type); 1641 return nr_zones; 1642 } 1643 1644 #ifdef CONFIG_NUMA 1645 #define MAX_NODE_LOAD (num_online_nodes()) 1646 static int __meminitdata node_load[MAX_NUMNODES]; 1647 /** 1648 * find_next_best_node - find the next node that should appear in a given node's fallback list 1649 * @node: node whose fallback list we're appending 1650 * @used_node_mask: nodemask_t of already used nodes 1651 * 1652 * We use a number of factors to determine which is the next node that should 1653 * appear on a given node's fallback list. The node should not have appeared 1654 * already in @node's fallback list, and it should be the next closest node 1655 * according to the distance array (which contains arbitrary distance values 1656 * from each node to each node in the system), and should also prefer nodes 1657 * with no CPUs, since presumably they'll have very little allocation pressure 1658 * on them otherwise. 1659 * It returns -1 if no node is found. 1660 */ 1661 static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask) 1662 { 1663 int n, val; 1664 int min_val = INT_MAX; 1665 int best_node = -1; 1666 1667 /* Use the local node if we haven't already */ 1668 if (!node_isset(node, *used_node_mask)) { 1669 node_set(node, *used_node_mask); 1670 return node; 1671 } 1672 1673 for_each_online_node(n) { 1674 cpumask_t tmp; 1675 1676 /* Don't want a node to appear more than once */ 1677 if (node_isset(n, *used_node_mask)) 1678 continue; 1679 1680 /* Use the distance array to find the distance */ 1681 val = node_distance(node, n); 1682 1683 /* Penalize nodes under us ("prefer the next node") */ 1684 val += (n < node); 1685 1686 /* Give preference to headless and unused nodes */ 1687 tmp = node_to_cpumask(n); 1688 if (!cpus_empty(tmp)) 1689 val += PENALTY_FOR_NODE_WITH_CPUS; 1690 1691 /* Slight preference for less loaded node */ 1692 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 1693 val += node_load[n]; 1694 1695 if (val < min_val) { 1696 min_val = val; 1697 best_node = n; 1698 } 1699 } 1700 1701 if (best_node >= 0) 1702 node_set(best_node, *used_node_mask); 1703 1704 return best_node; 1705 } 1706 1707 static void __meminit build_zonelists(pg_data_t *pgdat) 1708 { 1709 int j, node, local_node; 1710 enum zone_type i; 1711 int prev_node, load; 1712 struct zonelist *zonelist; 1713 nodemask_t used_mask; 1714 1715 /* initialize zonelists */ 1716 for (i = 0; i < MAX_NR_ZONES; i++) { 1717 zonelist = pgdat->node_zonelists + i; 1718 zonelist->zones[0] = NULL; 1719 } 1720 1721 /* NUMA-aware ordering of nodes */ 1722 local_node = pgdat->node_id; 1723 load = num_online_nodes(); 1724 prev_node = local_node; 1725 nodes_clear(used_mask); 1726 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1727 int distance = node_distance(local_node, node); 1728 1729 /* 1730 * If another node is sufficiently far away then it is better 1731 * to reclaim pages in a zone before going off node. 1732 */ 1733 if (distance > RECLAIM_DISTANCE) 1734 zone_reclaim_mode = 1; 1735 1736 /* 1737 * We don't want to pressure a particular node. 1738 * So adding penalty to the first node in same 1739 * distance group to make it round-robin. 1740 */ 1741 1742 if (distance != node_distance(local_node, prev_node)) 1743 node_load[node] += load; 1744 prev_node = node; 1745 load--; 1746 for (i = 0; i < MAX_NR_ZONES; i++) { 1747 zonelist = pgdat->node_zonelists + i; 1748 for (j = 0; zonelist->zones[j] != NULL; j++); 1749 1750 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1751 zonelist->zones[j] = NULL; 1752 } 1753 } 1754 } 1755 1756 /* Construct the zonelist performance cache - see further mmzone.h */ 1757 static void __meminit build_zonelist_cache(pg_data_t *pgdat) 1758 { 1759 int i; 1760 1761 for (i = 0; i < MAX_NR_ZONES; i++) { 1762 struct zonelist *zonelist; 1763 struct zonelist_cache *zlc; 1764 struct zone **z; 1765 1766 zonelist = pgdat->node_zonelists + i; 1767 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 1768 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1769 for (z = zonelist->zones; *z; z++) 1770 zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z); 1771 } 1772 } 1773 1774 #else /* CONFIG_NUMA */ 1775 1776 static void __meminit build_zonelists(pg_data_t *pgdat) 1777 { 1778 int node, local_node; 1779 enum zone_type i,j; 1780 1781 local_node = pgdat->node_id; 1782 for (i = 0; i < MAX_NR_ZONES; i++) { 1783 struct zonelist *zonelist; 1784 1785 zonelist = pgdat->node_zonelists + i; 1786 1787 j = build_zonelists_node(pgdat, zonelist, 0, i); 1788 /* 1789 * Now we build the zonelist so that it contains the zones 1790 * of all the other nodes. 1791 * We don't want to pressure a particular node, so when 1792 * building the zones for node N, we make sure that the 1793 * zones coming right after the local ones are those from 1794 * node N+1 (modulo N) 1795 */ 1796 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 1797 if (!node_online(node)) 1798 continue; 1799 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1800 } 1801 for (node = 0; node < local_node; node++) { 1802 if (!node_online(node)) 1803 continue; 1804 j = build_zonelists_node(NODE_DATA(node), zonelist, j, i); 1805 } 1806 1807 zonelist->zones[j] = NULL; 1808 } 1809 } 1810 1811 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 1812 static void __meminit build_zonelist_cache(pg_data_t *pgdat) 1813 { 1814 int i; 1815 1816 for (i = 0; i < MAX_NR_ZONES; i++) 1817 pgdat->node_zonelists[i].zlcache_ptr = NULL; 1818 } 1819 1820 #endif /* CONFIG_NUMA */ 1821 1822 /* return values int ....just for stop_machine_run() */ 1823 static int __meminit __build_all_zonelists(void *dummy) 1824 { 1825 int nid; 1826 1827 for_each_online_node(nid) { 1828 build_zonelists(NODE_DATA(nid)); 1829 build_zonelist_cache(NODE_DATA(nid)); 1830 } 1831 return 0; 1832 } 1833 1834 void __meminit build_all_zonelists(void) 1835 { 1836 if (system_state == SYSTEM_BOOTING) { 1837 __build_all_zonelists(NULL); 1838 cpuset_init_current_mems_allowed(); 1839 } else { 1840 /* we have to stop all cpus to guaranntee there is no user 1841 of zonelist */ 1842 stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); 1843 /* cpuset refresh routine should be here */ 1844 } 1845 vm_total_pages = nr_free_pagecache_pages(); 1846 printk("Built %i zonelists. Total pages: %ld\n", 1847 num_online_nodes(), vm_total_pages); 1848 } 1849 1850 /* 1851 * Helper functions to size the waitqueue hash table. 1852 * Essentially these want to choose hash table sizes sufficiently 1853 * large so that collisions trying to wait on pages are rare. 1854 * But in fact, the number of active page waitqueues on typical 1855 * systems is ridiculously low, less than 200. So this is even 1856 * conservative, even though it seems large. 1857 * 1858 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 1859 * waitqueues, i.e. the size of the waitq table given the number of pages. 1860 */ 1861 #define PAGES_PER_WAITQUEUE 256 1862 1863 #ifndef CONFIG_MEMORY_HOTPLUG 1864 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1865 { 1866 unsigned long size = 1; 1867 1868 pages /= PAGES_PER_WAITQUEUE; 1869 1870 while (size < pages) 1871 size <<= 1; 1872 1873 /* 1874 * Once we have dozens or even hundreds of threads sleeping 1875 * on IO we've got bigger problems than wait queue collision. 1876 * Limit the size of the wait table to a reasonable size. 1877 */ 1878 size = min(size, 4096UL); 1879 1880 return max(size, 4UL); 1881 } 1882 #else 1883 /* 1884 * A zone's size might be changed by hot-add, so it is not possible to determine 1885 * a suitable size for its wait_table. So we use the maximum size now. 1886 * 1887 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 1888 * 1889 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 1890 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 1891 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 1892 * 1893 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 1894 * or more by the traditional way. (See above). It equals: 1895 * 1896 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 1897 * ia64(16K page size) : = ( 8G + 4M)byte. 1898 * powerpc (64K page size) : = (32G +16M)byte. 1899 */ 1900 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 1901 { 1902 return 4096UL; 1903 } 1904 #endif 1905 1906 /* 1907 * This is an integer logarithm so that shifts can be used later 1908 * to extract the more random high bits from the multiplicative 1909 * hash function before the remainder is taken. 1910 */ 1911 static inline unsigned long wait_table_bits(unsigned long size) 1912 { 1913 return ffz(~size); 1914 } 1915 1916 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 1917 1918 /* 1919 * Initially all pages are reserved - free ones are freed 1920 * up by free_all_bootmem() once the early boot process is 1921 * done. Non-atomic initialization, single-pass. 1922 */ 1923 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1924 unsigned long start_pfn, enum memmap_context context) 1925 { 1926 struct page *page; 1927 unsigned long end_pfn = start_pfn + size; 1928 unsigned long pfn; 1929 1930 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 1931 /* 1932 * There can be holes in boot-time mem_map[]s 1933 * handed to this function. They do not 1934 * exist on hotplugged memory. 1935 */ 1936 if (context == MEMMAP_EARLY) { 1937 if (!early_pfn_valid(pfn)) 1938 continue; 1939 if (!early_pfn_in_nid(pfn, nid)) 1940 continue; 1941 } 1942 page = pfn_to_page(pfn); 1943 set_page_links(page, zone, nid, pfn); 1944 init_page_count(page); 1945 reset_page_mapcount(page); 1946 SetPageReserved(page); 1947 INIT_LIST_HEAD(&page->lru); 1948 #ifdef WANT_PAGE_VIRTUAL 1949 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1950 if (!is_highmem_idx(zone)) 1951 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1952 #endif 1953 } 1954 } 1955 1956 void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, 1957 unsigned long size) 1958 { 1959 int order; 1960 for (order = 0; order < MAX_ORDER ; order++) { 1961 INIT_LIST_HEAD(&zone->free_area[order].free_list); 1962 zone->free_area[order].nr_free = 0; 1963 } 1964 } 1965 1966 #ifndef __HAVE_ARCH_MEMMAP_INIT 1967 #define memmap_init(size, nid, zone, start_pfn) \ 1968 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 1969 #endif 1970 1971 static int __devinit zone_batchsize(struct zone *zone) 1972 { 1973 int batch; 1974 1975 /* 1976 * The per-cpu-pages pools are set to around 1000th of the 1977 * size of the zone. But no more than 1/2 of a meg. 1978 * 1979 * OK, so we don't know how big the cache is. So guess. 1980 */ 1981 batch = zone->present_pages / 1024; 1982 if (batch * PAGE_SIZE > 512 * 1024) 1983 batch = (512 * 1024) / PAGE_SIZE; 1984 batch /= 4; /* We effectively *= 4 below */ 1985 if (batch < 1) 1986 batch = 1; 1987 1988 /* 1989 * Clamp the batch to a 2^n - 1 value. Having a power 1990 * of 2 value was found to be more likely to have 1991 * suboptimal cache aliasing properties in some cases. 1992 * 1993 * For example if 2 tasks are alternately allocating 1994 * batches of pages, one task can end up with a lot 1995 * of pages of one half of the possible page colors 1996 * and the other with pages of the other colors. 1997 */ 1998 batch = (1 << (fls(batch + batch/2)-1)) - 1; 1999 2000 return batch; 2001 } 2002 2003 inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 2004 { 2005 struct per_cpu_pages *pcp; 2006 2007 memset(p, 0, sizeof(*p)); 2008 2009 pcp = &p->pcp[0]; /* hot */ 2010 pcp->count = 0; 2011 pcp->high = 6 * batch; 2012 pcp->batch = max(1UL, 1 * batch); 2013 INIT_LIST_HEAD(&pcp->list); 2014 2015 pcp = &p->pcp[1]; /* cold*/ 2016 pcp->count = 0; 2017 pcp->high = 2 * batch; 2018 pcp->batch = max(1UL, batch/2); 2019 INIT_LIST_HEAD(&pcp->list); 2020 } 2021 2022 /* 2023 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 2024 * to the value high for the pageset p. 2025 */ 2026 2027 static void setup_pagelist_highmark(struct per_cpu_pageset *p, 2028 unsigned long high) 2029 { 2030 struct per_cpu_pages *pcp; 2031 2032 pcp = &p->pcp[0]; /* hot list */ 2033 pcp->high = high; 2034 pcp->batch = max(1UL, high/4); 2035 if ((high/4) > (PAGE_SHIFT * 8)) 2036 pcp->batch = PAGE_SHIFT * 8; 2037 } 2038 2039 2040 #ifdef CONFIG_NUMA 2041 /* 2042 * Boot pageset table. One per cpu which is going to be used for all 2043 * zones and all nodes. The parameters will be set in such a way 2044 * that an item put on a list will immediately be handed over to 2045 * the buddy list. This is safe since pageset manipulation is done 2046 * with interrupts disabled. 2047 * 2048 * Some NUMA counter updates may also be caught by the boot pagesets. 2049 * 2050 * The boot_pagesets must be kept even after bootup is complete for 2051 * unused processors and/or zones. They do play a role for bootstrapping 2052 * hotplugged processors. 2053 * 2054 * zoneinfo_show() and maybe other functions do 2055 * not check if the processor is online before following the pageset pointer. 2056 * Other parts of the kernel may not check if the zone is available. 2057 */ 2058 static struct per_cpu_pageset boot_pageset[NR_CPUS]; 2059 2060 /* 2061 * Dynamically allocate memory for the 2062 * per cpu pageset array in struct zone. 2063 */ 2064 static int __cpuinit process_zones(int cpu) 2065 { 2066 struct zone *zone, *dzone; 2067 2068 for_each_zone(zone) { 2069 2070 if (!populated_zone(zone)) 2071 continue; 2072 2073 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 2074 GFP_KERNEL, cpu_to_node(cpu)); 2075 if (!zone_pcp(zone, cpu)) 2076 goto bad; 2077 2078 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 2079 2080 if (percpu_pagelist_fraction) 2081 setup_pagelist_highmark(zone_pcp(zone, cpu), 2082 (zone->present_pages / percpu_pagelist_fraction)); 2083 } 2084 2085 return 0; 2086 bad: 2087 for_each_zone(dzone) { 2088 if (dzone == zone) 2089 break; 2090 kfree(zone_pcp(dzone, cpu)); 2091 zone_pcp(dzone, cpu) = NULL; 2092 } 2093 return -ENOMEM; 2094 } 2095 2096 static inline void free_zone_pagesets(int cpu) 2097 { 2098 struct zone *zone; 2099 2100 for_each_zone(zone) { 2101 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 2102 2103 /* Free per_cpu_pageset if it is slab allocated */ 2104 if (pset != &boot_pageset[cpu]) 2105 kfree(pset); 2106 zone_pcp(zone, cpu) = NULL; 2107 } 2108 } 2109 2110 static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb, 2111 unsigned long action, 2112 void *hcpu) 2113 { 2114 int cpu = (long)hcpu; 2115 int ret = NOTIFY_OK; 2116 2117 switch (action) { 2118 case CPU_UP_PREPARE: 2119 case CPU_UP_PREPARE_FROZEN: 2120 if (process_zones(cpu)) 2121 ret = NOTIFY_BAD; 2122 break; 2123 case CPU_UP_CANCELED: 2124 case CPU_UP_CANCELED_FROZEN: 2125 case CPU_DEAD: 2126 case CPU_DEAD_FROZEN: 2127 free_zone_pagesets(cpu); 2128 break; 2129 default: 2130 break; 2131 } 2132 return ret; 2133 } 2134 2135 static struct notifier_block __cpuinitdata pageset_notifier = 2136 { &pageset_cpuup_callback, NULL, 0 }; 2137 2138 void __init setup_per_cpu_pageset(void) 2139 { 2140 int err; 2141 2142 /* Initialize per_cpu_pageset for cpu 0. 2143 * A cpuup callback will do this for every cpu 2144 * as it comes online 2145 */ 2146 err = process_zones(smp_processor_id()); 2147 BUG_ON(err); 2148 register_cpu_notifier(&pageset_notifier); 2149 } 2150 2151 #endif 2152 2153 static noinline __init_refok 2154 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 2155 { 2156 int i; 2157 struct pglist_data *pgdat = zone->zone_pgdat; 2158 size_t alloc_size; 2159 2160 /* 2161 * The per-page waitqueue mechanism uses hashed waitqueues 2162 * per zone. 2163 */ 2164 zone->wait_table_hash_nr_entries = 2165 wait_table_hash_nr_entries(zone_size_pages); 2166 zone->wait_table_bits = 2167 wait_table_bits(zone->wait_table_hash_nr_entries); 2168 alloc_size = zone->wait_table_hash_nr_entries 2169 * sizeof(wait_queue_head_t); 2170 2171 if (system_state == SYSTEM_BOOTING) { 2172 zone->wait_table = (wait_queue_head_t *) 2173 alloc_bootmem_node(pgdat, alloc_size); 2174 } else { 2175 /* 2176 * This case means that a zone whose size was 0 gets new memory 2177 * via memory hot-add. 2178 * But it may be the case that a new node was hot-added. In 2179 * this case vmalloc() will not be able to use this new node's 2180 * memory - this wait_table must be initialized to use this new 2181 * node itself as well. 2182 * To use this new node's memory, further consideration will be 2183 * necessary. 2184 */ 2185 zone->wait_table = (wait_queue_head_t *)vmalloc(alloc_size); 2186 } 2187 if (!zone->wait_table) 2188 return -ENOMEM; 2189 2190 for(i = 0; i < zone->wait_table_hash_nr_entries; ++i) 2191 init_waitqueue_head(zone->wait_table + i); 2192 2193 return 0; 2194 } 2195 2196 static __meminit void zone_pcp_init(struct zone *zone) 2197 { 2198 int cpu; 2199 unsigned long batch = zone_batchsize(zone); 2200 2201 for (cpu = 0; cpu < NR_CPUS; cpu++) { 2202 #ifdef CONFIG_NUMA 2203 /* Early boot. Slab allocator not functional yet */ 2204 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 2205 setup_pageset(&boot_pageset[cpu],0); 2206 #else 2207 setup_pageset(zone_pcp(zone,cpu), batch); 2208 #endif 2209 } 2210 if (zone->present_pages) 2211 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 2212 zone->name, zone->present_pages, batch); 2213 } 2214 2215 __meminit int init_currently_empty_zone(struct zone *zone, 2216 unsigned long zone_start_pfn, 2217 unsigned long size, 2218 enum memmap_context context) 2219 { 2220 struct pglist_data *pgdat = zone->zone_pgdat; 2221 int ret; 2222 ret = zone_wait_table_init(zone, size); 2223 if (ret) 2224 return ret; 2225 pgdat->nr_zones = zone_idx(zone) + 1; 2226 2227 zone->zone_start_pfn = zone_start_pfn; 2228 2229 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 2230 2231 zone_init_free_lists(pgdat, zone, zone->spanned_pages); 2232 2233 return 0; 2234 } 2235 2236 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2237 /* 2238 * Basic iterator support. Return the first range of PFNs for a node 2239 * Note: nid == MAX_NUMNODES returns first region regardless of node 2240 */ 2241 static int __meminit first_active_region_index_in_nid(int nid) 2242 { 2243 int i; 2244 2245 for (i = 0; i < nr_nodemap_entries; i++) 2246 if (nid == MAX_NUMNODES || early_node_map[i].nid == nid) 2247 return i; 2248 2249 return -1; 2250 } 2251 2252 /* 2253 * Basic iterator support. Return the next active range of PFNs for a node 2254 * Note: nid == MAX_NUMNODES returns next region regardles of node 2255 */ 2256 static int __meminit next_active_region_index_in_nid(int index, int nid) 2257 { 2258 for (index = index + 1; index < nr_nodemap_entries; index++) 2259 if (nid == MAX_NUMNODES || early_node_map[index].nid == nid) 2260 return index; 2261 2262 return -1; 2263 } 2264 2265 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 2266 /* 2267 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 2268 * Architectures may implement their own version but if add_active_range() 2269 * was used and there are no special requirements, this is a convenient 2270 * alternative 2271 */ 2272 int __meminit early_pfn_to_nid(unsigned long pfn) 2273 { 2274 int i; 2275 2276 for (i = 0; i < nr_nodemap_entries; i++) { 2277 unsigned long start_pfn = early_node_map[i].start_pfn; 2278 unsigned long end_pfn = early_node_map[i].end_pfn; 2279 2280 if (start_pfn <= pfn && pfn < end_pfn) 2281 return early_node_map[i].nid; 2282 } 2283 2284 return 0; 2285 } 2286 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 2287 2288 /* Basic iterator support to walk early_node_map[] */ 2289 #define for_each_active_range_index_in_nid(i, nid) \ 2290 for (i = first_active_region_index_in_nid(nid); i != -1; \ 2291 i = next_active_region_index_in_nid(i, nid)) 2292 2293 /** 2294 * free_bootmem_with_active_regions - Call free_bootmem_node for each active range 2295 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 2296 * @max_low_pfn: The highest PFN that will be passed to free_bootmem_node 2297 * 2298 * If an architecture guarantees that all ranges registered with 2299 * add_active_ranges() contain no holes and may be freed, this 2300 * this function may be used instead of calling free_bootmem() manually. 2301 */ 2302 void __init free_bootmem_with_active_regions(int nid, 2303 unsigned long max_low_pfn) 2304 { 2305 int i; 2306 2307 for_each_active_range_index_in_nid(i, nid) { 2308 unsigned long size_pages = 0; 2309 unsigned long end_pfn = early_node_map[i].end_pfn; 2310 2311 if (early_node_map[i].start_pfn >= max_low_pfn) 2312 continue; 2313 2314 if (end_pfn > max_low_pfn) 2315 end_pfn = max_low_pfn; 2316 2317 size_pages = end_pfn - early_node_map[i].start_pfn; 2318 free_bootmem_node(NODE_DATA(early_node_map[i].nid), 2319 PFN_PHYS(early_node_map[i].start_pfn), 2320 size_pages << PAGE_SHIFT); 2321 } 2322 } 2323 2324 /** 2325 * sparse_memory_present_with_active_regions - Call memory_present for each active range 2326 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 2327 * 2328 * If an architecture guarantees that all ranges registered with 2329 * add_active_ranges() contain no holes and may be freed, this 2330 * function may be used instead of calling memory_present() manually. 2331 */ 2332 void __init sparse_memory_present_with_active_regions(int nid) 2333 { 2334 int i; 2335 2336 for_each_active_range_index_in_nid(i, nid) 2337 memory_present(early_node_map[i].nid, 2338 early_node_map[i].start_pfn, 2339 early_node_map[i].end_pfn); 2340 } 2341 2342 /** 2343 * push_node_boundaries - Push node boundaries to at least the requested boundary 2344 * @nid: The nid of the node to push the boundary for 2345 * @start_pfn: The start pfn of the node 2346 * @end_pfn: The end pfn of the node 2347 * 2348 * In reserve-based hot-add, mem_map is allocated that is unused until hotadd 2349 * time. Specifically, on x86_64, SRAT will report ranges that can potentially 2350 * be hotplugged even though no physical memory exists. This function allows 2351 * an arch to push out the node boundaries so mem_map is allocated that can 2352 * be used later. 2353 */ 2354 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2355 void __init push_node_boundaries(unsigned int nid, 2356 unsigned long start_pfn, unsigned long end_pfn) 2357 { 2358 printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n", 2359 nid, start_pfn, end_pfn); 2360 2361 /* Initialise the boundary for this node if necessary */ 2362 if (node_boundary_end_pfn[nid] == 0) 2363 node_boundary_start_pfn[nid] = -1UL; 2364 2365 /* Update the boundaries */ 2366 if (node_boundary_start_pfn[nid] > start_pfn) 2367 node_boundary_start_pfn[nid] = start_pfn; 2368 if (node_boundary_end_pfn[nid] < end_pfn) 2369 node_boundary_end_pfn[nid] = end_pfn; 2370 } 2371 2372 /* If necessary, push the node boundary out for reserve hotadd */ 2373 static void __init account_node_boundary(unsigned int nid, 2374 unsigned long *start_pfn, unsigned long *end_pfn) 2375 { 2376 printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", 2377 nid, *start_pfn, *end_pfn); 2378 2379 /* Return if boundary information has not been provided */ 2380 if (node_boundary_end_pfn[nid] == 0) 2381 return; 2382 2383 /* Check the boundaries and update if necessary */ 2384 if (node_boundary_start_pfn[nid] < *start_pfn) 2385 *start_pfn = node_boundary_start_pfn[nid]; 2386 if (node_boundary_end_pfn[nid] > *end_pfn) 2387 *end_pfn = node_boundary_end_pfn[nid]; 2388 } 2389 #else 2390 void __init push_node_boundaries(unsigned int nid, 2391 unsigned long start_pfn, unsigned long end_pfn) {} 2392 2393 static void __init account_node_boundary(unsigned int nid, 2394 unsigned long *start_pfn, unsigned long *end_pfn) {} 2395 #endif 2396 2397 2398 /** 2399 * get_pfn_range_for_nid - Return the start and end page frames for a node 2400 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 2401 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 2402 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 2403 * 2404 * It returns the start and end page frame of a node based on information 2405 * provided by an arch calling add_active_range(). If called for a node 2406 * with no available memory, a warning is printed and the start and end 2407 * PFNs will be 0. 2408 */ 2409 void __meminit get_pfn_range_for_nid(unsigned int nid, 2410 unsigned long *start_pfn, unsigned long *end_pfn) 2411 { 2412 int i; 2413 *start_pfn = -1UL; 2414 *end_pfn = 0; 2415 2416 for_each_active_range_index_in_nid(i, nid) { 2417 *start_pfn = min(*start_pfn, early_node_map[i].start_pfn); 2418 *end_pfn = max(*end_pfn, early_node_map[i].end_pfn); 2419 } 2420 2421 if (*start_pfn == -1UL) { 2422 printk(KERN_WARNING "Node %u active with no memory\n", nid); 2423 *start_pfn = 0; 2424 } 2425 2426 /* Push the node boundaries out if requested */ 2427 account_node_boundary(nid, start_pfn, end_pfn); 2428 } 2429 2430 /* 2431 * Return the number of pages a zone spans in a node, including holes 2432 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 2433 */ 2434 unsigned long __meminit zone_spanned_pages_in_node(int nid, 2435 unsigned long zone_type, 2436 unsigned long *ignored) 2437 { 2438 unsigned long node_start_pfn, node_end_pfn; 2439 unsigned long zone_start_pfn, zone_end_pfn; 2440 2441 /* Get the start and end of the node and zone */ 2442 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2443 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 2444 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 2445 2446 /* Check that this node has pages within the zone's required range */ 2447 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 2448 return 0; 2449 2450 /* Move the zone boundaries inside the node if necessary */ 2451 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 2452 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 2453 2454 /* Return the spanned pages */ 2455 return zone_end_pfn - zone_start_pfn; 2456 } 2457 2458 /* 2459 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 2460 * then all holes in the requested range will be accounted for. 2461 */ 2462 unsigned long __meminit __absent_pages_in_range(int nid, 2463 unsigned long range_start_pfn, 2464 unsigned long range_end_pfn) 2465 { 2466 int i = 0; 2467 unsigned long prev_end_pfn = 0, hole_pages = 0; 2468 unsigned long start_pfn; 2469 2470 /* Find the end_pfn of the first active range of pfns in the node */ 2471 i = first_active_region_index_in_nid(nid); 2472 if (i == -1) 2473 return 0; 2474 2475 /* Account for ranges before physical memory on this node */ 2476 if (early_node_map[i].start_pfn > range_start_pfn) 2477 hole_pages = early_node_map[i].start_pfn - range_start_pfn; 2478 2479 prev_end_pfn = early_node_map[i].start_pfn; 2480 2481 /* Find all holes for the zone within the node */ 2482 for (; i != -1; i = next_active_region_index_in_nid(i, nid)) { 2483 2484 /* No need to continue if prev_end_pfn is outside the zone */ 2485 if (prev_end_pfn >= range_end_pfn) 2486 break; 2487 2488 /* Make sure the end of the zone is not within the hole */ 2489 start_pfn = min(early_node_map[i].start_pfn, range_end_pfn); 2490 prev_end_pfn = max(prev_end_pfn, range_start_pfn); 2491 2492 /* Update the hole size cound and move on */ 2493 if (start_pfn > range_start_pfn) { 2494 BUG_ON(prev_end_pfn > start_pfn); 2495 hole_pages += start_pfn - prev_end_pfn; 2496 } 2497 prev_end_pfn = early_node_map[i].end_pfn; 2498 } 2499 2500 /* Account for ranges past physical memory on this node */ 2501 if (range_end_pfn > prev_end_pfn) 2502 hole_pages += range_end_pfn - 2503 max(range_start_pfn, prev_end_pfn); 2504 2505 return hole_pages; 2506 } 2507 2508 /** 2509 * absent_pages_in_range - Return number of page frames in holes within a range 2510 * @start_pfn: The start PFN to start searching for holes 2511 * @end_pfn: The end PFN to stop searching for holes 2512 * 2513 * It returns the number of pages frames in memory holes within a range. 2514 */ 2515 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 2516 unsigned long end_pfn) 2517 { 2518 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 2519 } 2520 2521 /* Return the number of page frames in holes in a zone on a node */ 2522 unsigned long __meminit zone_absent_pages_in_node(int nid, 2523 unsigned long zone_type, 2524 unsigned long *ignored) 2525 { 2526 unsigned long node_start_pfn, node_end_pfn; 2527 unsigned long zone_start_pfn, zone_end_pfn; 2528 2529 get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn); 2530 zone_start_pfn = max(arch_zone_lowest_possible_pfn[zone_type], 2531 node_start_pfn); 2532 zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type], 2533 node_end_pfn); 2534 2535 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 2536 } 2537 2538 #else 2539 static inline unsigned long zone_spanned_pages_in_node(int nid, 2540 unsigned long zone_type, 2541 unsigned long *zones_size) 2542 { 2543 return zones_size[zone_type]; 2544 } 2545 2546 static inline unsigned long zone_absent_pages_in_node(int nid, 2547 unsigned long zone_type, 2548 unsigned long *zholes_size) 2549 { 2550 if (!zholes_size) 2551 return 0; 2552 2553 return zholes_size[zone_type]; 2554 } 2555 2556 #endif 2557 2558 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 2559 unsigned long *zones_size, unsigned long *zholes_size) 2560 { 2561 unsigned long realtotalpages, totalpages = 0; 2562 enum zone_type i; 2563 2564 for (i = 0; i < MAX_NR_ZONES; i++) 2565 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 2566 zones_size); 2567 pgdat->node_spanned_pages = totalpages; 2568 2569 realtotalpages = totalpages; 2570 for (i = 0; i < MAX_NR_ZONES; i++) 2571 realtotalpages -= 2572 zone_absent_pages_in_node(pgdat->node_id, i, 2573 zholes_size); 2574 pgdat->node_present_pages = realtotalpages; 2575 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 2576 realtotalpages); 2577 } 2578 2579 /* 2580 * Set up the zone data structures: 2581 * - mark all pages reserved 2582 * - mark all memory queues empty 2583 * - clear the memory bitmaps 2584 */ 2585 static void __meminit free_area_init_core(struct pglist_data *pgdat, 2586 unsigned long *zones_size, unsigned long *zholes_size) 2587 { 2588 enum zone_type j; 2589 int nid = pgdat->node_id; 2590 unsigned long zone_start_pfn = pgdat->node_start_pfn; 2591 int ret; 2592 2593 pgdat_resize_init(pgdat); 2594 pgdat->nr_zones = 0; 2595 init_waitqueue_head(&pgdat->kswapd_wait); 2596 pgdat->kswapd_max_order = 0; 2597 2598 for (j = 0; j < MAX_NR_ZONES; j++) { 2599 struct zone *zone = pgdat->node_zones + j; 2600 unsigned long size, realsize, memmap_pages; 2601 2602 size = zone_spanned_pages_in_node(nid, j, zones_size); 2603 realsize = size - zone_absent_pages_in_node(nid, j, 2604 zholes_size); 2605 2606 /* 2607 * Adjust realsize so that it accounts for how much memory 2608 * is used by this zone for memmap. This affects the watermark 2609 * and per-cpu initialisations 2610 */ 2611 memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT; 2612 if (realsize >= memmap_pages) { 2613 realsize -= memmap_pages; 2614 printk(KERN_DEBUG 2615 " %s zone: %lu pages used for memmap\n", 2616 zone_names[j], memmap_pages); 2617 } else 2618 printk(KERN_WARNING 2619 " %s zone: %lu pages exceeds realsize %lu\n", 2620 zone_names[j], memmap_pages, realsize); 2621 2622 /* Account for reserved pages */ 2623 if (j == 0 && realsize > dma_reserve) { 2624 realsize -= dma_reserve; 2625 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 2626 zone_names[0], dma_reserve); 2627 } 2628 2629 if (!is_highmem_idx(j)) 2630 nr_kernel_pages += realsize; 2631 nr_all_pages += realsize; 2632 2633 zone->spanned_pages = size; 2634 zone->present_pages = realsize; 2635 #ifdef CONFIG_NUMA 2636 zone->node = nid; 2637 zone->min_unmapped_pages = (realsize*sysctl_min_unmapped_ratio) 2638 / 100; 2639 zone->min_slab_pages = (realsize * sysctl_min_slab_ratio) / 100; 2640 #endif 2641 zone->name = zone_names[j]; 2642 spin_lock_init(&zone->lock); 2643 spin_lock_init(&zone->lru_lock); 2644 zone_seqlock_init(zone); 2645 zone->zone_pgdat = pgdat; 2646 2647 zone->prev_priority = DEF_PRIORITY; 2648 2649 zone_pcp_init(zone); 2650 INIT_LIST_HEAD(&zone->active_list); 2651 INIT_LIST_HEAD(&zone->inactive_list); 2652 zone->nr_scan_active = 0; 2653 zone->nr_scan_inactive = 0; 2654 zap_zone_vm_stats(zone); 2655 atomic_set(&zone->reclaim_in_progress, 0); 2656 if (!size) 2657 continue; 2658 2659 ret = init_currently_empty_zone(zone, zone_start_pfn, 2660 size, MEMMAP_EARLY); 2661 BUG_ON(ret); 2662 zone_start_pfn += size; 2663 } 2664 } 2665 2666 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 2667 { 2668 /* Skip empty nodes */ 2669 if (!pgdat->node_spanned_pages) 2670 return; 2671 2672 #ifdef CONFIG_FLAT_NODE_MEM_MAP 2673 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2674 if (!pgdat->node_mem_map) { 2675 unsigned long size, start, end; 2676 struct page *map; 2677 2678 /* 2679 * The zone's endpoints aren't required to be MAX_ORDER 2680 * aligned but the node_mem_map endpoints must be in order 2681 * for the buddy allocator to function correctly. 2682 */ 2683 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 2684 end = pgdat->node_start_pfn + pgdat->node_spanned_pages; 2685 end = ALIGN(end, MAX_ORDER_NR_PAGES); 2686 size = (end - start) * sizeof(struct page); 2687 map = alloc_remap(pgdat->node_id, size); 2688 if (!map) 2689 map = alloc_bootmem_node(pgdat, size); 2690 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 2691 } 2692 #ifndef CONFIG_NEED_MULTIPLE_NODES 2693 /* 2694 * With no DISCONTIG, the global mem_map is just set as node 0's 2695 */ 2696 if (pgdat == NODE_DATA(0)) { 2697 mem_map = NODE_DATA(0)->node_mem_map; 2698 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2699 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 2700 mem_map -= pgdat->node_start_pfn; 2701 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2702 } 2703 #endif 2704 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 2705 } 2706 2707 void __meminit free_area_init_node(int nid, struct pglist_data *pgdat, 2708 unsigned long *zones_size, unsigned long node_start_pfn, 2709 unsigned long *zholes_size) 2710 { 2711 pgdat->node_id = nid; 2712 pgdat->node_start_pfn = node_start_pfn; 2713 calculate_node_totalpages(pgdat, zones_size, zholes_size); 2714 2715 alloc_node_mem_map(pgdat); 2716 2717 free_area_init_core(pgdat, zones_size, zholes_size); 2718 } 2719 2720 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP 2721 2722 #if MAX_NUMNODES > 1 2723 /* 2724 * Figure out the number of possible node ids. 2725 */ 2726 static void __init setup_nr_node_ids(void) 2727 { 2728 unsigned int node; 2729 unsigned int highest = 0; 2730 2731 for_each_node_mask(node, node_possible_map) 2732 highest = node; 2733 nr_node_ids = highest + 1; 2734 } 2735 #else 2736 static inline void setup_nr_node_ids(void) 2737 { 2738 } 2739 #endif 2740 2741 /** 2742 * add_active_range - Register a range of PFNs backed by physical memory 2743 * @nid: The node ID the range resides on 2744 * @start_pfn: The start PFN of the available physical memory 2745 * @end_pfn: The end PFN of the available physical memory 2746 * 2747 * These ranges are stored in an early_node_map[] and later used by 2748 * free_area_init_nodes() to calculate zone sizes and holes. If the 2749 * range spans a memory hole, it is up to the architecture to ensure 2750 * the memory is not freed by the bootmem allocator. If possible 2751 * the range being registered will be merged with existing ranges. 2752 */ 2753 void __init add_active_range(unsigned int nid, unsigned long start_pfn, 2754 unsigned long end_pfn) 2755 { 2756 int i; 2757 2758 printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) " 2759 "%d entries of %d used\n", 2760 nid, start_pfn, end_pfn, 2761 nr_nodemap_entries, MAX_ACTIVE_REGIONS); 2762 2763 /* Merge with existing active regions if possible */ 2764 for (i = 0; i < nr_nodemap_entries; i++) { 2765 if (early_node_map[i].nid != nid) 2766 continue; 2767 2768 /* Skip if an existing region covers this new one */ 2769 if (start_pfn >= early_node_map[i].start_pfn && 2770 end_pfn <= early_node_map[i].end_pfn) 2771 return; 2772 2773 /* Merge forward if suitable */ 2774 if (start_pfn <= early_node_map[i].end_pfn && 2775 end_pfn > early_node_map[i].end_pfn) { 2776 early_node_map[i].end_pfn = end_pfn; 2777 return; 2778 } 2779 2780 /* Merge backward if suitable */ 2781 if (start_pfn < early_node_map[i].end_pfn && 2782 end_pfn >= early_node_map[i].start_pfn) { 2783 early_node_map[i].start_pfn = start_pfn; 2784 return; 2785 } 2786 } 2787 2788 /* Check that early_node_map is large enough */ 2789 if (i >= MAX_ACTIVE_REGIONS) { 2790 printk(KERN_CRIT "More than %d memory regions, truncating\n", 2791 MAX_ACTIVE_REGIONS); 2792 return; 2793 } 2794 2795 early_node_map[i].nid = nid; 2796 early_node_map[i].start_pfn = start_pfn; 2797 early_node_map[i].end_pfn = end_pfn; 2798 nr_nodemap_entries = i + 1; 2799 } 2800 2801 /** 2802 * shrink_active_range - Shrink an existing registered range of PFNs 2803 * @nid: The node id the range is on that should be shrunk 2804 * @old_end_pfn: The old end PFN of the range 2805 * @new_end_pfn: The new PFN of the range 2806 * 2807 * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node. 2808 * The map is kept at the end physical page range that has already been 2809 * registered with add_active_range(). This function allows an arch to shrink 2810 * an existing registered range. 2811 */ 2812 void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn, 2813 unsigned long new_end_pfn) 2814 { 2815 int i; 2816 2817 /* Find the old active region end and shrink */ 2818 for_each_active_range_index_in_nid(i, nid) 2819 if (early_node_map[i].end_pfn == old_end_pfn) { 2820 early_node_map[i].end_pfn = new_end_pfn; 2821 break; 2822 } 2823 } 2824 2825 /** 2826 * remove_all_active_ranges - Remove all currently registered regions 2827 * 2828 * During discovery, it may be found that a table like SRAT is invalid 2829 * and an alternative discovery method must be used. This function removes 2830 * all currently registered regions. 2831 */ 2832 void __init remove_all_active_ranges(void) 2833 { 2834 memset(early_node_map, 0, sizeof(early_node_map)); 2835 nr_nodemap_entries = 0; 2836 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE 2837 memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn)); 2838 memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn)); 2839 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ 2840 } 2841 2842 /* Compare two active node_active_regions */ 2843 static int __init cmp_node_active_region(const void *a, const void *b) 2844 { 2845 struct node_active_region *arange = (struct node_active_region *)a; 2846 struct node_active_region *brange = (struct node_active_region *)b; 2847 2848 /* Done this way to avoid overflows */ 2849 if (arange->start_pfn > brange->start_pfn) 2850 return 1; 2851 if (arange->start_pfn < brange->start_pfn) 2852 return -1; 2853 2854 return 0; 2855 } 2856 2857 /* sort the node_map by start_pfn */ 2858 static void __init sort_node_map(void) 2859 { 2860 sort(early_node_map, (size_t)nr_nodemap_entries, 2861 sizeof(struct node_active_region), 2862 cmp_node_active_region, NULL); 2863 } 2864 2865 /* Find the lowest pfn for a node */ 2866 unsigned long __init find_min_pfn_for_node(unsigned long nid) 2867 { 2868 int i; 2869 unsigned long min_pfn = ULONG_MAX; 2870 2871 /* Assuming a sorted map, the first range found has the starting pfn */ 2872 for_each_active_range_index_in_nid(i, nid) 2873 min_pfn = min(min_pfn, early_node_map[i].start_pfn); 2874 2875 if (min_pfn == ULONG_MAX) { 2876 printk(KERN_WARNING 2877 "Could not find start_pfn for node %lu\n", nid); 2878 return 0; 2879 } 2880 2881 return min_pfn; 2882 } 2883 2884 /** 2885 * find_min_pfn_with_active_regions - Find the minimum PFN registered 2886 * 2887 * It returns the minimum PFN based on information provided via 2888 * add_active_range(). 2889 */ 2890 unsigned long __init find_min_pfn_with_active_regions(void) 2891 { 2892 return find_min_pfn_for_node(MAX_NUMNODES); 2893 } 2894 2895 /** 2896 * find_max_pfn_with_active_regions - Find the maximum PFN registered 2897 * 2898 * It returns the maximum PFN based on information provided via 2899 * add_active_range(). 2900 */ 2901 unsigned long __init find_max_pfn_with_active_regions(void) 2902 { 2903 int i; 2904 unsigned long max_pfn = 0; 2905 2906 for (i = 0; i < nr_nodemap_entries; i++) 2907 max_pfn = max(max_pfn, early_node_map[i].end_pfn); 2908 2909 return max_pfn; 2910 } 2911 2912 /** 2913 * free_area_init_nodes - Initialise all pg_data_t and zone data 2914 * @max_zone_pfn: an array of max PFNs for each zone 2915 * 2916 * This will call free_area_init_node() for each active node in the system. 2917 * Using the page ranges provided by add_active_range(), the size of each 2918 * zone in each node and their holes is calculated. If the maximum PFN 2919 * between two adjacent zones match, it is assumed that the zone is empty. 2920 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 2921 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 2922 * starts where the previous one ended. For example, ZONE_DMA32 starts 2923 * at arch_max_dma_pfn. 2924 */ 2925 void __init free_area_init_nodes(unsigned long *max_zone_pfn) 2926 { 2927 unsigned long nid; 2928 enum zone_type i; 2929 2930 /* Sort early_node_map as initialisation assumes it is sorted */ 2931 sort_node_map(); 2932 2933 /* Record where the zone boundaries are */ 2934 memset(arch_zone_lowest_possible_pfn, 0, 2935 sizeof(arch_zone_lowest_possible_pfn)); 2936 memset(arch_zone_highest_possible_pfn, 0, 2937 sizeof(arch_zone_highest_possible_pfn)); 2938 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 2939 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 2940 for (i = 1; i < MAX_NR_ZONES; i++) { 2941 arch_zone_lowest_possible_pfn[i] = 2942 arch_zone_highest_possible_pfn[i-1]; 2943 arch_zone_highest_possible_pfn[i] = 2944 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 2945 } 2946 2947 /* Print out the zone ranges */ 2948 printk("Zone PFN ranges:\n"); 2949 for (i = 0; i < MAX_NR_ZONES; i++) 2950 printk(" %-8s %8lu -> %8lu\n", 2951 zone_names[i], 2952 arch_zone_lowest_possible_pfn[i], 2953 arch_zone_highest_possible_pfn[i]); 2954 2955 /* Print out the early_node_map[] */ 2956 printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries); 2957 for (i = 0; i < nr_nodemap_entries; i++) 2958 printk(" %3d: %8lu -> %8lu\n", early_node_map[i].nid, 2959 early_node_map[i].start_pfn, 2960 early_node_map[i].end_pfn); 2961 2962 /* Initialise every node */ 2963 setup_nr_node_ids(); 2964 for_each_online_node(nid) { 2965 pg_data_t *pgdat = NODE_DATA(nid); 2966 free_area_init_node(nid, pgdat, NULL, 2967 find_min_pfn_for_node(nid), NULL); 2968 } 2969 } 2970 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */ 2971 2972 /** 2973 * set_dma_reserve - set the specified number of pages reserved in the first zone 2974 * @new_dma_reserve: The number of pages to mark reserved 2975 * 2976 * The per-cpu batchsize and zone watermarks are determined by present_pages. 2977 * In the DMA zone, a significant percentage may be consumed by kernel image 2978 * and other unfreeable allocations which can skew the watermarks badly. This 2979 * function may optionally be used to account for unfreeable pages in the 2980 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 2981 * smaller per-cpu batchsize. 2982 */ 2983 void __init set_dma_reserve(unsigned long new_dma_reserve) 2984 { 2985 dma_reserve = new_dma_reserve; 2986 } 2987 2988 #ifndef CONFIG_NEED_MULTIPLE_NODES 2989 static bootmem_data_t contig_bootmem_data; 2990 struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; 2991 2992 EXPORT_SYMBOL(contig_page_data); 2993 #endif 2994 2995 void __init free_area_init(unsigned long *zones_size) 2996 { 2997 free_area_init_node(0, NODE_DATA(0), zones_size, 2998 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 2999 } 3000 3001 static int page_alloc_cpu_notify(struct notifier_block *self, 3002 unsigned long action, void *hcpu) 3003 { 3004 int cpu = (unsigned long)hcpu; 3005 3006 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 3007 local_irq_disable(); 3008 __drain_pages(cpu); 3009 vm_events_fold_cpu(cpu); 3010 local_irq_enable(); 3011 refresh_cpu_vm_stats(cpu); 3012 } 3013 return NOTIFY_OK; 3014 } 3015 3016 void __init page_alloc_init(void) 3017 { 3018 hotcpu_notifier(page_alloc_cpu_notify, 0); 3019 } 3020 3021 /* 3022 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 3023 * or min_free_kbytes changes. 3024 */ 3025 static void calculate_totalreserve_pages(void) 3026 { 3027 struct pglist_data *pgdat; 3028 unsigned long reserve_pages = 0; 3029 enum zone_type i, j; 3030 3031 for_each_online_pgdat(pgdat) { 3032 for (i = 0; i < MAX_NR_ZONES; i++) { 3033 struct zone *zone = pgdat->node_zones + i; 3034 unsigned long max = 0; 3035 3036 /* Find valid and maximum lowmem_reserve in the zone */ 3037 for (j = i; j < MAX_NR_ZONES; j++) { 3038 if (zone->lowmem_reserve[j] > max) 3039 max = zone->lowmem_reserve[j]; 3040 } 3041 3042 /* we treat pages_high as reserved pages. */ 3043 max += zone->pages_high; 3044 3045 if (max > zone->present_pages) 3046 max = zone->present_pages; 3047 reserve_pages += max; 3048 } 3049 } 3050 totalreserve_pages = reserve_pages; 3051 } 3052 3053 /* 3054 * setup_per_zone_lowmem_reserve - called whenever 3055 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 3056 * has a correct pages reserved value, so an adequate number of 3057 * pages are left in the zone after a successful __alloc_pages(). 3058 */ 3059 static void setup_per_zone_lowmem_reserve(void) 3060 { 3061 struct pglist_data *pgdat; 3062 enum zone_type j, idx; 3063 3064 for_each_online_pgdat(pgdat) { 3065 for (j = 0; j < MAX_NR_ZONES; j++) { 3066 struct zone *zone = pgdat->node_zones + j; 3067 unsigned long present_pages = zone->present_pages; 3068 3069 zone->lowmem_reserve[j] = 0; 3070 3071 idx = j; 3072 while (idx) { 3073 struct zone *lower_zone; 3074 3075 idx--; 3076 3077 if (sysctl_lowmem_reserve_ratio[idx] < 1) 3078 sysctl_lowmem_reserve_ratio[idx] = 1; 3079 3080 lower_zone = pgdat->node_zones + idx; 3081 lower_zone->lowmem_reserve[j] = present_pages / 3082 sysctl_lowmem_reserve_ratio[idx]; 3083 present_pages += lower_zone->present_pages; 3084 } 3085 } 3086 } 3087 3088 /* update totalreserve_pages */ 3089 calculate_totalreserve_pages(); 3090 } 3091 3092 /** 3093 * setup_per_zone_pages_min - called when min_free_kbytes changes. 3094 * 3095 * Ensures that the pages_{min,low,high} values for each zone are set correctly 3096 * with respect to min_free_kbytes. 3097 */ 3098 void setup_per_zone_pages_min(void) 3099 { 3100 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 3101 unsigned long lowmem_pages = 0; 3102 struct zone *zone; 3103 unsigned long flags; 3104 3105 /* Calculate total number of !ZONE_HIGHMEM pages */ 3106 for_each_zone(zone) { 3107 if (!is_highmem(zone)) 3108 lowmem_pages += zone->present_pages; 3109 } 3110 3111 for_each_zone(zone) { 3112 u64 tmp; 3113 3114 spin_lock_irqsave(&zone->lru_lock, flags); 3115 tmp = (u64)pages_min * zone->present_pages; 3116 do_div(tmp, lowmem_pages); 3117 if (is_highmem(zone)) { 3118 /* 3119 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 3120 * need highmem pages, so cap pages_min to a small 3121 * value here. 3122 * 3123 * The (pages_high-pages_low) and (pages_low-pages_min) 3124 * deltas controls asynch page reclaim, and so should 3125 * not be capped for highmem. 3126 */ 3127 int min_pages; 3128 3129 min_pages = zone->present_pages / 1024; 3130 if (min_pages < SWAP_CLUSTER_MAX) 3131 min_pages = SWAP_CLUSTER_MAX; 3132 if (min_pages > 128) 3133 min_pages = 128; 3134 zone->pages_min = min_pages; 3135 } else { 3136 /* 3137 * If it's a lowmem zone, reserve a number of pages 3138 * proportionate to the zone's size. 3139 */ 3140 zone->pages_min = tmp; 3141 } 3142 3143 zone->pages_low = zone->pages_min + (tmp >> 2); 3144 zone->pages_high = zone->pages_min + (tmp >> 1); 3145 spin_unlock_irqrestore(&zone->lru_lock, flags); 3146 } 3147 3148 /* update totalreserve_pages */ 3149 calculate_totalreserve_pages(); 3150 } 3151 3152 /* 3153 * Initialise min_free_kbytes. 3154 * 3155 * For small machines we want it small (128k min). For large machines 3156 * we want it large (64MB max). But it is not linear, because network 3157 * bandwidth does not increase linearly with machine size. We use 3158 * 3159 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 3160 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 3161 * 3162 * which yields 3163 * 3164 * 16MB: 512k 3165 * 32MB: 724k 3166 * 64MB: 1024k 3167 * 128MB: 1448k 3168 * 256MB: 2048k 3169 * 512MB: 2896k 3170 * 1024MB: 4096k 3171 * 2048MB: 5792k 3172 * 4096MB: 8192k 3173 * 8192MB: 11584k 3174 * 16384MB: 16384k 3175 */ 3176 static int __init init_per_zone_pages_min(void) 3177 { 3178 unsigned long lowmem_kbytes; 3179 3180 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 3181 3182 min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 3183 if (min_free_kbytes < 128) 3184 min_free_kbytes = 128; 3185 if (min_free_kbytes > 65536) 3186 min_free_kbytes = 65536; 3187 setup_per_zone_pages_min(); 3188 setup_per_zone_lowmem_reserve(); 3189 return 0; 3190 } 3191 module_init(init_per_zone_pages_min) 3192 3193 /* 3194 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 3195 * that we can call two helper functions whenever min_free_kbytes 3196 * changes. 3197 */ 3198 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 3199 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3200 { 3201 proc_dointvec(table, write, file, buffer, length, ppos); 3202 if (write) 3203 setup_per_zone_pages_min(); 3204 return 0; 3205 } 3206 3207 #ifdef CONFIG_NUMA 3208 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 3209 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3210 { 3211 struct zone *zone; 3212 int rc; 3213 3214 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3215 if (rc) 3216 return rc; 3217 3218 for_each_zone(zone) 3219 zone->min_unmapped_pages = (zone->present_pages * 3220 sysctl_min_unmapped_ratio) / 100; 3221 return 0; 3222 } 3223 3224 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 3225 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3226 { 3227 struct zone *zone; 3228 int rc; 3229 3230 rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3231 if (rc) 3232 return rc; 3233 3234 for_each_zone(zone) 3235 zone->min_slab_pages = (zone->present_pages * 3236 sysctl_min_slab_ratio) / 100; 3237 return 0; 3238 } 3239 #endif 3240 3241 /* 3242 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 3243 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 3244 * whenever sysctl_lowmem_reserve_ratio changes. 3245 * 3246 * The reserve ratio obviously has absolutely no relation with the 3247 * pages_min watermarks. The lowmem reserve ratio can only make sense 3248 * if in function of the boot time zone sizes. 3249 */ 3250 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 3251 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3252 { 3253 proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3254 setup_per_zone_lowmem_reserve(); 3255 return 0; 3256 } 3257 3258 /* 3259 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 3260 * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist 3261 * can have before it gets flushed back to buddy allocator. 3262 */ 3263 3264 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 3265 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 3266 { 3267 struct zone *zone; 3268 unsigned int cpu; 3269 int ret; 3270 3271 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 3272 if (!write || (ret == -EINVAL)) 3273 return ret; 3274 for_each_zone(zone) { 3275 for_each_online_cpu(cpu) { 3276 unsigned long high; 3277 high = zone->present_pages / percpu_pagelist_fraction; 3278 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 3279 } 3280 } 3281 return 0; 3282 } 3283 3284 int hashdist = HASHDIST_DEFAULT; 3285 3286 #ifdef CONFIG_NUMA 3287 static int __init set_hashdist(char *str) 3288 { 3289 if (!str) 3290 return 0; 3291 hashdist = simple_strtoul(str, &str, 0); 3292 return 1; 3293 } 3294 __setup("hashdist=", set_hashdist); 3295 #endif 3296 3297 /* 3298 * allocate a large system hash table from bootmem 3299 * - it is assumed that the hash table must contain an exact power-of-2 3300 * quantity of entries 3301 * - limit is the number of hash buckets, not the total allocation size 3302 */ 3303 void *__init alloc_large_system_hash(const char *tablename, 3304 unsigned long bucketsize, 3305 unsigned long numentries, 3306 int scale, 3307 int flags, 3308 unsigned int *_hash_shift, 3309 unsigned int *_hash_mask, 3310 unsigned long limit) 3311 { 3312 unsigned long long max = limit; 3313 unsigned long log2qty, size; 3314 void *table = NULL; 3315 3316 /* allow the kernel cmdline to have a say */ 3317 if (!numentries) { 3318 /* round applicable memory size up to nearest megabyte */ 3319 numentries = nr_kernel_pages; 3320 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 3321 numentries >>= 20 - PAGE_SHIFT; 3322 numentries <<= 20 - PAGE_SHIFT; 3323 3324 /* limit to 1 bucket per 2^scale bytes of low memory */ 3325 if (scale > PAGE_SHIFT) 3326 numentries >>= (scale - PAGE_SHIFT); 3327 else 3328 numentries <<= (PAGE_SHIFT - scale); 3329 3330 /* Make sure we've got at least a 0-order allocation.. */ 3331 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 3332 numentries = PAGE_SIZE / bucketsize; 3333 } 3334 numentries = roundup_pow_of_two(numentries); 3335 3336 /* limit allocation size to 1/16 total memory by default */ 3337 if (max == 0) { 3338 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 3339 do_div(max, bucketsize); 3340 } 3341 3342 if (numentries > max) 3343 numentries = max; 3344 3345 log2qty = ilog2(numentries); 3346 3347 do { 3348 size = bucketsize << log2qty; 3349 if (flags & HASH_EARLY) 3350 table = alloc_bootmem(size); 3351 else if (hashdist) 3352 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 3353 else { 3354 unsigned long order; 3355 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 3356 ; 3357 table = (void*) __get_free_pages(GFP_ATOMIC, order); 3358 } 3359 } while (!table && size > PAGE_SIZE && --log2qty); 3360 3361 if (!table) 3362 panic("Failed to allocate %s hash table\n", tablename); 3363 3364 printk("%s hash table entries: %d (order: %d, %lu bytes)\n", 3365 tablename, 3366 (1U << log2qty), 3367 ilog2(size) - PAGE_SHIFT, 3368 size); 3369 3370 if (_hash_shift) 3371 *_hash_shift = log2qty; 3372 if (_hash_mask) 3373 *_hash_mask = (1 << log2qty) - 1; 3374 3375 return table; 3376 } 3377 3378 #ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE 3379 struct page *pfn_to_page(unsigned long pfn) 3380 { 3381 return __pfn_to_page(pfn); 3382 } 3383 unsigned long page_to_pfn(struct page *page) 3384 { 3385 return __page_to_pfn(page); 3386 } 3387 EXPORT_SYMBOL(pfn_to_page); 3388 EXPORT_SYMBOL(page_to_pfn); 3389 #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ 3390 3391 3392