1 /* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17 #include <linux/config.h> 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/swap.h> 21 #include <linux/interrupt.h> 22 #include <linux/pagemap.h> 23 #include <linux/bootmem.h> 24 #include <linux/compiler.h> 25 #include <linux/kernel.h> 26 #include <linux/module.h> 27 #include <linux/suspend.h> 28 #include <linux/pagevec.h> 29 #include <linux/blkdev.h> 30 #include <linux/slab.h> 31 #include <linux/notifier.h> 32 #include <linux/topology.h> 33 #include <linux/sysctl.h> 34 #include <linux/cpu.h> 35 #include <linux/cpuset.h> 36 #include <linux/memory_hotplug.h> 37 #include <linux/nodemask.h> 38 #include <linux/vmalloc.h> 39 #include <linux/mempolicy.h> 40 41 #include <asm/tlbflush.h> 42 #include "internal.h" 43 44 /* 45 * MCD - HACK: Find somewhere to initialize this EARLY, or make this 46 * initializer cleaner 47 */ 48 nodemask_t node_online_map __read_mostly = { { [0] = 1UL } }; 49 EXPORT_SYMBOL(node_online_map); 50 nodemask_t node_possible_map __read_mostly = NODE_MASK_ALL; 51 EXPORT_SYMBOL(node_possible_map); 52 struct pglist_data *pgdat_list __read_mostly; 53 unsigned long totalram_pages __read_mostly; 54 unsigned long totalhigh_pages __read_mostly; 55 long nr_swap_pages; 56 int percpu_pagelist_fraction; 57 58 static void fastcall free_hot_cold_page(struct page *page, int cold); 59 60 /* 61 * results with 256, 32 in the lowmem_reserve sysctl: 62 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 63 * 1G machine -> (16M dma, 784M normal, 224M high) 64 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 65 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 66 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 67 * 68 * TBD: should special case ZONE_DMA32 machines here - in those we normally 69 * don't need any ZONE_NORMAL reservation 70 */ 71 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 }; 72 73 EXPORT_SYMBOL(totalram_pages); 74 75 /* 76 * Used by page_zone() to look up the address of the struct zone whose 77 * id is encoded in the upper bits of page->flags 78 */ 79 struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly; 80 EXPORT_SYMBOL(zone_table); 81 82 static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" }; 83 int min_free_kbytes = 1024; 84 85 unsigned long __initdata nr_kernel_pages; 86 unsigned long __initdata nr_all_pages; 87 88 #ifdef CONFIG_DEBUG_VM 89 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 90 { 91 int ret = 0; 92 unsigned seq; 93 unsigned long pfn = page_to_pfn(page); 94 95 do { 96 seq = zone_span_seqbegin(zone); 97 if (pfn >= zone->zone_start_pfn + zone->spanned_pages) 98 ret = 1; 99 else if (pfn < zone->zone_start_pfn) 100 ret = 1; 101 } while (zone_span_seqretry(zone, seq)); 102 103 return ret; 104 } 105 106 static int page_is_consistent(struct zone *zone, struct 
page *page) 107 { 108 #ifdef CONFIG_HOLES_IN_ZONE 109 if (!pfn_valid(page_to_pfn(page))) 110 return 0; 111 #endif 112 if (zone != page_zone(page)) 113 return 0; 114 115 return 1; 116 } 117 /* 118 * Temporary debugging check for pages not lying within a given zone. 119 */ 120 static int bad_range(struct zone *zone, struct page *page) 121 { 122 if (page_outside_zone_boundaries(zone, page)) 123 return 1; 124 if (!page_is_consistent(zone, page)) 125 return 1; 126 127 return 0; 128 } 129 130 #else 131 static inline int bad_range(struct zone *zone, struct page *page) 132 { 133 return 0; 134 } 135 #endif 136 137 static void bad_page(struct page *page) 138 { 139 printk(KERN_EMERG "Bad page state in process '%s'\n" 140 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" 141 "Trying to fix it up, but a reboot is needed\n" 142 "Backtrace:\n", 143 current->comm, page, (int)(2*sizeof(unsigned long)), 144 (unsigned long)page->flags, page->mapping, 145 page_mapcount(page), page_count(page)); 146 dump_stack(); 147 page->flags &= ~(1 << PG_lru | 148 1 << PG_private | 149 1 << PG_locked | 150 1 << PG_active | 151 1 << PG_dirty | 152 1 << PG_reclaim | 153 1 << PG_slab | 154 1 << PG_swapcache | 155 1 << PG_writeback ); 156 set_page_count(page, 0); 157 reset_page_mapcount(page); 158 page->mapping = NULL; 159 add_taint(TAINT_BAD_PAGE); 160 } 161 162 /* 163 * Higher-order pages are called "compound pages". They are structured thusly: 164 * 165 * The first PAGE_SIZE page is called the "head page". 166 * 167 * The remaining PAGE_SIZE pages are called "tail pages". 168 * 169 * All pages have PG_compound set. All pages have their ->private pointing at 170 * the head page (even the head page has this). 171 * 172 * The first tail page's ->mapping, if non-zero, holds the address of the 173 * compound page's put_page() function. 174 * 175 * The order of the allocation is stored in the first tail page's ->index 176 * This is only for debug at present. This usage means that zero-order pages 177 * may not be compound. 178 */ 179 static void prep_compound_page(struct page *page, unsigned long order) 180 { 181 int i; 182 int nr_pages = 1 << order; 183 184 page[1].mapping = NULL; 185 page[1].index = order; 186 for (i = 0; i < nr_pages; i++) { 187 struct page *p = page + i; 188 189 SetPageCompound(p); 190 set_page_private(p, (unsigned long)page); 191 } 192 } 193 194 static void destroy_compound_page(struct page *page, unsigned long order) 195 { 196 int i; 197 int nr_pages = 1 << order; 198 199 if (unlikely(page[1].index != order)) 200 bad_page(page); 201 202 for (i = 0; i < nr_pages; i++) { 203 struct page *p = page + i; 204 205 if (unlikely(!PageCompound(p) | 206 (page_private(p) != (unsigned long)page))) 207 bad_page(page); 208 ClearPageCompound(p); 209 } 210 } 211 212 /* 213 * function for dealing with page's order in buddy system. 214 * zone->lock is already acquired when we use these. 215 * So, we don't need atomic page->flags operations here. 216 */ 217 static inline unsigned long page_order(struct page *page) { 218 return page_private(page); 219 } 220 221 static inline void set_page_order(struct page *page, int order) { 222 set_page_private(page, order); 223 __SetPagePrivate(page); 224 } 225 226 static inline void rmv_page_order(struct page *page) 227 { 228 __ClearPagePrivate(page); 229 set_page_private(page, 0); 230 } 231 232 /* 233 * Locate the struct page for both the matching buddy in our 234 * pair (buddy1) and the combined O(n+1) page they form (page). 
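 *    (Both indices are offsets inside the MAX_ORDER-aligned block that
 *    contains the page; __free_one_page() computes page_idx as
 *    page_to_pfn(page) masked with (1 << MAX_ORDER) - 1.)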
235 * 236 * 1) Any buddy B1 will have an order O twin B2 which satisfies 237 * the following equation: 238 * B2 = B1 ^ (1 << O) 239 * For example, if the starting buddy (buddy2) is #8 its order 240 * 1 buddy is #10: 241 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10 242 * 243 * 2) Any buddy B will have an order O+1 parent P which 244 * satisfies the following equation: 245 * P = B & ~(1 << O) 246 * 247 * Assumption: *_mem_map is contigious at least up to MAX_ORDER 248 */ 249 static inline struct page * 250 __page_find_buddy(struct page *page, unsigned long page_idx, unsigned int order) 251 { 252 unsigned long buddy_idx = page_idx ^ (1 << order); 253 254 return page + (buddy_idx - page_idx); 255 } 256 257 static inline unsigned long 258 __find_combined_index(unsigned long page_idx, unsigned int order) 259 { 260 return (page_idx & ~(1 << order)); 261 } 262 263 /* 264 * This function checks whether a page is free && is the buddy 265 * we can do coalesce a page and its buddy if 266 * (a) the buddy is not in a hole && 267 * (b) the buddy is free && 268 * (c) the buddy is on the buddy system && 269 * (d) a page and its buddy have the same order. 270 * for recording page's order, we use page_private(page) and PG_private. 271 * 272 */ 273 static inline int page_is_buddy(struct page *page, int order) 274 { 275 #ifdef CONFIG_HOLES_IN_ZONE 276 if (!pfn_valid(page_to_pfn(page))) 277 return 0; 278 #endif 279 280 if (PagePrivate(page) && 281 (page_order(page) == order) && 282 page_count(page) == 0) 283 return 1; 284 return 0; 285 } 286 287 /* 288 * Freeing function for a buddy system allocator. 289 * 290 * The concept of a buddy system is to maintain direct-mapped table 291 * (containing bit values) for memory blocks of various "orders". 292 * The bottom level table contains the map for the smallest allocatable 293 * units of memory (here, pages), and each level above it describes 294 * pairs of units from the levels below, hence, "buddies". 295 * At a high level, all that happens here is marking the table entry 296 * at the bottom level available, and propagating the changes upward 297 * as necessary, plus some accounting needed to play nicely with other 298 * parts of the VM system. 299 * At each level, we keep a list of pages, which are heads of continuous 300 * free pages of length of (1 << order) and marked with PG_Private.Page's 301 * order is recorded in page_private(page) field. 302 * So when we are allocating or freeing one, we can derive the state of the 303 * other. That is, if we allocate a small block, and both were 304 * free, the remainder of the region must be split into blocks. 305 * If a block is freed, and its buddy is also free, then this 306 * triggers coalescing into a block of larger size. 307 * 308 * -- wli 309 */ 310 311 static inline void __free_one_page(struct page *page, 312 struct zone *zone, unsigned int order) 313 { 314 unsigned long page_idx; 315 int order_size = 1 << order; 316 317 if (unlikely(PageCompound(page))) 318 destroy_compound_page(page, order); 319 320 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 321 322 BUG_ON(page_idx & (order_size - 1)); 323 BUG_ON(bad_range(zone, page)); 324 325 zone->free_pages += order_size; 326 while (order < MAX_ORDER-1) { 327 unsigned long combined_idx; 328 struct free_area *area; 329 struct page *buddy; 330 331 buddy = __page_find_buddy(page, page_idx, order); 332 if (!page_is_buddy(buddy, order)) 333 break; /* Move the buddy up one level. 
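		 * The buddy is free and of the same order, so the pair can
		 * be merged.  As an illustration (indices made up): freeing
		 * index 8 at order 0 when index 9 is free yields an order-1
		 * block at index 8; if the order-1 block at index 10 is also
		 * free, the next pass merges again into an order-2 block at
		 * index 8.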
*/ 334 335 list_del(&buddy->lru); 336 area = zone->free_area + order; 337 area->nr_free--; 338 rmv_page_order(buddy); 339 combined_idx = __find_combined_index(page_idx, order); 340 page = page + (combined_idx - page_idx); 341 page_idx = combined_idx; 342 order++; 343 } 344 set_page_order(page, order); 345 list_add(&page->lru, &zone->free_area[order].free_list); 346 zone->free_area[order].nr_free++; 347 } 348 349 static inline int free_pages_check(struct page *page) 350 { 351 if (unlikely(page_mapcount(page) | 352 (page->mapping != NULL) | 353 (page_count(page) != 0) | 354 (page->flags & ( 355 1 << PG_lru | 356 1 << PG_private | 357 1 << PG_locked | 358 1 << PG_active | 359 1 << PG_reclaim | 360 1 << PG_slab | 361 1 << PG_swapcache | 362 1 << PG_writeback | 363 1 << PG_reserved )))) 364 bad_page(page); 365 if (PageDirty(page)) 366 __ClearPageDirty(page); 367 /* 368 * For now, we report if PG_reserved was found set, but do not 369 * clear it, and do not free the page. But we shall soon need 370 * to do more, for when the ZERO_PAGE count wraps negative. 371 */ 372 return PageReserved(page); 373 } 374 375 /* 376 * Frees a list of pages. 377 * Assumes all pages on list are in same zone, and of same order. 378 * count is the number of pages to free. 379 * 380 * If the zone was previously in an "all pages pinned" state then look to 381 * see if this freeing clears that state. 382 * 383 * And clear the zone's pages_scanned counter, to hold off the "all pages are 384 * pinned" detection logic. 385 */ 386 static void free_pages_bulk(struct zone *zone, int count, 387 struct list_head *list, int order) 388 { 389 spin_lock(&zone->lock); 390 zone->all_unreclaimable = 0; 391 zone->pages_scanned = 0; 392 while (count--) { 393 struct page *page; 394 395 BUG_ON(list_empty(list)); 396 page = list_entry(list->prev, struct page, lru); 397 /* have to delete it as __free_one_page list manipulates */ 398 list_del(&page->lru); 399 __free_one_page(page, zone, order); 400 } 401 spin_unlock(&zone->lock); 402 } 403 404 static void free_one_page(struct zone *zone, struct page *page, int order) 405 { 406 LIST_HEAD(list); 407 list_add(&page->lru, &list); 408 free_pages_bulk(zone, 1, &list, order); 409 } 410 411 static void __free_pages_ok(struct page *page, unsigned int order) 412 { 413 unsigned long flags; 414 int i; 415 int reserved = 0; 416 417 arch_free_page(page, order); 418 if (!PageHighMem(page)) 419 mutex_debug_check_no_locks_freed(page_address(page), 420 page_address(page+(1<<order))); 421 422 #ifndef CONFIG_MMU 423 for (i = 1 ; i < (1 << order) ; ++i) 424 __put_page(page + i); 425 #endif 426 427 for (i = 0 ; i < (1 << order) ; ++i) 428 reserved += free_pages_check(page + i); 429 if (reserved) 430 return; 431 432 kernel_map_pages(page, 1 << order, 0); 433 local_irq_save(flags); 434 __mod_page_state(pgfree, 1 << order); 435 free_one_page(page_zone(page), page, order); 436 local_irq_restore(flags); 437 } 438 439 /* 440 * permit the bootmem allocator to evade page validation on high-order frees 441 */ 442 void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order) 443 { 444 if (order == 0) { 445 __ClearPageReserved(page); 446 set_page_count(page, 0); 447 448 free_hot_cold_page(page, 0); 449 } else { 450 LIST_HEAD(list); 451 int loop; 452 453 for (loop = 0; loop < BITS_PER_LONG; loop++) { 454 struct page *p = &page[loop]; 455 456 if (loop + 16 < BITS_PER_LONG) 457 prefetchw(p + 16); 458 __ClearPageReserved(p); 459 set_page_count(p, 0); 460 } 461 462 arch_free_page(page, order); 463 464 
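	/*
	 * As the comment above the function says, bootmem frees skip the
	 * usual free_pages_check() validation: the loop above only cleared
	 * PG_reserved and zeroed the reference counts, and the whole
	 * order-sized block now goes back through free_pages_bulk() below.
	 */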
mod_page_state(pgfree, 1 << order); 465 466 list_add(&page->lru, &list); 467 kernel_map_pages(page, 1 << order, 0); 468 free_pages_bulk(page_zone(page), 1, &list, order); 469 } 470 } 471 472 473 /* 474 * The order of subdivision here is critical for the IO subsystem. 475 * Please do not alter this order without good reasons and regression 476 * testing. Specifically, as large blocks of memory are subdivided, 477 * the order in which smaller blocks are delivered depends on the order 478 * they're subdivided in this function. This is the primary factor 479 * influencing the order in which pages are delivered to the IO 480 * subsystem according to empirical testing, and this is also justified 481 * by considering the behavior of a buddy system containing a single 482 * large block of memory acted on by a series of small allocations. 483 * This behavior is a critical factor in sglist merging's success. 484 * 485 * -- wli 486 */ 487 static inline void expand(struct zone *zone, struct page *page, 488 int low, int high, struct free_area *area) 489 { 490 unsigned long size = 1 << high; 491 492 while (high > low) { 493 area--; 494 high--; 495 size >>= 1; 496 BUG_ON(bad_range(zone, &page[size])); 497 list_add(&page[size].lru, &area->free_list); 498 area->nr_free++; 499 set_page_order(&page[size], high); 500 } 501 } 502 503 /* 504 * This page is about to be returned from the page allocator 505 */ 506 static int prep_new_page(struct page *page, int order) 507 { 508 if (unlikely(page_mapcount(page) | 509 (page->mapping != NULL) | 510 (page_count(page) != 0) | 511 (page->flags & ( 512 1 << PG_lru | 513 1 << PG_private | 514 1 << PG_locked | 515 1 << PG_active | 516 1 << PG_dirty | 517 1 << PG_reclaim | 518 1 << PG_slab | 519 1 << PG_swapcache | 520 1 << PG_writeback | 521 1 << PG_reserved )))) 522 bad_page(page); 523 524 /* 525 * For now, we report if PG_reserved was found set, but do not 526 * clear it, and do not allocate the page: as a safety net. 527 */ 528 if (PageReserved(page)) 529 return 1; 530 531 page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 532 1 << PG_referenced | 1 << PG_arch_1 | 533 1 << PG_checked | 1 << PG_mappedtodisk); 534 set_page_private(page, 0); 535 set_page_refs(page, order); 536 kernel_map_pages(page, 1 << order, 1); 537 return 0; 538 } 539 540 /* 541 * Do the hard work of removing an element from the buddy allocator. 542 * Call me with the zone->lock already held. 543 */ 544 static struct page *__rmqueue(struct zone *zone, unsigned int order) 545 { 546 struct free_area * area; 547 unsigned int current_order; 548 struct page *page; 549 550 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 551 area = zone->free_area + current_order; 552 if (list_empty(&area->free_list)) 553 continue; 554 555 page = list_entry(area->free_list.next, struct page, lru); 556 list_del(&page->lru); 557 rmv_page_order(page); 558 area->nr_free--; 559 zone->free_pages -= 1UL << order; 560 expand(zone, page, order, current_order, area); 561 return page; 562 } 563 564 return NULL; 565 } 566 567 /* 568 * Obtain a specified number of elements from the buddy allocator, all under 569 * a single hold of the lock, for efficiency. Add them to the supplied list. 570 * Returns the number of new pages which were placed at *list. 
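 * buffered_rmqueue() uses this to refill a per-cpu list with pcp->batch
 * order-0 pages in one go rather than taking zone->lock once per page.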
571 */ 572 static int rmqueue_bulk(struct zone *zone, unsigned int order, 573 unsigned long count, struct list_head *list) 574 { 575 int i; 576 577 spin_lock(&zone->lock); 578 for (i = 0; i < count; ++i) { 579 struct page *page = __rmqueue(zone, order); 580 if (unlikely(page == NULL)) 581 break; 582 list_add_tail(&page->lru, list); 583 } 584 spin_unlock(&zone->lock); 585 return i; 586 } 587 588 #ifdef CONFIG_NUMA 589 /* Called from the slab reaper to drain remote pagesets */ 590 void drain_remote_pages(void) 591 { 592 struct zone *zone; 593 int i; 594 unsigned long flags; 595 596 local_irq_save(flags); 597 for_each_zone(zone) { 598 struct per_cpu_pageset *pset; 599 600 /* Do not drain local pagesets */ 601 if (zone->zone_pgdat->node_id == numa_node_id()) 602 continue; 603 604 pset = zone_pcp(zone, smp_processor_id()); 605 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 606 struct per_cpu_pages *pcp; 607 608 pcp = &pset->pcp[i]; 609 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 610 pcp->count = 0; 611 } 612 } 613 local_irq_restore(flags); 614 } 615 #endif 616 617 #if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU) 618 static void __drain_pages(unsigned int cpu) 619 { 620 unsigned long flags; 621 struct zone *zone; 622 int i; 623 624 for_each_zone(zone) { 625 struct per_cpu_pageset *pset; 626 627 pset = zone_pcp(zone, cpu); 628 for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) { 629 struct per_cpu_pages *pcp; 630 631 pcp = &pset->pcp[i]; 632 local_irq_save(flags); 633 free_pages_bulk(zone, pcp->count, &pcp->list, 0); 634 pcp->count = 0; 635 local_irq_restore(flags); 636 } 637 } 638 } 639 #endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */ 640 641 #ifdef CONFIG_PM 642 643 void mark_free_pages(struct zone *zone) 644 { 645 unsigned long zone_pfn, flags; 646 int order; 647 struct list_head *curr; 648 649 if (!zone->spanned_pages) 650 return; 651 652 spin_lock_irqsave(&zone->lock, flags); 653 for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) 654 ClearPageNosaveFree(pfn_to_page(zone_pfn + zone->zone_start_pfn)); 655 656 for (order = MAX_ORDER - 1; order >= 0; --order) 657 list_for_each(curr, &zone->free_area[order].free_list) { 658 unsigned long start_pfn, i; 659 660 start_pfn = page_to_pfn(list_entry(curr, struct page, lru)); 661 662 for (i=0; i < (1<<order); i++) 663 SetPageNosaveFree(pfn_to_page(start_pfn+i)); 664 } 665 spin_unlock_irqrestore(&zone->lock, flags); 666 } 667 668 /* 669 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 
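 * Runs with local interrupts disabled so the per-cpu hot and cold
 * lists cannot change under __drain_pages() while it hands them back
 * with free_pages_bulk().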
670 */ 671 void drain_local_pages(void) 672 { 673 unsigned long flags; 674 675 local_irq_save(flags); 676 __drain_pages(smp_processor_id()); 677 local_irq_restore(flags); 678 } 679 #endif /* CONFIG_PM */ 680 681 static void zone_statistics(struct zonelist *zonelist, struct zone *z, int cpu) 682 { 683 #ifdef CONFIG_NUMA 684 pg_data_t *pg = z->zone_pgdat; 685 pg_data_t *orig = zonelist->zones[0]->zone_pgdat; 686 struct per_cpu_pageset *p; 687 688 p = zone_pcp(z, cpu); 689 if (pg == orig) { 690 p->numa_hit++; 691 } else { 692 p->numa_miss++; 693 zone_pcp(zonelist->zones[0], cpu)->numa_foreign++; 694 } 695 if (pg == NODE_DATA(numa_node_id())) 696 p->local_node++; 697 else 698 p->other_node++; 699 #endif 700 } 701 702 /* 703 * Free a 0-order page 704 */ 705 static void fastcall free_hot_cold_page(struct page *page, int cold) 706 { 707 struct zone *zone = page_zone(page); 708 struct per_cpu_pages *pcp; 709 unsigned long flags; 710 711 arch_free_page(page, 0); 712 713 if (PageAnon(page)) 714 page->mapping = NULL; 715 if (free_pages_check(page)) 716 return; 717 718 kernel_map_pages(page, 1, 0); 719 720 pcp = &zone_pcp(zone, get_cpu())->pcp[cold]; 721 local_irq_save(flags); 722 __inc_page_state(pgfree); 723 list_add(&page->lru, &pcp->list); 724 pcp->count++; 725 if (pcp->count >= pcp->high) { 726 free_pages_bulk(zone, pcp->batch, &pcp->list, 0); 727 pcp->count -= pcp->batch; 728 } 729 local_irq_restore(flags); 730 put_cpu(); 731 } 732 733 void fastcall free_hot_page(struct page *page) 734 { 735 free_hot_cold_page(page, 0); 736 } 737 738 void fastcall free_cold_page(struct page *page) 739 { 740 free_hot_cold_page(page, 1); 741 } 742 743 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 744 { 745 int i; 746 747 BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM); 748 for(i = 0; i < (1 << order); i++) 749 clear_highpage(page + i); 750 } 751 752 /* 753 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 754 * we cheat by calling it from here, in the order > 0 path. Saves a branch 755 * or two. 
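 * Order-0 requests are served from the per-cpu hot/cold lists
 * (refilled through rmqueue_bulk()); anything larger goes straight to
 * __rmqueue() under zone->lock, which is why the __GFP_COMP handling
 * below only applies to the order > 0 path.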
756 */ 757 static struct page *buffered_rmqueue(struct zonelist *zonelist, 758 struct zone *zone, int order, gfp_t gfp_flags) 759 { 760 unsigned long flags; 761 struct page *page; 762 int cold = !!(gfp_flags & __GFP_COLD); 763 int cpu; 764 765 again: 766 cpu = get_cpu(); 767 if (likely(order == 0)) { 768 struct per_cpu_pages *pcp; 769 770 pcp = &zone_pcp(zone, cpu)->pcp[cold]; 771 local_irq_save(flags); 772 if (!pcp->count) { 773 pcp->count += rmqueue_bulk(zone, 0, 774 pcp->batch, &pcp->list); 775 if (unlikely(!pcp->count)) 776 goto failed; 777 } 778 page = list_entry(pcp->list.next, struct page, lru); 779 list_del(&page->lru); 780 pcp->count--; 781 } else { 782 spin_lock_irqsave(&zone->lock, flags); 783 page = __rmqueue(zone, order); 784 spin_unlock(&zone->lock); 785 if (!page) 786 goto failed; 787 } 788 789 __mod_page_state_zone(zone, pgalloc, 1 << order); 790 zone_statistics(zonelist, zone, cpu); 791 local_irq_restore(flags); 792 put_cpu(); 793 794 BUG_ON(bad_range(zone, page)); 795 if (prep_new_page(page, order)) 796 goto again; 797 798 if (gfp_flags & __GFP_ZERO) 799 prep_zero_page(page, order, gfp_flags); 800 801 if (order && (gfp_flags & __GFP_COMP)) 802 prep_compound_page(page, order); 803 return page; 804 805 failed: 806 local_irq_restore(flags); 807 put_cpu(); 808 return NULL; 809 } 810 811 #define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ 812 #define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ 813 #define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ 814 #define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ 815 #define ALLOC_HARDER 0x10 /* try to alloc harder */ 816 #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ 817 #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ 818 819 /* 820 * Return 1 if free pages are above 'mark'. This takes into account the order 821 * of the allocation. 822 */ 823 int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 824 int classzone_idx, int alloc_flags) 825 { 826 /* free_pages my go negative - that's OK */ 827 long min = mark, free_pages = z->free_pages - (1 << order) + 1; 828 int o; 829 830 if (alloc_flags & ALLOC_HIGH) 831 min -= min / 2; 832 if (alloc_flags & ALLOC_HARDER) 833 min -= min / 4; 834 835 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 836 return 0; 837 for (o = 0; o < order; o++) { 838 /* At the next order, this order's pages become unavailable */ 839 free_pages -= z->free_area[o].nr_free << o; 840 841 /* Require fewer higher order pages to be free */ 842 min >>= 1; 843 844 if (free_pages <= min) 845 return 0; 846 } 847 return 1; 848 } 849 850 /* 851 * get_page_from_freeliest goes through the zonelist trying to allocate 852 * a page. 853 */ 854 static struct page * 855 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, 856 struct zonelist *zonelist, int alloc_flags) 857 { 858 struct zone **z = zonelist->zones; 859 struct page *page = NULL; 860 int classzone_idx = zone_idx(*z); 861 862 /* 863 * Go through the zonelist once, looking for a zone with enough free. 864 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 
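 * For each candidate zone the cpuset check runs first (when
 * ALLOC_CPUSET is set), then the watermark selected by
 * ALLOC_WMARK_{MIN,LOW,HIGH} is tested with zone_watermark_ok(),
 * unless ALLOC_NO_WATERMARKS allows the reserves to be ignored.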
865 */ 866 do { 867 if ((alloc_flags & ALLOC_CPUSET) && 868 !cpuset_zone_allowed(*z, gfp_mask)) 869 continue; 870 871 if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { 872 unsigned long mark; 873 if (alloc_flags & ALLOC_WMARK_MIN) 874 mark = (*z)->pages_min; 875 else if (alloc_flags & ALLOC_WMARK_LOW) 876 mark = (*z)->pages_low; 877 else 878 mark = (*z)->pages_high; 879 if (!zone_watermark_ok(*z, order, mark, 880 classzone_idx, alloc_flags)) 881 continue; 882 } 883 884 page = buffered_rmqueue(zonelist, *z, order, gfp_mask); 885 if (page) { 886 break; 887 } 888 } while (*(++z) != NULL); 889 return page; 890 } 891 892 /* 893 * This is the 'heart' of the zoned buddy allocator. 894 */ 895 struct page * fastcall 896 __alloc_pages(gfp_t gfp_mask, unsigned int order, 897 struct zonelist *zonelist) 898 { 899 const gfp_t wait = gfp_mask & __GFP_WAIT; 900 struct zone **z; 901 struct page *page; 902 struct reclaim_state reclaim_state; 903 struct task_struct *p = current; 904 int do_retry; 905 int alloc_flags; 906 int did_some_progress; 907 908 might_sleep_if(wait); 909 910 restart: 911 z = zonelist->zones; /* the list of zones suitable for gfp_mask */ 912 913 if (unlikely(*z == NULL)) { 914 /* Should this ever happen?? */ 915 return NULL; 916 } 917 918 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 919 zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); 920 if (page) 921 goto got_pg; 922 923 do { 924 wakeup_kswapd(*z, order); 925 } while (*(++z)); 926 927 /* 928 * OK, we're below the kswapd watermark and have kicked background 929 * reclaim. Now things get more complex, so set up alloc_flags according 930 * to how we want to proceed. 931 * 932 * The caller may dip into page reserves a bit more if the caller 933 * cannot run direct reclaim, or if the caller has realtime scheduling 934 * policy. 935 */ 936 alloc_flags = ALLOC_WMARK_MIN; 937 if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) 938 alloc_flags |= ALLOC_HARDER; 939 if (gfp_mask & __GFP_HIGH) 940 alloc_flags |= ALLOC_HIGH; 941 alloc_flags |= ALLOC_CPUSET; 942 943 /* 944 * Go through the zonelist again. Let __GFP_HIGH and allocations 945 * coming from realtime tasks go deeper into reserves. 946 * 947 * This is the last chance, in general, before the goto nopage. 948 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 949 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 950 */ 951 page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags); 952 if (page) 953 goto got_pg; 954 955 /* This allocation should allow future memory freeing. 
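 * Tasks that are themselves reclaiming (PF_MEMALLOC) or that were
 * picked by the OOM killer (TIF_MEMDIE) may ignore the watermarks
 * completely, unless __GFP_NOMEMALLOC forbids it; __GFP_NOFAIL
 * callers on this path keep retrying after blk_congestion_wait().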
*/ 956 957 if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) 958 && !in_interrupt()) { 959 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 960 nofail_alloc: 961 /* go through the zonelist yet again, ignoring mins */ 962 page = get_page_from_freelist(gfp_mask, order, 963 zonelist, ALLOC_NO_WATERMARKS); 964 if (page) 965 goto got_pg; 966 if (gfp_mask & __GFP_NOFAIL) { 967 blk_congestion_wait(WRITE, HZ/50); 968 goto nofail_alloc; 969 } 970 } 971 goto nopage; 972 } 973 974 /* Atomic allocations - we can't balance anything */ 975 if (!wait) 976 goto nopage; 977 978 rebalance: 979 cond_resched(); 980 981 /* We now go into synchronous reclaim */ 982 cpuset_memory_pressure_bump(); 983 p->flags |= PF_MEMALLOC; 984 reclaim_state.reclaimed_slab = 0; 985 p->reclaim_state = &reclaim_state; 986 987 did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask); 988 989 p->reclaim_state = NULL; 990 p->flags &= ~PF_MEMALLOC; 991 992 cond_resched(); 993 994 if (likely(did_some_progress)) { 995 page = get_page_from_freelist(gfp_mask, order, 996 zonelist, alloc_flags); 997 if (page) 998 goto got_pg; 999 } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) { 1000 /* 1001 * Go through the zonelist yet one more time, keep 1002 * very high watermark here, this is only to catch 1003 * a parallel oom killing, we must fail if we're still 1004 * under heavy pressure. 1005 */ 1006 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, 1007 zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); 1008 if (page) 1009 goto got_pg; 1010 1011 out_of_memory(gfp_mask, order); 1012 goto restart; 1013 } 1014 1015 /* 1016 * Don't let big-order allocations loop unless the caller explicitly 1017 * requests that. Wait for some write requests to complete then retry. 1018 * 1019 * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order 1020 * <= 3, but that may not be true in other implementations. 1021 */ 1022 do_retry = 0; 1023 if (!(gfp_mask & __GFP_NORETRY)) { 1024 if ((order <= 3) || (gfp_mask & __GFP_REPEAT)) 1025 do_retry = 1; 1026 if (gfp_mask & __GFP_NOFAIL) 1027 do_retry = 1; 1028 } 1029 if (do_retry) { 1030 blk_congestion_wait(WRITE, HZ/50); 1031 goto rebalance; 1032 } 1033 1034 nopage: 1035 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) { 1036 printk(KERN_WARNING "%s: page allocation failure." 1037 " order:%d, mode:0x%x\n", 1038 p->comm, order, gfp_mask); 1039 dump_stack(); 1040 show_mem(); 1041 } 1042 got_pg: 1043 return page; 1044 } 1045 1046 EXPORT_SYMBOL(__alloc_pages); 1047 1048 /* 1049 * Common helper functions. 
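 * These wrappers deal in kernel virtual addresses rather than struct
 * page pointers: __get_free_pages() hands back page_address() of the
 * allocation and free_pages() converts back with virt_to_page().
 * That is also why get_zeroed_page() rejects __GFP_HIGHMEM - a highmem
 * page may have no kernel mapping whose address could be returned.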
1050 */ 1051 fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 1052 { 1053 struct page * page; 1054 page = alloc_pages(gfp_mask, order); 1055 if (!page) 1056 return 0; 1057 return (unsigned long) page_address(page); 1058 } 1059 1060 EXPORT_SYMBOL(__get_free_pages); 1061 1062 fastcall unsigned long get_zeroed_page(gfp_t gfp_mask) 1063 { 1064 struct page * page; 1065 1066 /* 1067 * get_zeroed_page() returns a 32-bit address, which cannot represent 1068 * a highmem page 1069 */ 1070 BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 1071 1072 page = alloc_pages(gfp_mask | __GFP_ZERO, 0); 1073 if (page) 1074 return (unsigned long) page_address(page); 1075 return 0; 1076 } 1077 1078 EXPORT_SYMBOL(get_zeroed_page); 1079 1080 void __pagevec_free(struct pagevec *pvec) 1081 { 1082 int i = pagevec_count(pvec); 1083 1084 while (--i >= 0) 1085 free_hot_cold_page(pvec->pages[i], pvec->cold); 1086 } 1087 1088 fastcall void __free_pages(struct page *page, unsigned int order) 1089 { 1090 if (put_page_testzero(page)) { 1091 if (order == 0) 1092 free_hot_page(page); 1093 else 1094 __free_pages_ok(page, order); 1095 } 1096 } 1097 1098 EXPORT_SYMBOL(__free_pages); 1099 1100 fastcall void free_pages(unsigned long addr, unsigned int order) 1101 { 1102 if (addr != 0) { 1103 BUG_ON(!virt_addr_valid((void *)addr)); 1104 __free_pages(virt_to_page((void *)addr), order); 1105 } 1106 } 1107 1108 EXPORT_SYMBOL(free_pages); 1109 1110 /* 1111 * Total amount of free (allocatable) RAM: 1112 */ 1113 unsigned int nr_free_pages(void) 1114 { 1115 unsigned int sum = 0; 1116 struct zone *zone; 1117 1118 for_each_zone(zone) 1119 sum += zone->free_pages; 1120 1121 return sum; 1122 } 1123 1124 EXPORT_SYMBOL(nr_free_pages); 1125 1126 #ifdef CONFIG_NUMA 1127 unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) 1128 { 1129 unsigned int i, sum = 0; 1130 1131 for (i = 0; i < MAX_NR_ZONES; i++) 1132 sum += pgdat->node_zones[i].free_pages; 1133 1134 return sum; 1135 } 1136 #endif 1137 1138 static unsigned int nr_free_zone_pages(int offset) 1139 { 1140 /* Just pick one node, since fallback list is circular */ 1141 pg_data_t *pgdat = NODE_DATA(numa_node_id()); 1142 unsigned int sum = 0; 1143 1144 struct zonelist *zonelist = pgdat->node_zonelists + offset; 1145 struct zone **zonep = zonelist->zones; 1146 struct zone *zone; 1147 1148 for (zone = *zonep++; zone; zone = *zonep++) { 1149 unsigned long size = zone->present_pages; 1150 unsigned long high = zone->pages_high; 1151 if (size > high) 1152 sum += size - high; 1153 } 1154 1155 return sum; 1156 } 1157 1158 /* 1159 * Amount of free RAM allocatable within ZONE_DMA and ZONE_NORMAL 1160 */ 1161 unsigned int nr_free_buffer_pages(void) 1162 { 1163 return nr_free_zone_pages(gfp_zone(GFP_USER)); 1164 } 1165 1166 /* 1167 * Amount of free RAM allocatable within all zones 1168 */ 1169 unsigned int nr_free_pagecache_pages(void) 1170 { 1171 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER)); 1172 } 1173 1174 #ifdef CONFIG_HIGHMEM 1175 unsigned int nr_free_highpages (void) 1176 { 1177 pg_data_t *pgdat; 1178 unsigned int pages = 0; 1179 1180 for_each_pgdat(pgdat) 1181 pages += pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1182 1183 return pages; 1184 } 1185 #endif 1186 1187 #ifdef CONFIG_NUMA 1188 static void show_node(struct zone *zone) 1189 { 1190 printk("Node %d ", zone->zone_pgdat->node_id); 1191 } 1192 #else 1193 #define show_node(zone) do { } while (0) 1194 #endif 1195 1196 /* 1197 * Accumulate the page_state information across all CPUs. 
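 * Each CPU keeps its own struct page_state in the per-cpu page_states
 * area; __get_page_state() walks the requested cpumask and sums the
 * first 'nr' counters of each CPU's copy, field by field, into *ret.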
1198 * The result is unavoidably approximate - it can change 1199 * during and after execution of this function. 1200 */ 1201 static DEFINE_PER_CPU(struct page_state, page_states) = {0}; 1202 1203 atomic_t nr_pagecache = ATOMIC_INIT(0); 1204 EXPORT_SYMBOL(nr_pagecache); 1205 #ifdef CONFIG_SMP 1206 DEFINE_PER_CPU(long, nr_pagecache_local) = 0; 1207 #endif 1208 1209 static void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) 1210 { 1211 int cpu = 0; 1212 1213 memset(ret, 0, sizeof(*ret)); 1214 cpus_and(*cpumask, *cpumask, cpu_online_map); 1215 1216 cpu = first_cpu(*cpumask); 1217 while (cpu < NR_CPUS) { 1218 unsigned long *in, *out, off; 1219 1220 in = (unsigned long *)&per_cpu(page_states, cpu); 1221 1222 cpu = next_cpu(cpu, *cpumask); 1223 1224 if (cpu < NR_CPUS) 1225 prefetch(&per_cpu(page_states, cpu)); 1226 1227 out = (unsigned long *)ret; 1228 for (off = 0; off < nr; off++) 1229 *out++ += *in++; 1230 } 1231 } 1232 1233 void get_page_state_node(struct page_state *ret, int node) 1234 { 1235 int nr; 1236 cpumask_t mask = node_to_cpumask(node); 1237 1238 nr = offsetof(struct page_state, GET_PAGE_STATE_LAST); 1239 nr /= sizeof(unsigned long); 1240 1241 __get_page_state(ret, nr+1, &mask); 1242 } 1243 1244 void get_page_state(struct page_state *ret) 1245 { 1246 int nr; 1247 cpumask_t mask = CPU_MASK_ALL; 1248 1249 nr = offsetof(struct page_state, GET_PAGE_STATE_LAST); 1250 nr /= sizeof(unsigned long); 1251 1252 __get_page_state(ret, nr + 1, &mask); 1253 } 1254 1255 void get_full_page_state(struct page_state *ret) 1256 { 1257 cpumask_t mask = CPU_MASK_ALL; 1258 1259 __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask); 1260 } 1261 1262 unsigned long read_page_state_offset(unsigned long offset) 1263 { 1264 unsigned long ret = 0; 1265 int cpu; 1266 1267 for_each_online_cpu(cpu) { 1268 unsigned long in; 1269 1270 in = (unsigned long)&per_cpu(page_states, cpu) + offset; 1271 ret += *((unsigned long *)in); 1272 } 1273 return ret; 1274 } 1275 1276 void __mod_page_state_offset(unsigned long offset, unsigned long delta) 1277 { 1278 void *ptr; 1279 1280 ptr = &__get_cpu_var(page_states); 1281 *(unsigned long *)(ptr + offset) += delta; 1282 } 1283 EXPORT_SYMBOL(__mod_page_state_offset); 1284 1285 void mod_page_state_offset(unsigned long offset, unsigned long delta) 1286 { 1287 unsigned long flags; 1288 void *ptr; 1289 1290 local_irq_save(flags); 1291 ptr = &__get_cpu_var(page_states); 1292 *(unsigned long *)(ptr + offset) += delta; 1293 local_irq_restore(flags); 1294 } 1295 EXPORT_SYMBOL(mod_page_state_offset); 1296 1297 void __get_zone_counts(unsigned long *active, unsigned long *inactive, 1298 unsigned long *free, struct pglist_data *pgdat) 1299 { 1300 struct zone *zones = pgdat->node_zones; 1301 int i; 1302 1303 *active = 0; 1304 *inactive = 0; 1305 *free = 0; 1306 for (i = 0; i < MAX_NR_ZONES; i++) { 1307 *active += zones[i].nr_active; 1308 *inactive += zones[i].nr_inactive; 1309 *free += zones[i].free_pages; 1310 } 1311 } 1312 1313 void get_zone_counts(unsigned long *active, 1314 unsigned long *inactive, unsigned long *free) 1315 { 1316 struct pglist_data *pgdat; 1317 1318 *active = 0; 1319 *inactive = 0; 1320 *free = 0; 1321 for_each_pgdat(pgdat) { 1322 unsigned long l, m, n; 1323 __get_zone_counts(&l, &m, &n, pgdat); 1324 *active += l; 1325 *inactive += m; 1326 *free += n; 1327 } 1328 } 1329 1330 void si_meminfo(struct sysinfo *val) 1331 { 1332 val->totalram = totalram_pages; 1333 val->sharedram = 0; 1334 val->freeram = nr_free_pages(); 1335 val->bufferram = 
nr_blockdev_pages(); 1336 #ifdef CONFIG_HIGHMEM 1337 val->totalhigh = totalhigh_pages; 1338 val->freehigh = nr_free_highpages(); 1339 #else 1340 val->totalhigh = 0; 1341 val->freehigh = 0; 1342 #endif 1343 val->mem_unit = PAGE_SIZE; 1344 } 1345 1346 EXPORT_SYMBOL(si_meminfo); 1347 1348 #ifdef CONFIG_NUMA 1349 void si_meminfo_node(struct sysinfo *val, int nid) 1350 { 1351 pg_data_t *pgdat = NODE_DATA(nid); 1352 1353 val->totalram = pgdat->node_present_pages; 1354 val->freeram = nr_free_pages_pgdat(pgdat); 1355 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages; 1356 val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages; 1357 val->mem_unit = PAGE_SIZE; 1358 } 1359 #endif 1360 1361 #define K(x) ((x) << (PAGE_SHIFT-10)) 1362 1363 /* 1364 * Show free area list (used inside shift_scroll-lock stuff) 1365 * We also calculate the percentage fragmentation. We do this by counting the 1366 * memory on each free list with the exception of the first item on the list. 1367 */ 1368 void show_free_areas(void) 1369 { 1370 struct page_state ps; 1371 int cpu, temperature; 1372 unsigned long active; 1373 unsigned long inactive; 1374 unsigned long free; 1375 struct zone *zone; 1376 1377 for_each_zone(zone) { 1378 show_node(zone); 1379 printk("%s per-cpu:", zone->name); 1380 1381 if (!populated_zone(zone)) { 1382 printk(" empty\n"); 1383 continue; 1384 } else 1385 printk("\n"); 1386 1387 for_each_online_cpu(cpu) { 1388 struct per_cpu_pageset *pageset; 1389 1390 pageset = zone_pcp(zone, cpu); 1391 1392 for (temperature = 0; temperature < 2; temperature++) 1393 printk("cpu %d %s: high %d, batch %d used:%d\n", 1394 cpu, 1395 temperature ? "cold" : "hot", 1396 pageset->pcp[temperature].high, 1397 pageset->pcp[temperature].batch, 1398 pageset->pcp[temperature].count); 1399 } 1400 } 1401 1402 get_page_state(&ps); 1403 get_zone_counts(&active, &inactive, &free); 1404 1405 printk("Free pages: %11ukB (%ukB HighMem)\n", 1406 K(nr_free_pages()), 1407 K(nr_free_highpages())); 1408 1409 printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu " 1410 "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n", 1411 active, 1412 inactive, 1413 ps.nr_dirty, 1414 ps.nr_writeback, 1415 ps.nr_unstable, 1416 nr_free_pages(), 1417 ps.nr_slab, 1418 ps.nr_mapped, 1419 ps.nr_page_table_pages); 1420 1421 for_each_zone(zone) { 1422 int i; 1423 1424 show_node(zone); 1425 printk("%s" 1426 " free:%lukB" 1427 " min:%lukB" 1428 " low:%lukB" 1429 " high:%lukB" 1430 " active:%lukB" 1431 " inactive:%lukB" 1432 " present:%lukB" 1433 " pages_scanned:%lu" 1434 " all_unreclaimable? %s" 1435 "\n", 1436 zone->name, 1437 K(zone->free_pages), 1438 K(zone->pages_min), 1439 K(zone->pages_low), 1440 K(zone->pages_high), 1441 K(zone->nr_active), 1442 K(zone->nr_inactive), 1443 K(zone->present_pages), 1444 zone->pages_scanned, 1445 (zone->all_unreclaimable ? 
"yes" : "no") 1446 ); 1447 printk("lowmem_reserve[]:"); 1448 for (i = 0; i < MAX_NR_ZONES; i++) 1449 printk(" %lu", zone->lowmem_reserve[i]); 1450 printk("\n"); 1451 } 1452 1453 for_each_zone(zone) { 1454 unsigned long nr, flags, order, total = 0; 1455 1456 show_node(zone); 1457 printk("%s: ", zone->name); 1458 if (!populated_zone(zone)) { 1459 printk("empty\n"); 1460 continue; 1461 } 1462 1463 spin_lock_irqsave(&zone->lock, flags); 1464 for (order = 0; order < MAX_ORDER; order++) { 1465 nr = zone->free_area[order].nr_free; 1466 total += nr << order; 1467 printk("%lu*%lukB ", nr, K(1UL) << order); 1468 } 1469 spin_unlock_irqrestore(&zone->lock, flags); 1470 printk("= %lukB\n", K(total)); 1471 } 1472 1473 show_swap_cache_info(); 1474 } 1475 1476 /* 1477 * Builds allocation fallback zone lists. 1478 * 1479 * Add all populated zones of a node to the zonelist. 1480 */ 1481 static int __init build_zonelists_node(pg_data_t *pgdat, 1482 struct zonelist *zonelist, int nr_zones, int zone_type) 1483 { 1484 struct zone *zone; 1485 1486 BUG_ON(zone_type > ZONE_HIGHMEM); 1487 1488 do { 1489 zone = pgdat->node_zones + zone_type; 1490 if (populated_zone(zone)) { 1491 #ifndef CONFIG_HIGHMEM 1492 BUG_ON(zone_type > ZONE_NORMAL); 1493 #endif 1494 zonelist->zones[nr_zones++] = zone; 1495 check_highest_zone(zone_type); 1496 } 1497 zone_type--; 1498 1499 } while (zone_type >= 0); 1500 return nr_zones; 1501 } 1502 1503 static inline int highest_zone(int zone_bits) 1504 { 1505 int res = ZONE_NORMAL; 1506 if (zone_bits & (__force int)__GFP_HIGHMEM) 1507 res = ZONE_HIGHMEM; 1508 if (zone_bits & (__force int)__GFP_DMA32) 1509 res = ZONE_DMA32; 1510 if (zone_bits & (__force int)__GFP_DMA) 1511 res = ZONE_DMA; 1512 return res; 1513 } 1514 1515 #ifdef CONFIG_NUMA 1516 #define MAX_NODE_LOAD (num_online_nodes()) 1517 static int __initdata node_load[MAX_NUMNODES]; 1518 /** 1519 * find_next_best_node - find the next node that should appear in a given node's fallback list 1520 * @node: node whose fallback list we're appending 1521 * @used_node_mask: nodemask_t of already used nodes 1522 * 1523 * We use a number of factors to determine which is the next node that should 1524 * appear on a given node's fallback list. The node should not have appeared 1525 * already in @node's fallback list, and it should be the next closest node 1526 * according to the distance array (which contains arbitrary distance values 1527 * from each node to each node in the system), and should also prefer nodes 1528 * with no CPUs, since presumably they'll have very little allocation pressure 1529 * on them otherwise. 1530 * It returns -1 if no node is found. 
1531 */ 1532 static int __init find_next_best_node(int node, nodemask_t *used_node_mask) 1533 { 1534 int i, n, val; 1535 int min_val = INT_MAX; 1536 int best_node = -1; 1537 1538 for_each_online_node(i) { 1539 cpumask_t tmp; 1540 1541 /* Start from local node */ 1542 n = (node+i) % num_online_nodes(); 1543 1544 /* Don't want a node to appear more than once */ 1545 if (node_isset(n, *used_node_mask)) 1546 continue; 1547 1548 /* Use the local node if we haven't already */ 1549 if (!node_isset(node, *used_node_mask)) { 1550 best_node = node; 1551 break; 1552 } 1553 1554 /* Use the distance array to find the distance */ 1555 val = node_distance(node, n); 1556 1557 /* Give preference to headless and unused nodes */ 1558 tmp = node_to_cpumask(n); 1559 if (!cpus_empty(tmp)) 1560 val += PENALTY_FOR_NODE_WITH_CPUS; 1561 1562 /* Slight preference for less loaded node */ 1563 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 1564 val += node_load[n]; 1565 1566 if (val < min_val) { 1567 min_val = val; 1568 best_node = n; 1569 } 1570 } 1571 1572 if (best_node >= 0) 1573 node_set(best_node, *used_node_mask); 1574 1575 return best_node; 1576 } 1577 1578 static void __init build_zonelists(pg_data_t *pgdat) 1579 { 1580 int i, j, k, node, local_node; 1581 int prev_node, load; 1582 struct zonelist *zonelist; 1583 nodemask_t used_mask; 1584 1585 /* initialize zonelists */ 1586 for (i = 0; i < GFP_ZONETYPES; i++) { 1587 zonelist = pgdat->node_zonelists + i; 1588 zonelist->zones[0] = NULL; 1589 } 1590 1591 /* NUMA-aware ordering of nodes */ 1592 local_node = pgdat->node_id; 1593 load = num_online_nodes(); 1594 prev_node = local_node; 1595 nodes_clear(used_mask); 1596 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 1597 /* 1598 * We don't want to pressure a particular node. 1599 * So adding penalty to the first node in same 1600 * distance group to make it round-robin. 1601 */ 1602 if (node_distance(local_node, node) != 1603 node_distance(local_node, prev_node)) 1604 node_load[node] += load; 1605 prev_node = node; 1606 load--; 1607 for (i = 0; i < GFP_ZONETYPES; i++) { 1608 zonelist = pgdat->node_zonelists + i; 1609 for (j = 0; zonelist->zones[j] != NULL; j++); 1610 1611 k = highest_zone(i); 1612 1613 j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); 1614 zonelist->zones[j] = NULL; 1615 } 1616 } 1617 } 1618 1619 #else /* CONFIG_NUMA */ 1620 1621 static void __init build_zonelists(pg_data_t *pgdat) 1622 { 1623 int i, j, k, node, local_node; 1624 1625 local_node = pgdat->node_id; 1626 for (i = 0; i < GFP_ZONETYPES; i++) { 1627 struct zonelist *zonelist; 1628 1629 zonelist = pgdat->node_zonelists + i; 1630 1631 j = 0; 1632 k = highest_zone(i); 1633 j = build_zonelists_node(pgdat, zonelist, j, k); 1634 /* 1635 * Now we build the zonelist so that it contains the zones 1636 * of all the other nodes. 
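 * (The local node's own zones were already added by the
 * build_zonelists_node() call just above.)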
1637 * We don't want to pressure a particular node, so when 1638 * building the zones for node N, we make sure that the 1639 * zones coming right after the local ones are those from 1640 * node N+1 (modulo N) 1641 */ 1642 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 1643 if (!node_online(node)) 1644 continue; 1645 j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); 1646 } 1647 for (node = 0; node < local_node; node++) { 1648 if (!node_online(node)) 1649 continue; 1650 j = build_zonelists_node(NODE_DATA(node), zonelist, j, k); 1651 } 1652 1653 zonelist->zones[j] = NULL; 1654 } 1655 } 1656 1657 #endif /* CONFIG_NUMA */ 1658 1659 void __init build_all_zonelists(void) 1660 { 1661 int i; 1662 1663 for_each_online_node(i) 1664 build_zonelists(NODE_DATA(i)); 1665 printk("Built %i zonelists\n", num_online_nodes()); 1666 cpuset_init_current_mems_allowed(); 1667 } 1668 1669 /* 1670 * Helper functions to size the waitqueue hash table. 1671 * Essentially these want to choose hash table sizes sufficiently 1672 * large so that collisions trying to wait on pages are rare. 1673 * But in fact, the number of active page waitqueues on typical 1674 * systems is ridiculously low, less than 200. So this is even 1675 * conservative, even though it seems large. 1676 * 1677 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 1678 * waitqueues, i.e. the size of the waitq table given the number of pages. 1679 */ 1680 #define PAGES_PER_WAITQUEUE 256 1681 1682 static inline unsigned long wait_table_size(unsigned long pages) 1683 { 1684 unsigned long size = 1; 1685 1686 pages /= PAGES_PER_WAITQUEUE; 1687 1688 while (size < pages) 1689 size <<= 1; 1690 1691 /* 1692 * Once we have dozens or even hundreds of threads sleeping 1693 * on IO we've got bigger problems than wait queue collision. 1694 * Limit the size of the wait table to a reasonable size. 1695 */ 1696 size = min(size, 4096UL); 1697 1698 return max(size, 4UL); 1699 } 1700 1701 /* 1702 * This is an integer logarithm so that shifts can be used later 1703 * to extract the more random high bits from the multiplicative 1704 * hash function before the remainder is taken. 1705 */ 1706 static inline unsigned long wait_table_bits(unsigned long size) 1707 { 1708 return ffz(~size); 1709 } 1710 1711 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1)) 1712 1713 static void __init calculate_zone_totalpages(struct pglist_data *pgdat, 1714 unsigned long *zones_size, unsigned long *zholes_size) 1715 { 1716 unsigned long realtotalpages, totalpages = 0; 1717 int i; 1718 1719 for (i = 0; i < MAX_NR_ZONES; i++) 1720 totalpages += zones_size[i]; 1721 pgdat->node_spanned_pages = totalpages; 1722 1723 realtotalpages = totalpages; 1724 if (zholes_size) 1725 for (i = 0; i < MAX_NR_ZONES; i++) 1726 realtotalpages -= zholes_size[i]; 1727 pgdat->node_present_pages = realtotalpages; 1728 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 1729 } 1730 1731 1732 /* 1733 * Initially all pages are reserved - free ones are freed 1734 * up by free_all_bootmem() once the early boot process is 1735 * done. Non-atomic initialization, single-pass. 
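 * Every valid pfn gets its zone/node links set, a reference count of
 * one, a cleared mapcount, PG_reserved and an empty ->lru list.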
1736 */ 1737 void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 1738 unsigned long start_pfn) 1739 { 1740 struct page *page; 1741 unsigned long end_pfn = start_pfn + size; 1742 unsigned long pfn; 1743 1744 for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) { 1745 if (!early_pfn_valid(pfn)) 1746 continue; 1747 page = pfn_to_page(pfn); 1748 set_page_links(page, zone, nid, pfn); 1749 set_page_count(page, 1); 1750 reset_page_mapcount(page); 1751 SetPageReserved(page); 1752 INIT_LIST_HEAD(&page->lru); 1753 #ifdef WANT_PAGE_VIRTUAL 1754 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1755 if (!is_highmem_idx(zone)) 1756 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1757 #endif 1758 } 1759 } 1760 1761 void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone, 1762 unsigned long size) 1763 { 1764 int order; 1765 for (order = 0; order < MAX_ORDER ; order++) { 1766 INIT_LIST_HEAD(&zone->free_area[order].free_list); 1767 zone->free_area[order].nr_free = 0; 1768 } 1769 } 1770 1771 #define ZONETABLE_INDEX(x, zone_nr) ((x << ZONES_SHIFT) | zone_nr) 1772 void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn, 1773 unsigned long size) 1774 { 1775 unsigned long snum = pfn_to_section_nr(pfn); 1776 unsigned long end = pfn_to_section_nr(pfn + size); 1777 1778 if (FLAGS_HAS_NODE) 1779 zone_table[ZONETABLE_INDEX(nid, zid)] = zone; 1780 else 1781 for (; snum <= end; snum++) 1782 zone_table[ZONETABLE_INDEX(snum, zid)] = zone; 1783 } 1784 1785 #ifndef __HAVE_ARCH_MEMMAP_INIT 1786 #define memmap_init(size, nid, zone, start_pfn) \ 1787 memmap_init_zone((size), (nid), (zone), (start_pfn)) 1788 #endif 1789 1790 static int __devinit zone_batchsize(struct zone *zone) 1791 { 1792 int batch; 1793 1794 /* 1795 * The per-cpu-pages pools are set to around 1000th of the 1796 * size of the zone. But no more than 1/2 of a meg. 1797 * 1798 * OK, so we don't know how big the cache is. So guess. 1799 */ 1800 batch = zone->present_pages / 1024; 1801 if (batch * PAGE_SIZE > 512 * 1024) 1802 batch = (512 * 1024) / PAGE_SIZE; 1803 batch /= 4; /* We effectively *= 4 below */ 1804 if (batch < 1) 1805 batch = 1; 1806 1807 /* 1808 * Clamp the batch to a 2^n - 1 value. Having a power 1809 * of 2 value was found to be more likely to have 1810 * suboptimal cache aliasing properties in some cases. 1811 * 1812 * For example if 2 tasks are alternately allocating 1813 * batches of pages, one task can end up with a lot 1814 * of pages of one half of the possible page colors 1815 * and the other with pages of the other colors. 1816 */ 1817 batch = (1 << (fls(batch + batch/2)-1)) - 1; 1818 1819 return batch; 1820 } 1821 1822 inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 1823 { 1824 struct per_cpu_pages *pcp; 1825 1826 memset(p, 0, sizeof(*p)); 1827 1828 pcp = &p->pcp[0]; /* hot */ 1829 pcp->count = 0; 1830 pcp->high = 6 * batch; 1831 pcp->batch = max(1UL, 1 * batch); 1832 INIT_LIST_HEAD(&pcp->list); 1833 1834 pcp = &p->pcp[1]; /* cold*/ 1835 pcp->count = 0; 1836 pcp->high = 2 * batch; 1837 pcp->batch = max(1UL, batch/2); 1838 INIT_LIST_HEAD(&pcp->list); 1839 } 1840 1841 /* 1842 * setup_pagelist_highmark() sets the high water mark for hot per_cpu_pagelist 1843 * to the value high for the pageset p. 
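 * The batch size is derived from it as high/4, clamped to
 * PAGE_SHIFT * 8 pages so that refills stay bounded.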
1844 */ 1845 1846 static void setup_pagelist_highmark(struct per_cpu_pageset *p, 1847 unsigned long high) 1848 { 1849 struct per_cpu_pages *pcp; 1850 1851 pcp = &p->pcp[0]; /* hot list */ 1852 pcp->high = high; 1853 pcp->batch = max(1UL, high/4); 1854 if ((high/4) > (PAGE_SHIFT * 8)) 1855 pcp->batch = PAGE_SHIFT * 8; 1856 } 1857 1858 1859 #ifdef CONFIG_NUMA 1860 /* 1861 * Boot pageset table. One per cpu which is going to be used for all 1862 * zones and all nodes. The parameters will be set in such a way 1863 * that an item put on a list will immediately be handed over to 1864 * the buddy list. This is safe since pageset manipulation is done 1865 * with interrupts disabled. 1866 * 1867 * Some NUMA counter updates may also be caught by the boot pagesets. 1868 * 1869 * The boot_pagesets must be kept even after bootup is complete for 1870 * unused processors and/or zones. They do play a role for bootstrapping 1871 * hotplugged processors. 1872 * 1873 * zoneinfo_show() and maybe other functions do 1874 * not check if the processor is online before following the pageset pointer. 1875 * Other parts of the kernel may not check if the zone is available. 1876 */ 1877 static struct per_cpu_pageset 1878 boot_pageset[NR_CPUS]; 1879 1880 /* 1881 * Dynamically allocate memory for the 1882 * per cpu pageset array in struct zone. 1883 */ 1884 static int __devinit process_zones(int cpu) 1885 { 1886 struct zone *zone, *dzone; 1887 1888 for_each_zone(zone) { 1889 1890 zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset), 1891 GFP_KERNEL, cpu_to_node(cpu)); 1892 if (!zone_pcp(zone, cpu)) 1893 goto bad; 1894 1895 setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone)); 1896 1897 if (percpu_pagelist_fraction) 1898 setup_pagelist_highmark(zone_pcp(zone, cpu), 1899 (zone->present_pages / percpu_pagelist_fraction)); 1900 } 1901 1902 return 0; 1903 bad: 1904 for_each_zone(dzone) { 1905 if (dzone == zone) 1906 break; 1907 kfree(zone_pcp(dzone, cpu)); 1908 zone_pcp(dzone, cpu) = NULL; 1909 } 1910 return -ENOMEM; 1911 } 1912 1913 static inline void free_zone_pagesets(int cpu) 1914 { 1915 struct zone *zone; 1916 1917 for_each_zone(zone) { 1918 struct per_cpu_pageset *pset = zone_pcp(zone, cpu); 1919 1920 zone_pcp(zone, cpu) = NULL; 1921 kfree(pset); 1922 } 1923 } 1924 1925 static int __devinit pageset_cpuup_callback(struct notifier_block *nfb, 1926 unsigned long action, 1927 void *hcpu) 1928 { 1929 int cpu = (long)hcpu; 1930 int ret = NOTIFY_OK; 1931 1932 switch (action) { 1933 case CPU_UP_PREPARE: 1934 if (process_zones(cpu)) 1935 ret = NOTIFY_BAD; 1936 break; 1937 case CPU_UP_CANCELED: 1938 case CPU_DEAD: 1939 free_zone_pagesets(cpu); 1940 break; 1941 default: 1942 break; 1943 } 1944 return ret; 1945 } 1946 1947 static struct notifier_block pageset_notifier = 1948 { &pageset_cpuup_callback, NULL, 0 }; 1949 1950 void __init setup_per_cpu_pageset(void) 1951 { 1952 int err; 1953 1954 /* Initialize per_cpu_pageset for cpu 0. 1955 * A cpuup callback will do this for every cpu 1956 * as it comes online 1957 */ 1958 err = process_zones(smp_processor_id()); 1959 BUG_ON(err); 1960 register_cpu_notifier(&pageset_notifier); 1961 } 1962 1963 #endif 1964 1965 static __devinit 1966 void zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 1967 { 1968 int i; 1969 struct pglist_data *pgdat = zone->zone_pgdat; 1970 1971 /* 1972 * The per-page waitqueue mechanism uses hashed waitqueues 1973 * per zone. 
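	 * wait_table_size() gives roughly one queue per PAGES_PER_WAITQUEUE
	 * (256) pages, rounded up to a power of two and clamped to the
	 * range [4, 4096] entries; the table is allocated from bootmem and
	 * every head is initialised below.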
1974 */ 1975 zone->wait_table_size = wait_table_size(zone_size_pages); 1976 zone->wait_table_bits = wait_table_bits(zone->wait_table_size); 1977 zone->wait_table = (wait_queue_head_t *) 1978 alloc_bootmem_node(pgdat, zone->wait_table_size 1979 * sizeof(wait_queue_head_t)); 1980 1981 for(i = 0; i < zone->wait_table_size; ++i) 1982 init_waitqueue_head(zone->wait_table + i); 1983 } 1984 1985 static __devinit void zone_pcp_init(struct zone *zone) 1986 { 1987 int cpu; 1988 unsigned long batch = zone_batchsize(zone); 1989 1990 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1991 #ifdef CONFIG_NUMA 1992 /* Early boot. Slab allocator not functional yet */ 1993 zone_pcp(zone, cpu) = &boot_pageset[cpu]; 1994 setup_pageset(&boot_pageset[cpu],0); 1995 #else 1996 setup_pageset(zone_pcp(zone,cpu), batch); 1997 #endif 1998 } 1999 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n", 2000 zone->name, zone->present_pages, batch); 2001 } 2002 2003 static __devinit void init_currently_empty_zone(struct zone *zone, 2004 unsigned long zone_start_pfn, unsigned long size) 2005 { 2006 struct pglist_data *pgdat = zone->zone_pgdat; 2007 2008 zone_wait_table_init(zone, size); 2009 pgdat->nr_zones = zone_idx(zone) + 1; 2010 2011 zone->zone_mem_map = pfn_to_page(zone_start_pfn); 2012 zone->zone_start_pfn = zone_start_pfn; 2013 2014 memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn); 2015 2016 zone_init_free_lists(pgdat, zone, zone->spanned_pages); 2017 } 2018 2019 /* 2020 * Set up the zone data structures: 2021 * - mark all pages reserved 2022 * - mark all memory queues empty 2023 * - clear the memory bitmaps 2024 */ 2025 static void __init free_area_init_core(struct pglist_data *pgdat, 2026 unsigned long *zones_size, unsigned long *zholes_size) 2027 { 2028 unsigned long j; 2029 int nid = pgdat->node_id; 2030 unsigned long zone_start_pfn = pgdat->node_start_pfn; 2031 2032 pgdat_resize_init(pgdat); 2033 pgdat->nr_zones = 0; 2034 init_waitqueue_head(&pgdat->kswapd_wait); 2035 pgdat->kswapd_max_order = 0; 2036 2037 for (j = 0; j < MAX_NR_ZONES; j++) { 2038 struct zone *zone = pgdat->node_zones + j; 2039 unsigned long size, realsize; 2040 2041 realsize = size = zones_size[j]; 2042 if (zholes_size) 2043 realsize -= zholes_size[j]; 2044 2045 if (j < ZONE_HIGHMEM) 2046 nr_kernel_pages += realsize; 2047 nr_all_pages += realsize; 2048 2049 zone->spanned_pages = size; 2050 zone->present_pages = realsize; 2051 zone->name = zone_names[j]; 2052 spin_lock_init(&zone->lock); 2053 spin_lock_init(&zone->lru_lock); 2054 zone_seqlock_init(zone); 2055 zone->zone_pgdat = pgdat; 2056 zone->free_pages = 0; 2057 2058 zone->temp_priority = zone->prev_priority = DEF_PRIORITY; 2059 2060 zone_pcp_init(zone); 2061 INIT_LIST_HEAD(&zone->active_list); 2062 INIT_LIST_HEAD(&zone->inactive_list); 2063 zone->nr_scan_active = 0; 2064 zone->nr_scan_inactive = 0; 2065 zone->nr_active = 0; 2066 zone->nr_inactive = 0; 2067 atomic_set(&zone->reclaim_in_progress, 0); 2068 if (!size) 2069 continue; 2070 2071 zonetable_add(zone, nid, j, zone_start_pfn, size); 2072 init_currently_empty_zone(zone, zone_start_pfn, size); 2073 zone_start_pfn += size; 2074 } 2075 } 2076 2077 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 2078 { 2079 /* Skip empty nodes */ 2080 if (!pgdat->node_spanned_pages) 2081 return; 2082 2083 #ifdef CONFIG_FLAT_NODE_MEM_MAP 2084 /* ia64 gets its own node_mem_map, before this, without bootmem */ 2085 if (!pgdat->node_mem_map) { 2086 unsigned long size; 2087 struct page *map; 2088 2089 size = (pgdat->node_spanned_pages 
static void __init alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size;
		struct page *map;

		size = (pgdat->node_spanned_pages + 1) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = alloc_bootmem_node(pgdat, size);
		pgdat->node_mem_map = map;
	}
#ifdef CONFIG_FLATMEM
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0))
		mem_map = NODE_DATA(0)->node_mem_map;
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}

void __init free_area_init_node(int nid, struct pglist_data *pgdat,
		unsigned long *zones_size, unsigned long node_start_pfn,
		unsigned long *zholes_size)
{
	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	calculate_zone_totalpages(pgdat, zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

	free_area_init_core(pgdat, zones_size, zholes_size);
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static bootmem_data_t contig_bootmem_data;
struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };

EXPORT_SYMBOL(contig_page_data);
#endif

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, NODE_DATA(0), zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

#ifdef CONFIG_PROC_FS

#include <linux/seq_file.h>

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = pgdat_list; pgdat && node; pgdat = pgdat->pgdat_next)
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return pgdat->pgdat_next;
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;
	int order;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
		for (order = 0; order < MAX_ORDER; ++order)
			seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

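/*
 * Illustrative sketch (not part of page_alloc.c): the single line that
 * frag_show() above emits per populated zone -- the node id, the zone
 * name, then one free-block count per buddy order.  The counts are made
 * up and MAX_ORDER is taken as 11 purely for the example; standalone
 * userspace C, kept out of the build by the #if 0 guard.
 */
#if 0
#include <stdio.h>

#define EX_MAX_ORDER 11

int main(void)
{
	/* hypothetical free_area[order].nr_free values */
	unsigned long nr_free[EX_MAX_ORDER] =
		{ 5, 3, 7, 2, 0, 1, 0, 0, 1, 0, 2 };
	int order;

	printf("Node %d, zone %8s ", 0, "Normal");
	for (order = 0; order < EX_MAX_ORDER; ++order)
		printf("%6lu ", nr_free[order]);
	putchar('\n');
	return 0;
}
#endif
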
/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = arg;
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; zone++) {
		int i;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
		seq_printf(m,
			"\n pages free %lu"
			"\n min %lu"
			"\n low %lu"
			"\n high %lu"
			"\n active %lu"
			"\n inactive %lu"
			"\n scanned %lu (a: %lu i: %lu)"
			"\n spanned %lu"
			"\n present %lu",
			zone->free_pages,
			zone->pages_min,
			zone->pages_low,
			zone->pages_high,
			zone->nr_active,
			zone->nr_inactive,
			zone->pages_scanned,
			zone->nr_scan_active, zone->nr_scan_inactive,
			zone->spanned_pages,
			zone->present_pages);
		seq_printf(m,
			"\n protection: (%lu",
			zone->lowmem_reserve[0]);
		for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
			seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
		seq_printf(m,
			")"
			"\n pagesets");
		for_each_online_cpu(i) {
			struct per_cpu_pageset *pageset;
			int j;

			pageset = zone_pcp(zone, i);
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				if (pageset->pcp[j].count)
					break;
			}
			if (j == ARRAY_SIZE(pageset->pcp))
				continue;
			for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
				seq_printf(m,
					"\n cpu: %i pcp: %i"
					"\n count: %i"
					"\n high: %i"
					"\n batch: %i",
					i, j,
					pageset->pcp[j].count,
					pageset->pcp[j].high,
					pageset->pcp[j].batch);
			}
#ifdef CONFIG_NUMA
			seq_printf(m,
				"\n numa_hit: %lu"
				"\n numa_miss: %lu"
				"\n numa_foreign: %lu"
				"\n interleave_hit: %lu"
				"\n local_node: %lu"
				"\n other_node: %lu",
				pageset->numa_hit,
				pageset->numa_miss,
				pageset->numa_foreign,
				pageset->interleave_hit,
				pageset->local_node,
				pageset->other_node);
#endif
		}
		seq_printf(m,
			"\n all_unreclaimable: %u"
			"\n prev_priority: %i"
			"\n temp_priority: %i"
			"\n start_pfn: %lu",
			zone->all_unreclaimable,
			zone->prev_priority,
			zone->temp_priority,
			zone->zone_start_pfn);
		spin_unlock_irqrestore(&zone->lock, flags);
		seq_putc(m, '\n');
	}
	return 0;
}

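/*
 * Illustrative sketch (not part of page_alloc.c): the idiom used in
 * zoneinfo_show() above to skip a CPU whose per-cpu page lists are all
 * empty -- scan the pcp[] array, break on the first non-empty list, and
 * detect "none found" by comparing the loop index against the array
 * size.  The pcp structure and counts here are hypothetical; standalone
 * userspace C, kept out of the build by the #if 0 guard.
 */
#if 0
#include <stdio.h>

#define EX_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct ex_pcp { int count; };

int main(void)
{
	struct ex_pcp pcp[2] = { { 0 }, { 0 } };	/* both lists empty */
	unsigned int j;

	for (j = 0; j < EX_ARRAY_SIZE(pcp); j++) {
		if (pcp[j].count)
			break;
	}
	if (j == EX_ARRAY_SIZE(pcp))
		printf("all pcp lists empty -- skip this cpu\n");
	else
		printf("pcp %u has %d pages\n", j, pcp[j].count);
	return 0;
}
#endif
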
struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static char *vmstat_text[] = {
	"nr_dirty",
	"nr_writeback",
	"nr_unstable",
	"nr_page_table_pages",
	"nr_mapped",
	"nr_slab",

	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	"pgalloc_high",
	"pgalloc_normal",
	"pgalloc_dma32",
	"pgalloc_dma",

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	"pgrefill_high",
	"pgrefill_normal",
	"pgrefill_dma32",
	"pgrefill_dma",

	"pgsteal_high",
	"pgsteal_normal",
	"pgsteal_dma32",
	"pgsteal_dma",

	"pgscan_kswapd_high",
	"pgscan_kswapd_normal",
	"pgscan_kswapd_dma32",
	"pgscan_kswapd_dma",

	"pgscan_direct_high",
	"pgscan_direct_normal",
	"pgscan_direct_dma32",
	"pgscan_direct_dma",

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
	"nr_bounce",
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	struct page_state *ps;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
	m->private = ps;
	if (!ps)
		return ERR_PTR(-ENOMEM);
	get_full_page_state(ps);
	ps->pgpgin /= 2;		/* sectors -> kbytes */
	ps->pgpgout /= 2;
	return (unsigned long *)ps + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_HOTPLUG_CPU
static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	long *count;
	unsigned long *src, *dest;

	if (action == CPU_DEAD) {
		int i;

		/* Drain local pagecache count. */
		count = &per_cpu(nr_pagecache_local, cpu);
		atomic_add(*count, &nr_pagecache);
		*count = 0;
		local_irq_disable();
		__drain_pages(cpu);

		/* Add dead cpu's page_states to our own. */
		dest = (unsigned long *)&__get_cpu_var(page_states);
		src = (unsigned long *)&per_cpu(page_states, cpu);

		for (i = 0; i < sizeof(struct page_state)/sizeof(unsigned long);
				i++) {
			dest[i] += src[i];
			src[i] = 0;
		}

		local_irq_enable();
	}
	return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}

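/*
 * Illustrative sketch (not part of page_alloc.c): the CPU_DEAD handling
 * in page_alloc_cpu_notify() above treats struct page_state as a flat
 * array of unsigned longs and folds the dead CPU's counters into the
 * surviving CPU's copy, zeroing the source.  The two-counter struct and
 * values below are hypothetical; standalone userspace C, kept out of the
 * build by the #if 0 guard.
 */
#if 0
#include <stdio.h>

struct ex_page_state { unsigned long pgalloc, pgfree; };

int main(void)
{
	struct ex_page_state mine = { 100, 40 };	/* this cpu's counters */
	struct ex_page_state dead = {  25, 10 };	/* dead cpu's counters */
	unsigned long *dest = (unsigned long *)&mine;
	unsigned long *src  = (unsigned long *)&dead;
	unsigned int i;

	for (i = 0; i < sizeof(struct ex_page_state) / sizeof(unsigned long); i++) {
		dest[i] += src[i];
		src[i] = 0;
	}
	printf("merged: pgalloc %lu pgfree %lu\n", mine.pgalloc, mine.pgfree);
	return 0;
}
#endif
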
/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	int j, idx;

	for_each_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long present_pages = zone->present_pages;

			zone->lowmem_reserve[j] = 0;

			for (idx = j-1; idx >= 0; idx--) {
				struct zone *lower_zone;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = present_pages /
					sysctl_lowmem_reserve_ratio[idx];
				present_pages += lower_zone->present_pages;
			}
		}
	}
}

/*
 * setup_per_zone_pages_min - called when min_free_kbytes changes.  Ensures
 *	that the pages_{min,low,high} values for each zone are set correctly
 *	with respect to min_free_kbytes.
 */
void setup_per_zone_pages_min(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->present_pages;
	}

	for_each_zone(zone) {
		unsigned long tmp;
		spin_lock_irqsave(&zone->lru_lock, flags);
		tmp = (pages_min * zone->present_pages) / lowmem_pages;
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The (pages_high-pages_low) and (pages_low-pages_min)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			int min_pages;

			min_pages = zone->present_pages / 1024;
			if (min_pages < SWAP_CLUSTER_MAX)
				min_pages = SWAP_CLUSTER_MAX;
			if (min_pages > 128)
				min_pages = 128;
			zone->pages_min = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->pages_min = tmp;
		}

		zone->pages_low = zone->pages_min + tmp / 4;
		zone->pages_high = zone->pages_min + tmp / 2;
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
}

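/*
 * Illustrative sketch (not part of page_alloc.c): the arithmetic that
 * setup_per_zone_pages_min() above applies to a lowmem zone.  Each zone
 * receives a share of min_free_kbytes proportional to its size, and the
 * low/high watermarks sit 25% and 50% above pages_min.  The zone sizes,
 * min_free_kbytes value and 4K page size are hypothetical; standalone
 * userspace C, kept out of the build by the #if 0 guard.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long min_free_kbytes = 1024;		/* hypothetical */
	unsigned long page_shift = 12;			/* 4K pages */
	unsigned long pages_min = min_free_kbytes >> (page_shift - 10);
	unsigned long zone_present[2] = { 4096, 221184 };	/* e.g. DMA, Normal */
	unsigned long lowmem_pages = zone_present[0] + zone_present[1];
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long tmp = (pages_min * zone_present[i]) / lowmem_pages;
		unsigned long min  = tmp;
		unsigned long low  = min + tmp / 4;
		unsigned long high = min + tmp / 2;

		printf("zone %d: min %lu low %lu high %lu\n", i, min, low, high);
	}
	return 0;
}
#endif
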
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
static int __init init_per_zone_pages_min(void)
{
	unsigned long lowmem_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);

	min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
	if (min_free_kbytes < 128)
		min_free_kbytes = 128;
	if (min_free_kbytes > 65536)
		min_free_kbytes = 65536;
	setup_per_zone_pages_min();
	setup_per_zone_lowmem_reserve();
	return 0;
}
module_init(init_per_zone_pages_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, file, buffer, length, ppos);
	setup_per_zone_pages_min();
	return 0;
}

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the pages_min watermarks.  The
 * lowmem reserve ratio only makes sense as a function of the boot-time
 * zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

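/*
 * Illustrative sketch (not part of page_alloc.c): the sizing rule from
 * the comment above init_per_zone_pages_min() --
 * min_free_kbytes = sqrt(lowmem_kbytes * 16), clamped to [128k, 64MB].
 * It reproduces a few rows of the table in that comment; the integer
 * square root here is a plain loop rather than the kernel's int_sqrt().
 * Standalone userspace C, kept out of the build by the #if 0 guard.
 */
#if 0
#include <stdio.h>

static unsigned long ex_int_sqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	unsigned long lowmem_mb[] = { 16, 64, 1024, 16384 };
	unsigned int i;

	for (i = 0; i < sizeof(lowmem_mb) / sizeof(lowmem_mb[0]); i++) {
		unsigned long lowmem_kbytes = lowmem_mb[i] * 1024;
		unsigned long min_free_kbytes = ex_int_sqrt(lowmem_kbytes * 16);

		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;
		printf("%luMB lowmem -> min_free_kbytes = %luk\n",
		       lowmem_mb[i], min_free_kbytes);
	}
	return 0;
}
#endif
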
2603 */ 2604 2605 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 2606 struct file *file, void __user *buffer, size_t *length, loff_t *ppos) 2607 { 2608 struct zone *zone; 2609 unsigned int cpu; 2610 int ret; 2611 2612 ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos); 2613 if (!write || (ret == -EINVAL)) 2614 return ret; 2615 for_each_zone(zone) { 2616 for_each_online_cpu(cpu) { 2617 unsigned long high; 2618 high = zone->present_pages / percpu_pagelist_fraction; 2619 setup_pagelist_highmark(zone_pcp(zone, cpu), high); 2620 } 2621 } 2622 return 0; 2623 } 2624 2625 __initdata int hashdist = HASHDIST_DEFAULT; 2626 2627 #ifdef CONFIG_NUMA 2628 static int __init set_hashdist(char *str) 2629 { 2630 if (!str) 2631 return 0; 2632 hashdist = simple_strtoul(str, &str, 0); 2633 return 1; 2634 } 2635 __setup("hashdist=", set_hashdist); 2636 #endif 2637 2638 /* 2639 * allocate a large system hash table from bootmem 2640 * - it is assumed that the hash table must contain an exact power-of-2 2641 * quantity of entries 2642 * - limit is the number of hash buckets, not the total allocation size 2643 */ 2644 void *__init alloc_large_system_hash(const char *tablename, 2645 unsigned long bucketsize, 2646 unsigned long numentries, 2647 int scale, 2648 int flags, 2649 unsigned int *_hash_shift, 2650 unsigned int *_hash_mask, 2651 unsigned long limit) 2652 { 2653 unsigned long long max = limit; 2654 unsigned long log2qty, size; 2655 void *table = NULL; 2656 2657 /* allow the kernel cmdline to have a say */ 2658 if (!numentries) { 2659 /* round applicable memory size up to nearest megabyte */ 2660 numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages; 2661 numentries += (1UL << (20 - PAGE_SHIFT)) - 1; 2662 numentries >>= 20 - PAGE_SHIFT; 2663 numentries <<= 20 - PAGE_SHIFT; 2664 2665 /* limit to 1 bucket per 2^scale bytes of low memory */ 2666 if (scale > PAGE_SHIFT) 2667 numentries >>= (scale - PAGE_SHIFT); 2668 else 2669 numentries <<= (PAGE_SHIFT - scale); 2670 } 2671 /* rounded up to nearest power of 2 in size */ 2672 numentries = 1UL << (long_log2(numentries) + 1); 2673 2674 /* limit allocation size to 1/16 total memory by default */ 2675 if (max == 0) { 2676 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 2677 do_div(max, bucketsize); 2678 } 2679 2680 if (numentries > max) 2681 numentries = max; 2682 2683 log2qty = long_log2(numentries); 2684 2685 do { 2686 size = bucketsize << log2qty; 2687 if (flags & HASH_EARLY) 2688 table = alloc_bootmem(size); 2689 else if (hashdist) 2690 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 2691 else { 2692 unsigned long order; 2693 for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++) 2694 ; 2695 table = (void*) __get_free_pages(GFP_ATOMIC, order); 2696 } 2697 } while (!table && size > PAGE_SIZE && --log2qty); 2698 2699 if (!table) 2700 panic("Failed to allocate %s hash table\n", tablename); 2701 2702 printk("%s hash table entries: %d (order: %d, %lu bytes)\n", 2703 tablename, 2704 (1U << log2qty), 2705 long_log2(size) - PAGE_SHIFT, 2706 size); 2707 2708 if (_hash_shift) 2709 *_hash_shift = log2qty; 2710 if (_hash_mask) 2711 *_hash_mask = (1 << log2qty) - 1; 2712 2713 return table; 2714 } 2715