/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/ftrace_event.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to
 * temporarily change gfp_allowed_mask in order to avoid using I/O during
 * memory allocations while devices are suspended.  To avoid races with
 * the suspend/hibernate code, they should always be called with pm_mutex
 * held (gfp_allowed_mask also should only be modified with pm_mutex held,
 * unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	256,
#endif
#ifdef CONFIG_ZONE_DMA32
	256,
#endif
#ifdef CONFIG_HIGHMEM
	32,
#endif
	32,
};

EXPORT_SYMBOL(totalram_pages);
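/*
 * Editorial worked example (not part of the original source): the ratio
 * divides the amount of *higher-zone* memory to get the reserve kept in
 * a lower zone. With the 784M/224M split above, a ratio of 256 keeps
 * roughly 784M/256 ~= 3M of ZONE_DMA away from NORMAL allocations, and a
 * ratio of 32 keeps 224M/32 = 7M of ZONE_NORMAL away from HIGHMEM
 * allocations. Raising a ratio therefore shrinks the reserve.
 */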
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page %lu outside zone [ %lu - %lu ]\n",
			pfn, start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, char *reason, unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order)) {
		bad_page(page, "wrong compound order", 0);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p))) {
			bad_page(page, "PageTail not set", 0);
			bad++;
		} else if (unlikely(p->first_page != page)) {
			bad_page(page, "first_page not consistent", 0);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}
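/*
 * Illustrative example (editorial note, not from the original source):
 * after prep_compound_page(page, 2), page[0] is the head page and
 * compound_order(page) reads back 2, while page[1..3] are tail pages
 * whose ->first_page all point back at page[0]. destroy_compound_page()
 * checks exactly this layout in reverse and counts one "bad" page for
 * every tail that no longer matches it.
 */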
static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
		return 1;
	}
	return 0;
}
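/*
 * Worked example (editorial, following the formulas above): for page
 * index 8 at order 1, __find_buddy_index() gives 8 ^ (1 << 1) = 10, and
 * the merged order-2 parent starts at 8 & ~(1 << 1) = 8. Starting from
 * index 10 instead, the buddy is again 8 and the parent is still 8,
 * which is why two merged buddies always land on the lower-indexed page.
 */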
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;

	VM_BUG_ON(!zone_is_initialized(zone));

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);

	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER-1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard_flag(buddy);
			set_page_private(page, 0);
			__mod_zone_freepage_state(zone, 1 << order,
						  migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}
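/*
 * Worked example (editorial, not from the original source): freeing
 * order-0 page 12 when pages 13, 14-15 and 8-11 are already free merges
 * as 12+13 -> order 1 at index 12, then 12-13 + 14-15 -> order 2 at 12,
 * then 8-11 + 12-15 -> order 3 at 8. Each pass of the while loop above
 * removes the buddy from its free list before the merged page is
 * finally queued once, at order 3.
 */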
static inline int free_pages_check(struct page *page)
{
	char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
			if (likely(!is_migrate_isolate_page(page))) {
				__mod_zone_page_state(zone, NR_FREE_PAGES, 1);
				if (is_migrate_cma(mt))
					__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
			}
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}
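/*
 * Illustrative walk-through of free_pcppages_bulk() (editorial note): if
 * only one of the three pcp lists is populated, the inner do/while steps
 * past the two empty lists and back around to it, so batch_free reaches
 * MIGRATE_PCPTYPES; the following check then bumps batch_free to the
 * whole remaining count and the single populated list is drained in one
 * inner loop instead of re-probing the empty lists on every pass.
 */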
static void free_one_page(struct zone *zone, struct page *page, int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	zone->pages_scanned = 0;

	__free_one_page(page, zone, order, migratetype);
	if (unlikely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;

	if (!free_pages_prepare(page, order))
		return;

	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	migratetype = get_pageblock_migratetype(page);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, order, migratetype);
	local_irq_restore(flags);
}

void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_page_refcounted(page);
	set_pageblock_migratetype(page, MIGRATE_CMA);
	__free_pages(page, pageblock_order);
	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard pages (or page), which allows the
			 * block to merge back into the allocator when the
			 * buddy is freed. The corresponding page table
			 * entries will not be touched; the pages stay not
			 * present in the virtual address space.
			 */
			INIT_LIST_HEAD(&page[size].lru);
			set_page_guard_flag(&page[size]);
			set_page_private(&page[size], high);
			/* Guard pages are not available for any usage */
			__mod_zone_freepage_state(zone, -(1 << high),
						  migratetype);
			continue;
		}
#endif
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
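/*
 * Worked example (editorial, not from the original source): serving an
 * order-0 request from an order-3 block via
 * expand(zone, page, 0, 3, &zone->free_area[3], mt) puts page[4] back
 * at order 2, page[2] at order 1 and page[1] at order 0, leaving
 * page[0] for the caller. Keeping the allocated page at the front and
 * queueing the upper halves is what lets successive allocations come
 * back in ascending physical order, as the comment above describes.
 */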
/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
#endif
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		set_freepage_migratetype(page, migratetype);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}
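/*
 * Editorial example of the rounding above: with pageblock_nr_pages ==
 * 1024 (configuration dependent), a page at pfn 5000 is rounded down to
 * start_pfn 4096, so the whole block 4096-5119 is moved, or the call
 * backs off at a zone boundary as handled just before the return.
 */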
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * If breaking a large block of pages, move all free pages to the preferred
 * allocation list. If falling back for a reclaimable kernel allocation, be
 * more aggressive about taking ownership of free pages.
 *
 * On the other hand, never change migration type of MIGRATE_CMA pageblocks
 * nor move CMA pages to different free lists. We don't want unmovable pages
 * to be allocated from MIGRATE_CMA areas.
 *
 * Returns the new migratetype of the pageblock (or the same old migratetype
 * if it was unchanged).
 */
static int try_to_steal_freepages(struct zone *zone, struct page *page,
				  int start_type, int fallback_type)
{
	int current_order = page_order(page);

	/*
	 * When borrowing from MIGRATE_CMA, we need to release the excess
	 * buddy pages to CMA itself.
	 */
	if (is_migrate_cma(fallback_type))
		return fallback_type;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		return start_type;
	}

	if (current_order >= pageblock_order / 2 ||
	    start_type == MIGRATE_RECLAIMABLE ||
	    page_group_by_mobility_disabled) {
		int pages;

		pages = move_freepages_block(zone, page, start_type);

		/* Claim the whole block if over half of it is free */
		if (pages >= (1 << (pageblock_order-1)) ||
				page_group_by_mobility_disabled) {

			set_pageblock_migratetype(page, start_type);
			return start_type;
		}

	}

	return fallback_type;
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
	struct free_area *area;
	int current_order;
	struct page *page;
	int migratetype, new_type, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1; current_order >= order;
						--current_order) {
		for (i = 0;; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				break;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			new_type = try_to_steal_freepages(zone, page,
							  start_migratetype,
							  migratetype);

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			expand(zone, page, order, current_order, area,
			       new_type);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype, new_type);

			return page;
		}
	}

	return NULL;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}
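/*
 * Illustrative flow (editorial note): an order-0 MIGRATE_UNMOVABLE
 * request with an empty unmovable free list walks the fallbacks[] row
 * (MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE), largest order first,
 * possibly taking over a whole pageblock via try_to_steal_freepages().
 * Only when every fallback list is empty does the retry_reserve path
 * above try again with MIGRATE_RESERVE.
 */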
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, int cold)
{
	int mt = migratetype, i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number under some conditions. This is useful for IO
		 * devices that can merge IO requests if the physical pages
		 * are ordered properly.
		 */
		if (likely(cold == 0))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		if (IS_ENABLED(CONFIG_CMA)) {
			mt = get_pageblock_migratetype(page);
			if (!is_migrate_cma(mt) && !is_migrate_isolate(mt))
				mt = migratetype;
		}
		set_freepage_migratetype(page, mt);
		list = &page->lru;
		if (is_migrate_cma(mt))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain;
	unsigned long batch;

	local_irq_save(flags);
	batch = ACCESS_ONCE(pcp->batch);
	if (pcp->count >= batch)
		to_drain = batch;
	else
		to_drain = pcp->count;
	if (to_drain > 0) {
		free_pcppages_bulk(zone, to_drain, pcp);
		pcp->count -= to_drain;
	}
	local_irq_restore(flags);
}
static bool gfp_thisnode_allocation(gfp_t gfp_mask)
{
	return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
}
#else
static bool gfp_thisnode_allocation(gfp_t gfp_mask)
{
	return false;
}
#endif

/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		local_irq_save(flags);
		pset = per_cpu_ptr(zone->pageset, cpu);

		pcp = &pset->pcp;
		if (pcp->count) {
			free_pcppages_bulk(zone, pcp->count, pcp);
			pcp->count = 0;
		}
		local_irq_restore(flags);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * Note that this code is protected against sending an IPI to an offline
 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
 * nothing keeps CPUs from showing up after we populated the cpumask and
 * before the call to on_each_cpu_mask().
 */
void drain_all_pages(void)
{
	int cpu;
	struct per_cpu_pageset *pcp;
	struct zone *zone;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpu to drain that CPU pcps and on_each_cpu_mask
	 * disables preemption as part of its processing
	 */
	for_each_online_cpu(cpu) {
		bool has_pcps = false;
		for_each_populated_zone(zone) {
			pcp = per_cpu_ptr(zone->pageset, cpu);
			if (pcp->pcp.count) {
				has_pcps = true;
				break;
			}
		}
		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}
	on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	int order, t;
	struct list_head *curr;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

/*
 * Free a 0-order page
 * cold == 1 ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, int cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	int migratetype;

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pageblock_migratetype(page);
	set_freepage_migratetype(page, migratetype);
	local_irq_save(flags);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary.
	 * Otherwise, we may have to free excessively into the page
	 * allocator.
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	if (cold)
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	else
		list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		unsigned long batch = ACCESS_ONCE(pcp->batch);
		free_pcppages_bulk(zone, batch, pcp);
		pcp->count -= batch;
	}

out:
	local_irq_restore(flags);
}

/*
 * Free a list of 0-order pages
 */
void free_hot_cold_page_list(struct list_head *list, int cold)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		trace_mm_page_free_batched(page, cold);
		free_hot_cold_page(page, cold);
	}
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
EXPORT_SYMBOL_GPL(split_page);

static int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/* Obey watermarks as if the page was being allocated */
		watermark = low_wmark_pages(zone) + (1 << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);

	/* Set the pageblock if the isolated page is at least a pageblock */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	int nr_pages;

	order = page_order(page);

	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);
	return nr_pages;
}

/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, int order, gfp_t gfp_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;
	int cold = !!(gfp_flags & __GFP_COLD);

again:
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
		__mod_zone_freepage_state(zone, -(1 << order),
					  get_pageblock_migratetype(page));
	}

	/*
	 * NOTE: GFP_THISNODE allocations do not partake in the kswapd
	 * aging protocol, so they can't be fair.
	 */
	if (!gfp_thisnode_allocation(gfp_flags))
		__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone, gfp_flags);
	local_irq_restore(flags);

	VM_BUG_ON_PAGE(bad_range(zone, page), page);
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return false;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
				&fail_page_alloc.ignore_gfp_wait))
		goto fail;
	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				&fail_page_alloc.ignore_gfp_highmem))
		goto fail;
	if (!debugfs_create_u32("min-order", mode, dir,
				&fail_page_alloc.min_order))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return false;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

/*
 * Return true if free pages are above 'mark'. This takes into account the
 * order of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags, long free_pages)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long lowmem_reserve = z->lowmem_reserve[classzone_idx];
	int o;
	long free_cma = 0;

	free_pages -= (1 << order) - 1;
	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;
#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	if (free_pages - free_cma <= min + lowmem_reserve)
		return false;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return false;
	}
	return true;
}
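/*
 * Worked example (editorial, not from the original source): for an
 * order-2 check with mark == 1024 and neither ALLOC_HIGH nor
 * ALLOC_HARDER set, the zone must first hold more than
 * 1024 + lowmem_reserve free pages after discounting (1 << 2) - 1 of
 * them; then order-0 free pages are subtracted and more than 512 must
 * remain, and after also dropping order-1 pages more than 256 must
 * still be free. The per-order halving is what guarantees enough
 * higher-order memory rather than just a raw page count.
 */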
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
								free_pages);
}

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * tasks mems_allowed, or node_states[N_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->_zonerefs;

	set_bit(i, zlc->fullzones);
}

/*
 * clear all zones full, called after direct reclaim makes progress so that
 * a zone that was recently full is not skipped over for up to a second
 */
static void zlc_clear_zones_full(struct zonelist *zonelist)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
}

static bool zone_local(struct zone *local_zone, struct zone *zone)
{
	return local_zone->node == zone->node;
}

static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes);
}

static void __paginginit init_zone_allows_reclaim(int nid)
{
	int i;

	for_each_online_node(i)
		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
			node_set(i, NODE_DATA(nid)->reclaim_nodes);
		else
			zone_reclaim_mode = 1;
}

#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}

static void zlc_clear_zones_full(struct zonelist *zonelist)
{
}

static bool zone_local(struct zone *local_zone, struct zone *zone)
{
	return true;
}

static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return true;
}

static inline void init_zone_allows_reclaim(int nid)
{
}
#endif	/* CONFIG_NUMA */

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int migratetype)
{
	struct zoneref *z;
	struct page *page = NULL;
	int classzone_idx;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */

	classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
1938 */ 1939 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1940 high_zoneidx, nodemask) { 1941 unsigned long mark; 1942 1943 if (IS_ENABLED(CONFIG_NUMA) && zlc_active && 1944 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1945 continue; 1946 if ((alloc_flags & ALLOC_CPUSET) && 1947 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1948 continue; 1949 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 1950 if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS)) 1951 goto try_this_zone; 1952 /* 1953 * Distribute pages in proportion to the individual 1954 * zone size to ensure fair page aging. The zone a 1955 * page was allocated in should have no effect on the 1956 * time the page has in memory before being reclaimed. 1957 * 1958 * Try to stay in local zones in the fastpath. If 1959 * that fails, the slowpath is entered, which will do 1960 * another pass starting with the local zones, but 1961 * ultimately fall back to remote zones that do not 1962 * partake in the fairness round-robin cycle of this 1963 * zonelist. 1964 * 1965 * NOTE: GFP_THISNODE allocations do not partake in 1966 * the kswapd aging protocol, so they can't be fair. 1967 */ 1968 if ((alloc_flags & ALLOC_WMARK_LOW) && 1969 !gfp_thisnode_allocation(gfp_mask)) { 1970 if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) 1971 continue; 1972 if (!zone_local(preferred_zone, zone)) 1973 continue; 1974 } 1975 /* 1976 * When allocating a page cache page for writing, we 1977 * want to get it from a zone that is within its dirty 1978 * limit, such that no single zone holds more than its 1979 * proportional share of globally allowed dirty pages. 1980 * The dirty limits take into account the zone's 1981 * lowmem reserves and high watermark so that kswapd 1982 * should be able to balance it without having to 1983 * write pages from its LRU list. 1984 * 1985 * This may look like it could increase pressure on 1986 * lower zones by failing allocations in higher zones 1987 * before they are full. But the pages that do spill 1988 * over are limited as the lower zones are protected 1989 * by this very same mechanism. It should not become 1990 * a practical burden to them. 1991 * 1992 * XXX: For now, allow allocations to potentially 1993 * exceed the per-zone dirty limit in the slowpath 1994 * (ALLOC_WMARK_LOW unset) before going into reclaim, 1995 * which is important when on a NUMA setup the allowed 1996 * zones are together not big enough to reach the 1997 * global limit. The proper fix for these situations 1998 * will require awareness of zones in the 1999 * dirty-throttling and the flusher threads. 2000 */ 2001 if ((alloc_flags & ALLOC_WMARK_LOW) && 2002 (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone)) 2003 goto this_zone_full; 2004 2005 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 2006 if (!zone_watermark_ok(zone, order, mark, 2007 classzone_idx, alloc_flags)) { 2008 int ret; 2009 2010 if (IS_ENABLED(CONFIG_NUMA) && 2011 !did_zlc_setup && nr_online_nodes > 1) { 2012 /* 2013 * we do zlc_setup if there are multiple nodes 2014 * and before considering the first zone allowed 2015 * by the cpuset. 2016 */ 2017 allowednodes = zlc_setup(zonelist, alloc_flags); 2018 zlc_active = 1; 2019 did_zlc_setup = 1; 2020 } 2021 2022 if (zone_reclaim_mode == 0 || 2023 !zone_allows_reclaim(preferred_zone, zone)) 2024 goto this_zone_full; 2025 2026 /* 2027 * As we may have just activated ZLC, check if the first 2028 * eligible zone has failed zone_reclaim recently. 
2029 */ 2030 if (IS_ENABLED(CONFIG_NUMA) && zlc_active && 2031 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 2032 continue; 2033 2034 ret = zone_reclaim(zone, gfp_mask, order); 2035 switch (ret) { 2036 case ZONE_RECLAIM_NOSCAN: 2037 /* did not scan */ 2038 continue; 2039 case ZONE_RECLAIM_FULL: 2040 /* scanned but unreclaimable */ 2041 continue; 2042 default: 2043 /* did we reclaim enough */ 2044 if (zone_watermark_ok(zone, order, mark, 2045 classzone_idx, alloc_flags)) 2046 goto try_this_zone; 2047 2048 /* 2049 * Failed to reclaim enough to meet watermark. 2050 * Only mark the zone full if checking the min 2051 * watermark or if we failed to reclaim just 2052 * 1<<order pages or else the page allocator 2053 * fastpath will prematurely mark zones full 2054 * when the watermark is between the low and 2055 * min watermarks. 2056 */ 2057 if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) || 2058 ret == ZONE_RECLAIM_SOME) 2059 goto this_zone_full; 2060 2061 continue; 2062 } 2063 } 2064 2065 try_this_zone: 2066 page = buffered_rmqueue(preferred_zone, zone, order, 2067 gfp_mask, migratetype); 2068 if (page) 2069 break; 2070 this_zone_full: 2071 if (IS_ENABLED(CONFIG_NUMA)) 2072 zlc_mark_zone_full(zonelist, z); 2073 } 2074 2075 if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) { 2076 /* Disable zlc cache for second zonelist scan */ 2077 zlc_active = 0; 2078 goto zonelist_scan; 2079 } 2080 2081 if (page) 2082 /* 2083 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was 2084 * necessary to allocate the page. The expectation is 2085 * that the caller is taking steps that will free more 2086 * memory. The caller should avoid the page being used 2087 * for !PFMEMALLOC purposes. 2088 */ 2089 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); 2090 2091 return page; 2092 } 2093 2094 /* 2095 * Large machines with many possible nodes should not always dump per-node 2096 * meminfo in irq context. 2097 */ 2098 static inline bool should_suppress_show_mem(void) 2099 { 2100 bool ret = false; 2101 2102 #if NODES_SHIFT > 8 2103 ret = in_interrupt(); 2104 #endif 2105 return ret; 2106 } 2107 2108 static DEFINE_RATELIMIT_STATE(nopage_rs, 2109 DEFAULT_RATELIMIT_INTERVAL, 2110 DEFAULT_RATELIMIT_BURST); 2111 2112 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) 2113 { 2114 unsigned int filter = SHOW_MEM_FILTER_NODES; 2115 2116 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || 2117 debug_guardpage_minorder() > 0) 2118 return; 2119 2120 /* 2121 * This documents exceptions given to allocations in certain 2122 * contexts that are allowed to allocate outside current's set 2123 * of allowed nodes. 
2124 */ 2125 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2126 if (test_thread_flag(TIF_MEMDIE) || 2127 (current->flags & (PF_MEMALLOC | PF_EXITING))) 2128 filter &= ~SHOW_MEM_FILTER_NODES; 2129 if (in_interrupt() || !(gfp_mask & __GFP_WAIT)) 2130 filter &= ~SHOW_MEM_FILTER_NODES; 2131 2132 if (fmt) { 2133 struct va_format vaf; 2134 va_list args; 2135 2136 va_start(args, fmt); 2137 2138 vaf.fmt = fmt; 2139 vaf.va = &args; 2140 2141 pr_warn("%pV", &vaf); 2142 2143 va_end(args); 2144 } 2145 2146 pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n", 2147 current->comm, order, gfp_mask); 2148 2149 dump_stack(); 2150 if (!should_suppress_show_mem()) 2151 show_mem(filter); 2152 } 2153 2154 static inline int 2155 should_alloc_retry(gfp_t gfp_mask, unsigned int order, 2156 unsigned long did_some_progress, 2157 unsigned long pages_reclaimed) 2158 { 2159 /* Do not loop if specifically requested */ 2160 if (gfp_mask & __GFP_NORETRY) 2161 return 0; 2162 2163 /* Always retry if specifically requested */ 2164 if (gfp_mask & __GFP_NOFAIL) 2165 return 1; 2166 2167 /* 2168 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim 2169 * making forward progress without invoking OOM. Suspend also disables 2170 * storage devices so kswapd will not help. Bail if we are suspending. 2171 */ 2172 if (!did_some_progress && pm_suspended_storage()) 2173 return 0; 2174 2175 /* 2176 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER 2177 * means __GFP_NOFAIL, but that may not be true in other 2178 * implementations. 2179 */ 2180 if (order <= PAGE_ALLOC_COSTLY_ORDER) 2181 return 1; 2182 2183 /* 2184 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is 2185 * specified, then we retry until we no longer reclaim any pages 2186 * (above), or we've reclaimed an order of pages at least as 2187 * large as the allocation's order. In both cases, if the 2188 * allocation still fails, we stop retrying. 2189 */ 2190 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) 2191 return 1; 2192 2193 return 0; 2194 } 2195 2196 static inline struct page * 2197 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 2198 struct zonelist *zonelist, enum zone_type high_zoneidx, 2199 nodemask_t *nodemask, struct zone *preferred_zone, 2200 int migratetype) 2201 { 2202 struct page *page; 2203 2204 /* Acquire the OOM killer lock for the zones in zonelist */ 2205 if (!try_set_zonelist_oom(zonelist, gfp_mask)) { 2206 schedule_timeout_uninterruptible(1); 2207 return NULL; 2208 } 2209 2210 /* 2211 * Go through the zonelist yet one more time, keep very high watermark 2212 * here, this is only to catch a parallel oom killing, we must fail if 2213 * we're still under heavy pressure. 2214 */ 2215 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, 2216 order, zonelist, high_zoneidx, 2217 ALLOC_WMARK_HIGH|ALLOC_CPUSET, 2218 preferred_zone, migratetype); 2219 if (page) 2220 goto out; 2221 2222 if (!(gfp_mask & __GFP_NOFAIL)) { 2223 /* The OOM killer will not help higher order allocs */ 2224 if (order > PAGE_ALLOC_COSTLY_ORDER) 2225 goto out; 2226 /* The OOM killer does not needlessly kill tasks for lowmem */ 2227 if (high_zoneidx < ZONE_NORMAL) 2228 goto out; 2229 /* 2230 * GFP_THISNODE contains __GFP_NORETRY and we never hit this. 2231 * Sanity check for bare calls of __GFP_THISNODE, not real OOM. 2232 * The caller should handle page allocation failure by itself if 2233 * it specifies __GFP_THISNODE. 2234 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER. 
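 *
 * A __GFP_THISNODE caller is expected to cope with the failure on its
 * own, e.g. (illustrative sketch, not a call site in this file):
 *
 *	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_THISNODE |
 *					__GFP_NORETRY | __GFP_NOWARN, 0);
 *	if (!page)
 *		page = alloc_pages(GFP_KERNEL, 0);	(remote fallback)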
 */
		if (gfp_mask & __GFP_THISNODE)
			goto out;
	}
	/* Exhausted what can be done so it's blamo time */
	out_of_memory(zonelist, gfp_mask, order, nodemask, false);

out:
	clear_zonelist_oom(zonelist, gfp_mask);
	return page;
}

#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int migratetype, bool sync_migration,
	bool *contended_compaction, bool *deferred_compaction,
	unsigned long *did_some_progress)
{
	if (!order)
		return NULL;

	if (compaction_deferred(preferred_zone, order)) {
		*deferred_compaction = true;
		return NULL;
	}

	current->flags |= PF_MEMALLOC;
	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
						nodemask, sync_migration,
						contended_compaction);
	current->flags &= ~PF_MEMALLOC;

	if (*did_some_progress != COMPACT_SKIPPED) {
		struct page *page;

		/* Page migration frees to the PCP lists but we want merging */
		drain_pages(get_cpu());
		put_cpu();

		page = get_page_from_freelist(gfp_mask, nodemask,
				order, zonelist, high_zoneidx,
				alloc_flags & ~ALLOC_NO_WATERMARKS,
				preferred_zone, migratetype);
		if (page) {
			preferred_zone->compact_blockskip_flush = false;
			compaction_defer_reset(preferred_zone, order, true);
			count_vm_event(COMPACTSUCCESS);
			return page;
		}

		/*
		 * It's bad if a compaction run occurs and fails.
		 * The most likely reason is that pages exist,
		 * but not enough to satisfy watermarks.
		 */
		count_vm_event(COMPACTFAIL);

		/*
		 * As async compaction considers a subset of pageblocks, only
		 * defer if the failure was a sync compaction failure.
2299 */ 2300 if (sync_migration) 2301 defer_compaction(preferred_zone, order); 2302 2303 cond_resched(); 2304 } 2305 2306 return NULL; 2307 } 2308 #else 2309 static inline struct page * 2310 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2311 struct zonelist *zonelist, enum zone_type high_zoneidx, 2312 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2313 int migratetype, bool sync_migration, 2314 bool *contended_compaction, bool *deferred_compaction, 2315 unsigned long *did_some_progress) 2316 { 2317 return NULL; 2318 } 2319 #endif /* CONFIG_COMPACTION */ 2320 2321 /* Perform direct synchronous page reclaim */ 2322 static int 2323 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, 2324 nodemask_t *nodemask) 2325 { 2326 struct reclaim_state reclaim_state; 2327 int progress; 2328 2329 cond_resched(); 2330 2331 /* We now go into synchronous reclaim */ 2332 cpuset_memory_pressure_bump(); 2333 current->flags |= PF_MEMALLOC; 2334 lockdep_set_current_reclaim_state(gfp_mask); 2335 reclaim_state.reclaimed_slab = 0; 2336 current->reclaim_state = &reclaim_state; 2337 2338 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); 2339 2340 current->reclaim_state = NULL; 2341 lockdep_clear_current_reclaim_state(); 2342 current->flags &= ~PF_MEMALLOC; 2343 2344 cond_resched(); 2345 2346 return progress; 2347 } 2348 2349 /* The really slow allocator path where we enter direct reclaim */ 2350 static inline struct page * 2351 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 2352 struct zonelist *zonelist, enum zone_type high_zoneidx, 2353 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2354 int migratetype, unsigned long *did_some_progress) 2355 { 2356 struct page *page = NULL; 2357 bool drained = false; 2358 2359 *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist, 2360 nodemask); 2361 if (unlikely(!(*did_some_progress))) 2362 return NULL; 2363 2364 /* After successful reclaim, reconsider all zones for allocation */ 2365 if (IS_ENABLED(CONFIG_NUMA)) 2366 zlc_clear_zones_full(zonelist); 2367 2368 retry: 2369 page = get_page_from_freelist(gfp_mask, nodemask, order, 2370 zonelist, high_zoneidx, 2371 alloc_flags & ~ALLOC_NO_WATERMARKS, 2372 preferred_zone, migratetype); 2373 2374 /* 2375 * If an allocation failed after direct reclaim, it could be because 2376 * pages are pinned on the per-cpu lists. 
Drain them and try again 2377 */ 2378 if (!page && !drained) { 2379 drain_all_pages(); 2380 drained = true; 2381 goto retry; 2382 } 2383 2384 return page; 2385 } 2386 2387 /* 2388 * This is called in the allocator slow-path if the allocation request is of 2389 * sufficient urgency to ignore watermarks and take other desperate measures 2390 */ 2391 static inline struct page * 2392 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, 2393 struct zonelist *zonelist, enum zone_type high_zoneidx, 2394 nodemask_t *nodemask, struct zone *preferred_zone, 2395 int migratetype) 2396 { 2397 struct page *page; 2398 2399 do { 2400 page = get_page_from_freelist(gfp_mask, nodemask, order, 2401 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS, 2402 preferred_zone, migratetype); 2403 2404 if (!page && gfp_mask & __GFP_NOFAIL) 2405 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 2406 } while (!page && (gfp_mask & __GFP_NOFAIL)); 2407 2408 return page; 2409 } 2410 2411 static void prepare_slowpath(gfp_t gfp_mask, unsigned int order, 2412 struct zonelist *zonelist, 2413 enum zone_type high_zoneidx, 2414 struct zone *preferred_zone) 2415 { 2416 struct zoneref *z; 2417 struct zone *zone; 2418 2419 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 2420 if (!(gfp_mask & __GFP_NO_KSWAPD)) 2421 wakeup_kswapd(zone, order, zone_idx(preferred_zone)); 2422 /* 2423 * Only reset the batches of zones that were actually 2424 * considered in the fast path, we don't want to 2425 * thrash fairness information for zones that are not 2426 * actually part of this zonelist's round-robin cycle. 2427 */ 2428 if (!zone_local(preferred_zone, zone)) 2429 continue; 2430 mod_zone_page_state(zone, NR_ALLOC_BATCH, 2431 high_wmark_pages(zone) - 2432 low_wmark_pages(zone) - 2433 zone_page_state(zone, NR_ALLOC_BATCH)); 2434 } 2435 } 2436 2437 static inline int 2438 gfp_to_alloc_flags(gfp_t gfp_mask) 2439 { 2440 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 2441 const gfp_t wait = gfp_mask & __GFP_WAIT; 2442 2443 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 2444 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 2445 2446 /* 2447 * The caller may dip into page reserves a bit more if the caller 2448 * cannot run direct reclaim, or if the caller has realtime scheduling 2449 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 2450 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 2451 */ 2452 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 2453 2454 if (!wait) { 2455 /* 2456 * Not worth trying to allocate harder for 2457 * __GFP_NOMEMALLOC even if it can't schedule. 2458 */ 2459 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2460 alloc_flags |= ALLOC_HARDER; 2461 /* 2462 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 2463 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 
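		 *
		 * Worked example of the resulting flags: GFP_ATOMIC is
		 * __GFP_HIGH without __GFP_WAIT, so such a request ends
		 * up with
		 *
		 *	ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER
		 *
		 * and ALLOC_CPUSET cleared, letting it dip further into
		 * the reserves than an ordinary GFP_KERNEL allocation.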
2464 */ 2465 alloc_flags &= ~ALLOC_CPUSET; 2466 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2467 alloc_flags |= ALLOC_HARDER; 2468 2469 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 2470 if (gfp_mask & __GFP_MEMALLOC) 2471 alloc_flags |= ALLOC_NO_WATERMARKS; 2472 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 2473 alloc_flags |= ALLOC_NO_WATERMARKS; 2474 else if (!in_interrupt() && 2475 ((current->flags & PF_MEMALLOC) || 2476 unlikely(test_thread_flag(TIF_MEMDIE)))) 2477 alloc_flags |= ALLOC_NO_WATERMARKS; 2478 } 2479 #ifdef CONFIG_CMA 2480 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 2481 alloc_flags |= ALLOC_CMA; 2482 #endif 2483 return alloc_flags; 2484 } 2485 2486 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 2487 { 2488 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS); 2489 } 2490 2491 static inline struct page * 2492 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 2493 struct zonelist *zonelist, enum zone_type high_zoneidx, 2494 nodemask_t *nodemask, struct zone *preferred_zone, 2495 int migratetype) 2496 { 2497 const gfp_t wait = gfp_mask & __GFP_WAIT; 2498 struct page *page = NULL; 2499 int alloc_flags; 2500 unsigned long pages_reclaimed = 0; 2501 unsigned long did_some_progress; 2502 bool sync_migration = false; 2503 bool deferred_compaction = false; 2504 bool contended_compaction = false; 2505 2506 /* 2507 * In the slowpath, we sanity check order to avoid ever trying to 2508 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 2509 * be using allocators in order of preference for an area that is 2510 * too large. 2511 */ 2512 if (order >= MAX_ORDER) { 2513 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 2514 return NULL; 2515 } 2516 2517 /* 2518 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 2519 * __GFP_NOWARN set) should not cause reclaim since the subsystem 2520 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 2521 * using a larger set of nodes after it has established that the 2522 * allowed per node queues are empty and that nodes are 2523 * over allocated. 2524 */ 2525 if (gfp_thisnode_allocation(gfp_mask)) 2526 goto nopage; 2527 2528 restart: 2529 prepare_slowpath(gfp_mask, order, zonelist, 2530 high_zoneidx, preferred_zone); 2531 2532 /* 2533 * OK, we're below the kswapd watermark and have kicked background 2534 * reclaim. Now things get more complex, so set up alloc_flags according 2535 * to how we want to proceed. 2536 */ 2537 alloc_flags = gfp_to_alloc_flags(gfp_mask); 2538 2539 /* 2540 * Find the true preferred zone if the allocation is unconstrained by 2541 * cpusets. 2542 */ 2543 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) 2544 first_zones_zonelist(zonelist, high_zoneidx, NULL, 2545 &preferred_zone); 2546 2547 rebalance: 2548 /* This is the last chance, in general, before the goto nopage. 
 */
	page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
			high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
			preferred_zone, migratetype);
	if (page)
		goto got_pg;

	/* Allocate without watermarks if the context allows */
	if (alloc_flags & ALLOC_NO_WATERMARKS) {
		/*
		 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
		 * that the allocation is high priority and this type of
		 * allocation is system rather than user oriented.
		 */
		zonelist = node_zonelist(numa_node_id(), gfp_mask);

		page = __alloc_pages_high_priority(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
		if (page)
			goto got_pg;
	}

	/* Atomic allocations - we can't balance anything */
	if (!wait) {
		/*
		 * All existing users of the deprecated __GFP_NOFAIL are
		 * blockable, so warn of any new users that actually allow this
		 * type of allocation to fail.
		 */
		WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
		goto nopage;
	}

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC)
		goto nopage;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
		goto nopage;

	/*
	 * Try direct compaction. The first pass is asynchronous. Subsequent
	 * attempts after direct reclaim are synchronous.
	 */
	page = __alloc_pages_direct_compact(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, sync_migration,
					&contended_compaction,
					&deferred_compaction,
					&did_some_progress);
	if (page)
		goto got_pg;
	sync_migration = true;

	/*
	 * If compaction is deferred for high-order allocations, it is because
	 * sync compaction recently failed. If this is the case and the caller
	 * requested a movable allocation that does not heavily disrupt the
	 * system then fail the allocation instead of entering direct reclaim.
	 */
	if ((deferred_compaction || contended_compaction) &&
						(gfp_mask & __GFP_NO_KSWAPD))
		goto nopage;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask,
					alloc_flags, preferred_zone,
					migratetype, &did_some_progress);
	if (page)
		goto got_pg;

	/*
	 * If we failed to make any progress reclaiming, then we are
	 * running out of options and have to consider going OOM.
	 */
	if (!did_some_progress) {
		if (oom_gfp_allowed(gfp_mask)) {
			if (oom_killer_disabled)
				goto nopage;
			/* Coredumps can quickly deplete all memory reserves */
			if ((current->flags & PF_DUMPCORE) &&
			    !(gfp_mask & __GFP_NOFAIL))
				goto nopage;
			page = __alloc_pages_may_oom(gfp_mask, order,
					zonelist, high_zoneidx,
					nodemask, preferred_zone,
					migratetype);
			if (page)
				goto got_pg;

			if (!(gfp_mask & __GFP_NOFAIL)) {
				/*
				 * The oom killer is not called for high-order
				 * allocations that may fail, so if no progress
				 * is being made, there are no other options and
				 * retrying is unlikely to help.
				 */
				if (order > PAGE_ALLOC_COSTLY_ORDER)
					goto nopage;
				/*
				 * The oom killer is not called for lowmem
				 * allocations to prevent needlessly killing
				 * innocent tasks.
2658 */ 2659 if (high_zoneidx < ZONE_NORMAL) 2660 goto nopage; 2661 } 2662 2663 goto restart; 2664 } 2665 } 2666 2667 /* Check if we should retry the allocation */ 2668 pages_reclaimed += did_some_progress; 2669 if (should_alloc_retry(gfp_mask, order, did_some_progress, 2670 pages_reclaimed)) { 2671 /* Wait for some write requests to complete then retry */ 2672 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 2673 goto rebalance; 2674 } else { 2675 /* 2676 * High-order allocations do not necessarily loop after 2677 * direct reclaim and reclaim/compaction depends on compaction 2678 * being called after reclaim so call directly if necessary 2679 */ 2680 page = __alloc_pages_direct_compact(gfp_mask, order, 2681 zonelist, high_zoneidx, 2682 nodemask, 2683 alloc_flags, preferred_zone, 2684 migratetype, sync_migration, 2685 &contended_compaction, 2686 &deferred_compaction, 2687 &did_some_progress); 2688 if (page) 2689 goto got_pg; 2690 } 2691 2692 nopage: 2693 warn_alloc_failed(gfp_mask, order, NULL); 2694 return page; 2695 got_pg: 2696 if (kmemcheck_enabled) 2697 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 2698 2699 return page; 2700 } 2701 2702 /* 2703 * This is the 'heart' of the zoned buddy allocator. 2704 */ 2705 struct page * 2706 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 2707 struct zonelist *zonelist, nodemask_t *nodemask) 2708 { 2709 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 2710 struct zone *preferred_zone; 2711 struct page *page = NULL; 2712 int migratetype = allocflags_to_migratetype(gfp_mask); 2713 unsigned int cpuset_mems_cookie; 2714 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET; 2715 struct mem_cgroup *memcg = NULL; 2716 2717 gfp_mask &= gfp_allowed_mask; 2718 2719 lockdep_trace_alloc(gfp_mask); 2720 2721 might_sleep_if(gfp_mask & __GFP_WAIT); 2722 2723 if (should_fail_alloc_page(gfp_mask, order)) 2724 return NULL; 2725 2726 /* 2727 * Check the zones suitable for the gfp_mask contain at least one 2728 * valid zone. It's possible to have an empty zonelist as a result 2729 * of GFP_THISNODE and a memoryless node 2730 */ 2731 if (unlikely(!zonelist->_zonerefs->zone)) 2732 return NULL; 2733 2734 /* 2735 * Will only have any effect when __GFP_KMEMCG is set. This is 2736 * verified in the (always inline) callee 2737 */ 2738 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) 2739 return NULL; 2740 2741 retry_cpuset: 2742 cpuset_mems_cookie = get_mems_allowed(); 2743 2744 /* The preferred zone is used for statistics later */ 2745 first_zones_zonelist(zonelist, high_zoneidx, 2746 nodemask ? : &cpuset_current_mems_allowed, 2747 &preferred_zone); 2748 if (!preferred_zone) 2749 goto out; 2750 2751 #ifdef CONFIG_CMA 2752 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 2753 alloc_flags |= ALLOC_CMA; 2754 #endif 2755 /* First allocation attempt */ 2756 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 2757 zonelist, high_zoneidx, alloc_flags, 2758 preferred_zone, migratetype); 2759 if (unlikely(!page)) { 2760 /* 2761 * Runtime PM, block IO and its error handling path 2762 * can deadlock because I/O on the device might not 2763 * complete. 
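	 *
	 * memalloc_noio_flags() below strips __GFP_IO/__GFP_FS from the
	 * mask for tasks that set PF_MEMALLOC_NOIO, e.g. (sketch of a
	 * runtime-PM resume path, not code from this file):
	 *
	 *	unsigned int noio_flags = memalloc_noio_save();
	 *	... resume the device; allocations now behave as GFP_NOIO ...
	 *	memalloc_noio_restore(noio_flags);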
	 */
		gfp_mask = memalloc_noio_flags(gfp_mask);
		page = __alloc_pages_slowpath(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				preferred_zone, migratetype);
	}

	trace_mm_page_alloc(page, order, gfp_mask, migratetype);

out:
	/*
	 * When updating a task's mems_allowed, it is possible to race with
	 * parallel threads in such a way that an allocation can fail while
	 * the mask is being updated. If a page allocation is about to fail,
	 * check if the cpuset changed during allocation and if so, retry.
	 */
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;

	memcg_kmem_commit_charge(page, memcg, order);

	return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);

/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a 32-bit address, which cannot represent
	 * a highmem page.
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_hot_cold_page(page, 0);
		else
			__free_pages_ok(page, order);
	}
}
EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(free_pages);

/*
 * __free_memcg_kmem_pages and free_memcg_kmem_pages will free
 * pages allocated with __GFP_KMEMCG.
 *
 * Those pages are accounted to a particular memcg, embedded in the
 * corresponding page_cgroup. To avoid adding a hit in the allocator to search
 * for that information only to find out that it is NULL for users who have no
 * interest in that whatsoever, we provide these functions.
 *
 * The caller knows better which flags it relies on.
 */
void __free_memcg_kmem_pages(struct page *page, unsigned int order)
{
	memcg_kmem_uncharge_pages(page, order);
	__free_pages(page, order);
}

void free_memcg_kmem_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_memcg_kmem_pages(virt_to_page((void *)addr), order);
	}
}

static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
{
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before
 * falling back.
 * Note this is not alloc_pages_exact_node(), which allocates on a specific
 * node but is not exact.
 */
void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
	unsigned order = get_order(size);
	struct page *p = alloc_pages_node(nid, gfp_mask, order);
	if (!p)
		return NULL;
	return make_alloc_exact((unsigned long)page_address(p), order, size);
}
EXPORT_SYMBOL(alloc_pages_exact_nid);

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);

/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index.  For each
 * zone, the number of pages is calculated as:
 *	managed_pages - high_pages
 */
static unsigned long nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned long sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->managed_pages;
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}

/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the high
 * watermark within ZONE_DMA and ZONE_NORMAL.
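 *
 * Each zone's contribution is managed_pages - high_pages, as in
 * nr_free_zone_pages() above. Worked example with hypothetical numbers:
 * a zone with 262144 managed pages and a high watermark of 4096 pages
 * contributes 262144 - 4096 = 258048 pages to the returned sum; a zone
 * at or below its high watermark contributes nothing.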
2975 */ 2976 unsigned long nr_free_buffer_pages(void) 2977 { 2978 return nr_free_zone_pages(gfp_zone(GFP_USER)); 2979 } 2980 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 2981 2982 /** 2983 * nr_free_pagecache_pages - count number of pages beyond high watermark 2984 * 2985 * nr_free_pagecache_pages() counts the number of pages which are beyond the 2986 * high watermark within all zones. 2987 */ 2988 unsigned long nr_free_pagecache_pages(void) 2989 { 2990 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 2991 } 2992 2993 static inline void show_node(struct zone *zone) 2994 { 2995 if (IS_ENABLED(CONFIG_NUMA)) 2996 printk("Node %d ", zone_to_nid(zone)); 2997 } 2998 2999 void si_meminfo(struct sysinfo *val) 3000 { 3001 val->totalram = totalram_pages; 3002 val->sharedram = 0; 3003 val->freeram = global_page_state(NR_FREE_PAGES); 3004 val->bufferram = nr_blockdev_pages(); 3005 val->totalhigh = totalhigh_pages; 3006 val->freehigh = nr_free_highpages(); 3007 val->mem_unit = PAGE_SIZE; 3008 } 3009 3010 EXPORT_SYMBOL(si_meminfo); 3011 3012 #ifdef CONFIG_NUMA 3013 void si_meminfo_node(struct sysinfo *val, int nid) 3014 { 3015 int zone_type; /* needs to be signed */ 3016 unsigned long managed_pages = 0; 3017 pg_data_t *pgdat = NODE_DATA(nid); 3018 3019 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 3020 managed_pages += pgdat->node_zones[zone_type].managed_pages; 3021 val->totalram = managed_pages; 3022 val->freeram = node_page_state(nid, NR_FREE_PAGES); 3023 #ifdef CONFIG_HIGHMEM 3024 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages; 3025 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 3026 NR_FREE_PAGES); 3027 #else 3028 val->totalhigh = 0; 3029 val->freehigh = 0; 3030 #endif 3031 val->mem_unit = PAGE_SIZE; 3032 } 3033 #endif 3034 3035 /* 3036 * Determine whether the node should be displayed or not, depending on whether 3037 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 3038 */ 3039 bool skip_free_areas_node(unsigned int flags, int nid) 3040 { 3041 bool ret = false; 3042 unsigned int cpuset_mems_cookie; 3043 3044 if (!(flags & SHOW_MEM_FILTER_NODES)) 3045 goto out; 3046 3047 do { 3048 cpuset_mems_cookie = get_mems_allowed(); 3049 ret = !node_isset(nid, cpuset_current_mems_allowed); 3050 } while (!put_mems_allowed(cpuset_mems_cookie)); 3051 out: 3052 return ret; 3053 } 3054 3055 #define K(x) ((x) << (PAGE_SHIFT-10)) 3056 3057 static void show_migration_types(unsigned char type) 3058 { 3059 static const char types[MIGRATE_TYPES] = { 3060 [MIGRATE_UNMOVABLE] = 'U', 3061 [MIGRATE_RECLAIMABLE] = 'E', 3062 [MIGRATE_MOVABLE] = 'M', 3063 [MIGRATE_RESERVE] = 'R', 3064 #ifdef CONFIG_CMA 3065 [MIGRATE_CMA] = 'C', 3066 #endif 3067 #ifdef CONFIG_MEMORY_ISOLATION 3068 [MIGRATE_ISOLATE] = 'I', 3069 #endif 3070 }; 3071 char tmp[MIGRATE_TYPES + 1]; 3072 char *p = tmp; 3073 int i; 3074 3075 for (i = 0; i < MIGRATE_TYPES; i++) { 3076 if (type & (1 << i)) 3077 *p++ = types[i]; 3078 } 3079 3080 *p = '\0'; 3081 printk("(%s) ", tmp); 3082 } 3083 3084 /* 3085 * Show free area list (used inside shift_scroll-lock stuff) 3086 * We also calculate the percentage fragmentation. We do this by counting the 3087 * memory on each free list with the exception of the first item on the list. 3088 * Suppresses nodes that are not allowed by current's cpuset if 3089 * SHOW_MEM_FILTER_NODES is passed. 
3090 */ 3091 void show_free_areas(unsigned int filter) 3092 { 3093 int cpu; 3094 struct zone *zone; 3095 3096 for_each_populated_zone(zone) { 3097 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3098 continue; 3099 show_node(zone); 3100 printk("%s per-cpu:\n", zone->name); 3101 3102 for_each_online_cpu(cpu) { 3103 struct per_cpu_pageset *pageset; 3104 3105 pageset = per_cpu_ptr(zone->pageset, cpu); 3106 3107 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 3108 cpu, pageset->pcp.high, 3109 pageset->pcp.batch, pageset->pcp.count); 3110 } 3111 } 3112 3113 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 3114 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 3115 " unevictable:%lu" 3116 " dirty:%lu writeback:%lu unstable:%lu\n" 3117 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" 3118 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 3119 " free_cma:%lu\n", 3120 global_page_state(NR_ACTIVE_ANON), 3121 global_page_state(NR_INACTIVE_ANON), 3122 global_page_state(NR_ISOLATED_ANON), 3123 global_page_state(NR_ACTIVE_FILE), 3124 global_page_state(NR_INACTIVE_FILE), 3125 global_page_state(NR_ISOLATED_FILE), 3126 global_page_state(NR_UNEVICTABLE), 3127 global_page_state(NR_FILE_DIRTY), 3128 global_page_state(NR_WRITEBACK), 3129 global_page_state(NR_UNSTABLE_NFS), 3130 global_page_state(NR_FREE_PAGES), 3131 global_page_state(NR_SLAB_RECLAIMABLE), 3132 global_page_state(NR_SLAB_UNRECLAIMABLE), 3133 global_page_state(NR_FILE_MAPPED), 3134 global_page_state(NR_SHMEM), 3135 global_page_state(NR_PAGETABLE), 3136 global_page_state(NR_BOUNCE), 3137 global_page_state(NR_FREE_CMA_PAGES)); 3138 3139 for_each_populated_zone(zone) { 3140 int i; 3141 3142 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3143 continue; 3144 show_node(zone); 3145 printk("%s" 3146 " free:%lukB" 3147 " min:%lukB" 3148 " low:%lukB" 3149 " high:%lukB" 3150 " active_anon:%lukB" 3151 " inactive_anon:%lukB" 3152 " active_file:%lukB" 3153 " inactive_file:%lukB" 3154 " unevictable:%lukB" 3155 " isolated(anon):%lukB" 3156 " isolated(file):%lukB" 3157 " present:%lukB" 3158 " managed:%lukB" 3159 " mlocked:%lukB" 3160 " dirty:%lukB" 3161 " writeback:%lukB" 3162 " mapped:%lukB" 3163 " shmem:%lukB" 3164 " slab_reclaimable:%lukB" 3165 " slab_unreclaimable:%lukB" 3166 " kernel_stack:%lukB" 3167 " pagetables:%lukB" 3168 " unstable:%lukB" 3169 " bounce:%lukB" 3170 " free_cma:%lukB" 3171 " writeback_tmp:%lukB" 3172 " pages_scanned:%lu" 3173 " all_unreclaimable? 
%s" 3174 "\n", 3175 zone->name, 3176 K(zone_page_state(zone, NR_FREE_PAGES)), 3177 K(min_wmark_pages(zone)), 3178 K(low_wmark_pages(zone)), 3179 K(high_wmark_pages(zone)), 3180 K(zone_page_state(zone, NR_ACTIVE_ANON)), 3181 K(zone_page_state(zone, NR_INACTIVE_ANON)), 3182 K(zone_page_state(zone, NR_ACTIVE_FILE)), 3183 K(zone_page_state(zone, NR_INACTIVE_FILE)), 3184 K(zone_page_state(zone, NR_UNEVICTABLE)), 3185 K(zone_page_state(zone, NR_ISOLATED_ANON)), 3186 K(zone_page_state(zone, NR_ISOLATED_FILE)), 3187 K(zone->present_pages), 3188 K(zone->managed_pages), 3189 K(zone_page_state(zone, NR_MLOCK)), 3190 K(zone_page_state(zone, NR_FILE_DIRTY)), 3191 K(zone_page_state(zone, NR_WRITEBACK)), 3192 K(zone_page_state(zone, NR_FILE_MAPPED)), 3193 K(zone_page_state(zone, NR_SHMEM)), 3194 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 3195 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 3196 zone_page_state(zone, NR_KERNEL_STACK) * 3197 THREAD_SIZE / 1024, 3198 K(zone_page_state(zone, NR_PAGETABLE)), 3199 K(zone_page_state(zone, NR_UNSTABLE_NFS)), 3200 K(zone_page_state(zone, NR_BOUNCE)), 3201 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), 3202 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 3203 zone->pages_scanned, 3204 (!zone_reclaimable(zone) ? "yes" : "no") 3205 ); 3206 printk("lowmem_reserve[]:"); 3207 for (i = 0; i < MAX_NR_ZONES; i++) 3208 printk(" %lu", zone->lowmem_reserve[i]); 3209 printk("\n"); 3210 } 3211 3212 for_each_populated_zone(zone) { 3213 unsigned long nr[MAX_ORDER], flags, order, total = 0; 3214 unsigned char types[MAX_ORDER]; 3215 3216 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3217 continue; 3218 show_node(zone); 3219 printk("%s: ", zone->name); 3220 3221 spin_lock_irqsave(&zone->lock, flags); 3222 for (order = 0; order < MAX_ORDER; order++) { 3223 struct free_area *area = &zone->free_area[order]; 3224 int type; 3225 3226 nr[order] = area->nr_free; 3227 total += nr[order] << order; 3228 3229 types[order] = 0; 3230 for (type = 0; type < MIGRATE_TYPES; type++) { 3231 if (!list_empty(&area->free_list[type])) 3232 types[order] |= 1 << type; 3233 } 3234 } 3235 spin_unlock_irqrestore(&zone->lock, flags); 3236 for (order = 0; order < MAX_ORDER; order++) { 3237 printk("%lu*%lukB ", nr[order], K(1UL) << order); 3238 if (nr[order]) 3239 show_migration_types(types[order]); 3240 } 3241 printk("= %lukB\n", K(total)); 3242 } 3243 3244 hugetlb_show_meminfo(); 3245 3246 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 3247 3248 show_swap_cache_info(); 3249 } 3250 3251 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 3252 { 3253 zoneref->zone = zone; 3254 zoneref->zone_idx = zone_idx(zone); 3255 } 3256 3257 /* 3258 * Builds allocation fallback zone lists. 3259 * 3260 * Add all populated zones of a node to the zonelist. 3261 */ 3262 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 3263 int nr_zones) 3264 { 3265 struct zone *zone; 3266 enum zone_type zone_type = MAX_NR_ZONES; 3267 3268 do { 3269 zone_type--; 3270 zone = pgdat->node_zones + zone_type; 3271 if (populated_zone(zone)) { 3272 zoneref_set_zone(zone, 3273 &zonelist->_zonerefs[nr_zones++]); 3274 check_highest_zone(zone_type); 3275 } 3276 } while (zone_type); 3277 3278 return nr_zones; 3279 } 3280 3281 3282 /* 3283 * zonelist_order: 3284 * 0 = automatic detection of better ordering. 
 * 1 = order by ([node] distance, -zonetype)
 * 2 = order by (-zonetype, [node] distance)
 *
 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
 * the same zonelist. So only NUMA can configure this param.
 */
#define ZONELIST_ORDER_DEFAULT	0
#define ZONELIST_ORDER_NODE	1
#define ZONELIST_ORDER_ZONE	2

/* zonelist order in the kernel.
 * set_zonelist_order() will set this to NODE or ZONE.
 */
static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};


#ifdef CONFIG_NUMA
/* The value the user specified....changed by config */
static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
/* string for sysctl */
#define NUMA_ZONELIST_ORDER_LEN	16
char numa_zonelist_order[16] = "default";

/*
 * interface to configure zonelist ordering.
 * command line option "numa_zonelist_order"
 *	= "[dD]efault"	- default, automatic configuration.
 *	= "[nN]ode"	- order by node locality, then by zone within node
 *	= "[zZ]one"	- order by zone, then by locality within zone
 */

static int __parse_numa_zonelist_order(char *s)
{
	if (*s == 'd' || *s == 'D') {
		user_zonelist_order = ZONELIST_ORDER_DEFAULT;
	} else if (*s == 'n' || *s == 'N') {
		user_zonelist_order = ZONELIST_ORDER_NODE;
	} else if (*s == 'z' || *s == 'Z') {
		user_zonelist_order = ZONELIST_ORDER_ZONE;
	} else {
		printk(KERN_WARNING
			"Ignoring invalid numa_zonelist_order value: %s\n", s);
		return -EINVAL;
	}
	return 0;
}

static __init int setup_numa_zonelist_order(char *s)
{
	int ret;

	if (!s)
		return 0;

	ret = __parse_numa_zonelist_order(s);
	if (ret == 0)
		strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);

	return ret;
}
early_param("numa_zonelist_order", setup_numa_zonelist_order);

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos)
{
	char saved_string[NUMA_ZONELIST_ORDER_LEN];
	int ret;
	static DEFINE_MUTEX(zl_order_mutex);

	mutex_lock(&zl_order_mutex);
	if (write) {
		if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
			ret = -EINVAL;
			goto out;
		}
		strcpy(saved_string, (char *)table->data);
	}
	ret = proc_dostring(table, write, buffer, length, ppos);
	if (ret)
		goto out;
	if (write) {
		int oldval = user_zonelist_order;

		ret = __parse_numa_zonelist_order((char *)table->data);
		if (ret) {
			/*
			 * Bogus value:
restore saved string 3378 */ 3379 strncpy((char *)table->data, saved_string, 3380 NUMA_ZONELIST_ORDER_LEN); 3381 user_zonelist_order = oldval; 3382 } else if (oldval != user_zonelist_order) { 3383 mutex_lock(&zonelists_mutex); 3384 build_all_zonelists(NULL, NULL); 3385 mutex_unlock(&zonelists_mutex); 3386 } 3387 } 3388 out: 3389 mutex_unlock(&zl_order_mutex); 3390 return ret; 3391 } 3392 3393 3394 #define MAX_NODE_LOAD (nr_online_nodes) 3395 static int node_load[MAX_NUMNODES]; 3396 3397 /** 3398 * find_next_best_node - find the next node that should appear in a given node's fallback list 3399 * @node: node whose fallback list we're appending 3400 * @used_node_mask: nodemask_t of already used nodes 3401 * 3402 * We use a number of factors to determine which is the next node that should 3403 * appear on a given node's fallback list. The node should not have appeared 3404 * already in @node's fallback list, and it should be the next closest node 3405 * according to the distance array (which contains arbitrary distance values 3406 * from each node to each node in the system), and should also prefer nodes 3407 * with no CPUs, since presumably they'll have very little allocation pressure 3408 * on them otherwise. 3409 * It returns -1 if no node is found. 3410 */ 3411 static int find_next_best_node(int node, nodemask_t *used_node_mask) 3412 { 3413 int n, val; 3414 int min_val = INT_MAX; 3415 int best_node = NUMA_NO_NODE; 3416 const struct cpumask *tmp = cpumask_of_node(0); 3417 3418 /* Use the local node if we haven't already */ 3419 if (!node_isset(node, *used_node_mask)) { 3420 node_set(node, *used_node_mask); 3421 return node; 3422 } 3423 3424 for_each_node_state(n, N_MEMORY) { 3425 3426 /* Don't want a node to appear more than once */ 3427 if (node_isset(n, *used_node_mask)) 3428 continue; 3429 3430 /* Use the distance array to find the distance */ 3431 val = node_distance(node, n); 3432 3433 /* Penalize nodes under us ("prefer the next node") */ 3434 val += (n < node); 3435 3436 /* Give preference to headless and unused nodes */ 3437 tmp = cpumask_of_node(n); 3438 if (!cpumask_empty(tmp)) 3439 val += PENALTY_FOR_NODE_WITH_CPUS; 3440 3441 /* Slight preference for less loaded node */ 3442 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 3443 val += node_load[n]; 3444 3445 if (val < min_val) { 3446 min_val = val; 3447 best_node = n; 3448 } 3449 } 3450 3451 if (best_node >= 0) 3452 node_set(best_node, *used_node_mask); 3453 3454 return best_node; 3455 } 3456 3457 3458 /* 3459 * Build zonelists ordered by node and zones within node. 3460 * This results in maximum locality--normal zone overflows into local 3461 * DMA zone, if any--but risks exhausting DMA zone. 3462 */ 3463 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 3464 { 3465 int j; 3466 struct zonelist *zonelist; 3467 3468 zonelist = &pgdat->node_zonelists[0]; 3469 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 3470 ; 3471 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3472 zonelist->_zonerefs[j].zone = NULL; 3473 zonelist->_zonerefs[j].zone_idx = 0; 3474 } 3475 3476 /* 3477 * Build gfp_thisnode zonelists 3478 */ 3479 static void build_thisnode_zonelists(pg_data_t *pgdat) 3480 { 3481 int j; 3482 struct zonelist *zonelist; 3483 3484 zonelist = &pgdat->node_zonelists[1]; 3485 j = build_zonelists_node(pgdat, zonelist, 0); 3486 zonelist->_zonerefs[j].zone = NULL; 3487 zonelist->_zonerefs[j].zone_idx = 0; 3488 } 3489 3490 /* 3491 * Build zonelists ordered by zone and nodes within zones. 
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */
static int node_order[MAX_NUMNODES];

static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
{
	int pos, j, node;
	int zone_type;		/* needs to be signed */
	struct zone *z;
	struct zonelist *zonelist;

	zonelist = &pgdat->node_zonelists[0];
	pos = 0;
	for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
		for (j = 0; j < nr_nodes; j++) {
			node = node_order[j];
			z = &NODE_DATA(node)->node_zones[zone_type];
			if (populated_zone(z)) {
				zoneref_set_zone(z,
					&zonelist->_zonerefs[pos++]);
				check_highest_zone(zone_type);
			}
		}
	}
	zonelist->_zonerefs[pos].zone = NULL;
	zonelist->_zonerefs[pos].zone_idx = 0;
}

static int default_zonelist_order(void)
{
	int nid, zone_type;
	unsigned long low_kmem_size, total_size;
	struct zone *z;
	int average_size;
	/*
	 * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
	 * If they are really small and used heavily, the system can fall
	 * into OOM very easily.
	 * This function detects ZONE_DMA/DMA32 size and configures zone order.
	 */
	/* Is there ZONE_NORMAL? (e.g. ppc has only the DMA zone..) */
	low_kmem_size = 0;
	total_size = 0;
	for_each_online_node(nid) {
		for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
			z = &NODE_DATA(nid)->node_zones[zone_type];
			if (populated_zone(z)) {
				if (zone_type < ZONE_NORMAL)
					low_kmem_size += z->managed_pages;
				total_size += z->managed_pages;
			} else if (zone_type == ZONE_NORMAL) {
				/*
				 * If any node has only lowmem, then node order
				 * is preferred to allow kernel allocations
				 * locally; otherwise, they can easily infringe
				 * on other nodes when there is an abundance of
				 * lowmem available to allocate from.
				 */
				return ZONELIST_ORDER_NODE;
			}
		}
	}
	if (!low_kmem_size ||  /* there is no DMA area. */
	    low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
		return ZONELIST_ORDER_NODE;
	/*
	 * look into each node's config.
	 * If there is a node whose DMA/DMA32 memory covers a very large
	 * share of its local memory, node order may be suitable.
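	 *
	 * Worked example with hypothetical numbers: a node with 2048M of
	 * memory of which 1638M is DMA32 has low_kmem_size at roughly 80%
	 * of total_size, above the 70% cutoff below, so node order is
	 * chosen (provided the node is not smaller than the average node).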
3563 */ 3564 average_size = total_size / 3565 (nodes_weight(node_states[N_MEMORY]) + 1); 3566 for_each_online_node(nid) { 3567 low_kmem_size = 0; 3568 total_size = 0; 3569 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 3570 z = &NODE_DATA(nid)->node_zones[zone_type]; 3571 if (populated_zone(z)) { 3572 if (zone_type < ZONE_NORMAL) 3573 low_kmem_size += z->present_pages; 3574 total_size += z->present_pages; 3575 } 3576 } 3577 if (low_kmem_size && 3578 total_size > average_size && /* ignore small node */ 3579 low_kmem_size > total_size * 70/100) 3580 return ZONELIST_ORDER_NODE; 3581 } 3582 return ZONELIST_ORDER_ZONE; 3583 } 3584 3585 static void set_zonelist_order(void) 3586 { 3587 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 3588 current_zonelist_order = default_zonelist_order(); 3589 else 3590 current_zonelist_order = user_zonelist_order; 3591 } 3592 3593 static void build_zonelists(pg_data_t *pgdat) 3594 { 3595 int j, node, load; 3596 enum zone_type i; 3597 nodemask_t used_mask; 3598 int local_node, prev_node; 3599 struct zonelist *zonelist; 3600 int order = current_zonelist_order; 3601 3602 /* initialize zonelists */ 3603 for (i = 0; i < MAX_ZONELISTS; i++) { 3604 zonelist = pgdat->node_zonelists + i; 3605 zonelist->_zonerefs[0].zone = NULL; 3606 zonelist->_zonerefs[0].zone_idx = 0; 3607 } 3608 3609 /* NUMA-aware ordering of nodes */ 3610 local_node = pgdat->node_id; 3611 load = nr_online_nodes; 3612 prev_node = local_node; 3613 nodes_clear(used_mask); 3614 3615 memset(node_order, 0, sizeof(node_order)); 3616 j = 0; 3617 3618 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 3619 /* 3620 * We don't want to pressure a particular node. 3621 * So adding penalty to the first node in same 3622 * distance group to make it round-robin. 3623 */ 3624 if (node_distance(local_node, node) != 3625 node_distance(local_node, prev_node)) 3626 node_load[node] = load; 3627 3628 prev_node = node; 3629 load--; 3630 if (order == ZONELIST_ORDER_NODE) 3631 build_zonelists_in_node_order(pgdat, node); 3632 else 3633 node_order[j++] = node; /* remember order */ 3634 } 3635 3636 if (order == ZONELIST_ORDER_ZONE) { 3637 /* calculate node order -- i.e., DMA last! */ 3638 build_zonelists_in_zone_order(pgdat, j); 3639 } 3640 3641 build_thisnode_zonelists(pgdat); 3642 } 3643 3644 /* Construct the zonelist performance cache - see further mmzone.h */ 3645 static void build_zonelist_cache(pg_data_t *pgdat) 3646 { 3647 struct zonelist *zonelist; 3648 struct zonelist_cache *zlc; 3649 struct zoneref *z; 3650 3651 zonelist = &pgdat->node_zonelists[0]; 3652 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 3653 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 3654 for (z = zonelist->_zonerefs; z->zone; z++) 3655 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 3656 } 3657 3658 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 3659 /* 3660 * Return node id of node used for "local" allocations. 3661 * I.e., first node id of first zone in arg node's generic zonelist. 3662 * Used for initializing percpu 'numa_mem', which is used primarily 3663 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 
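 *
 * Usage sketch (illustrative): if node 2 is memoryless and its
 * zonelist begins with the zones of node 1, then
 *
 *	local_memory_node(2) == 1
 *
 * while a node with local memory simply reports its own id.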
3664 */ 3665 int local_memory_node(int node) 3666 { 3667 struct zone *zone; 3668 3669 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 3670 gfp_zone(GFP_KERNEL), 3671 NULL, 3672 &zone); 3673 return zone->node; 3674 } 3675 #endif 3676 3677 #else /* CONFIG_NUMA */ 3678 3679 static void set_zonelist_order(void) 3680 { 3681 current_zonelist_order = ZONELIST_ORDER_ZONE; 3682 } 3683 3684 static void build_zonelists(pg_data_t *pgdat) 3685 { 3686 int node, local_node; 3687 enum zone_type j; 3688 struct zonelist *zonelist; 3689 3690 local_node = pgdat->node_id; 3691 3692 zonelist = &pgdat->node_zonelists[0]; 3693 j = build_zonelists_node(pgdat, zonelist, 0); 3694 3695 /* 3696 * Now we build the zonelist so that it contains the zones 3697 * of all the other nodes. 3698 * We don't want to pressure a particular node, so when 3699 * building the zones for node N, we make sure that the 3700 * zones coming right after the local ones are those from 3701 * node N+1 (modulo N) 3702 */ 3703 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 3704 if (!node_online(node)) 3705 continue; 3706 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3707 } 3708 for (node = 0; node < local_node; node++) { 3709 if (!node_online(node)) 3710 continue; 3711 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3712 } 3713 3714 zonelist->_zonerefs[j].zone = NULL; 3715 zonelist->_zonerefs[j].zone_idx = 0; 3716 } 3717 3718 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 3719 static void build_zonelist_cache(pg_data_t *pgdat) 3720 { 3721 pgdat->node_zonelists[0].zlcache_ptr = NULL; 3722 } 3723 3724 #endif /* CONFIG_NUMA */ 3725 3726 /* 3727 * Boot pageset table. One per cpu which is going to be used for all 3728 * zones and all nodes. The parameters will be set in such a way 3729 * that an item put on a list will immediately be handed over to 3730 * the buddy list. This is safe since pageset manipulation is done 3731 * with interrupts disabled. 3732 * 3733 * The boot_pagesets must be kept even after bootup is complete for 3734 * unused processors and/or zones. They do play a role for bootstrapping 3735 * hotplugged processors. 3736 * 3737 * zoneinfo_show() and maybe other functions do 3738 * not check if the processor is online before following the pageset pointer. 3739 * Other parts of the kernel may not check if the zone is available. 3740 */ 3741 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 3742 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 3743 static void setup_zone_pageset(struct zone *zone); 3744 3745 /* 3746 * Global mutex to protect against size modification of zonelists 3747 * as well as to serialize pageset setup for the new populated zone. 3748 */ 3749 DEFINE_MUTEX(zonelists_mutex); 3750 3751 /* return values int ....just for stop_machine() */ 3752 static int __build_all_zonelists(void *data) 3753 { 3754 int nid; 3755 int cpu; 3756 pg_data_t *self = data; 3757 3758 #ifdef CONFIG_NUMA 3759 memset(node_load, 0, sizeof(node_load)); 3760 #endif 3761 3762 if (self && !node_online(self->node_id)) { 3763 build_zonelists(self); 3764 build_zonelist_cache(self); 3765 } 3766 3767 for_each_online_node(nid) { 3768 pg_data_t *pgdat = NODE_DATA(nid); 3769 3770 build_zonelists(pgdat); 3771 build_zonelist_cache(pgdat); 3772 } 3773 3774 /* 3775 * Initialize the boot_pagesets that are going to be used 3776 * for bootstrapping processors. 
The real pagesets for 3777 * each zone will be allocated later when the per cpu 3778 * allocator is available. 3779 * 3780 * boot_pagesets are used also for bootstrapping offline 3781 * cpus if the system is already booted because the pagesets 3782 * are needed to initialize allocators on a specific cpu too. 3783 * F.e. the percpu allocator needs the page allocator which 3784 * needs the percpu allocator in order to allocate its pagesets 3785 * (a chicken-egg dilemma). 3786 */ 3787 for_each_possible_cpu(cpu) { 3788 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 3789 3790 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 3791 /* 3792 * We now know the "local memory node" for each node-- 3793 * i.e., the node of the first zone in the generic zonelist. 3794 * Set up numa_mem percpu variable for on-line cpus. During 3795 * boot, only the boot cpu should be on-line; we'll init the 3796 * secondary cpus' numa_mem as they come on-line. During 3797 * node/memory hotplug, we'll fixup all on-line cpus. 3798 */ 3799 if (cpu_online(cpu)) 3800 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 3801 #endif 3802 } 3803 3804 return 0; 3805 } 3806 3807 /* 3808 * Called with zonelists_mutex held always 3809 * unless system_state == SYSTEM_BOOTING. 3810 */ 3811 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) 3812 { 3813 set_zonelist_order(); 3814 3815 if (system_state == SYSTEM_BOOTING) { 3816 __build_all_zonelists(NULL); 3817 mminit_verify_zonelist(); 3818 cpuset_init_current_mems_allowed(); 3819 } else { 3820 #ifdef CONFIG_MEMORY_HOTPLUG 3821 if (zone) 3822 setup_zone_pageset(zone); 3823 #endif 3824 /* we have to stop all cpus to guarantee there is no user 3825 of zonelist */ 3826 stop_machine(__build_all_zonelists, pgdat, NULL); 3827 /* cpuset refresh routine should be here */ 3828 } 3829 vm_total_pages = nr_free_pagecache_pages(); 3830 /* 3831 * Disable grouping by mobility if the number of pages in the 3832 * system is too low to allow the mechanism to work. It would be 3833 * more accurate, but expensive to check per-zone. This check is 3834 * made on memory-hotadd so a system can start with mobility 3835 * disabled and enable it later 3836 */ 3837 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 3838 page_group_by_mobility_disabled = 1; 3839 else 3840 page_group_by_mobility_disabled = 0; 3841 3842 printk("Built %i zonelists in %s order, mobility grouping %s. " 3843 "Total pages: %ld\n", 3844 nr_online_nodes, 3845 zonelist_order_name[current_zonelist_order], 3846 page_group_by_mobility_disabled ? "off" : "on", 3847 vm_total_pages); 3848 #ifdef CONFIG_NUMA 3849 printk("Policy zone: %s\n", zone_names[policy_zone]); 3850 #endif 3851 } 3852 3853 /* 3854 * Helper functions to size the waitqueue hash table. 3855 * Essentially these want to choose hash table sizes sufficiently 3856 * large so that collisions trying to wait on pages are rare. 3857 * But in fact, the number of active page waitqueues on typical 3858 * systems is ridiculously low, less than 200. So this is even 3859 * conservative, even though it seems large. 3860 * 3861 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 3862 * waitqueues, i.e. the size of the waitq table given the number of pages. 
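* Worked example (editor's illustration): with 4 KiB pages, a 512 MiB zone holds 131072 pages; 131072 / PAGES_PER_WAITQUEUE = 512, already a power of two, so the table gets 512 entries (the result is always clamped to the range [4, 4096]).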
3863 */ 3864 #define PAGES_PER_WAITQUEUE 256 3865 3866 #ifndef CONFIG_MEMORY_HOTPLUG 3867 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 3868 { 3869 unsigned long size = 1; 3870 3871 pages /= PAGES_PER_WAITQUEUE; 3872 3873 while (size < pages) 3874 size <<= 1; 3875 3876 /* 3877 * Once we have dozens or even hundreds of threads sleeping 3878 * on IO we've got bigger problems than wait queue collision. 3879 * Limit the size of the wait table to a reasonable size. 3880 */ 3881 size = min(size, 4096UL); 3882 3883 return max(size, 4UL); 3884 } 3885 #else 3886 /* 3887 * A zone's size might be changed by hot-add, so it is not possible to determine 3888 * a suitable size for its wait_table. So we use the maximum size now. 3889 * 3890 * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.: 3891 * 3892 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 3893 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 3894 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 3895 * 3896 * Computed the traditional way (see above), this maximum is reached once 3897 * a zone's memory is (512K + 256) pages or more. That equals: 3898 * 3899 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 3900 * ia64(16K page size) : = ( 8G + 4M)byte. 3901 * powerpc (64K page size) : = (32G +16M)byte. 3902 */ 3903 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 3904 { 3905 return 4096UL; 3906 } 3907 #endif 3908 3909 /* 3910 * This is an integer logarithm so that shifts can be used later 3911 * to extract the more random high bits from the multiplicative 3912 * hash function before the remainder is taken. 3913 */ 3914 static inline unsigned long wait_table_bits(unsigned long size) 3915 { 3916 return ffz(~size); 3917 } 3918 3919 /* 3920 * Check if a pageblock contains reserved pages 3921 */ 3922 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn) 3923 { 3924 unsigned long pfn; 3925 3926 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 3927 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn))) 3928 return 1; 3929 } 3930 return 0; 3931 } 3932 3933 /* 3934 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 3935 * of blocks reserved is based on min_wmark_pages(zone). The memory within 3936 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes 3937 * higher will lead to a bigger reserve, which will be freed as contiguous 3938 * blocks as reclaim kicks in. 3939 */ 3940 static void setup_zone_migrate_reserve(struct zone *zone) 3941 { 3942 unsigned long start_pfn, pfn, end_pfn, block_end_pfn; 3943 struct page *page; 3944 unsigned long block_migratetype; 3945 int reserve; 3946 int old_reserve; 3947 3948 /* 3949 * Get the start pfn, end pfn and the number of blocks to reserve. 3950 * We have to be careful to be aligned to pageblock_nr_pages to 3951 * make sure that we always check pfn_valid for the first page in 3952 * the block. 3953 */ 3954 start_pfn = zone->zone_start_pfn; 3955 end_pfn = zone_end_pfn(zone); 3956 start_pfn = roundup(start_pfn, pageblock_nr_pages); 3957 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> 3958 pageblock_order; 3959 3960 /* 3961 * Reserve blocks are generally in place to help high-order atomic 3962 * allocations that are short-lived. A min_free_kbytes value that 3963 * would result in more than 2 reserve blocks for atomic allocations 3964 * is assumed to be in place to help anti-fragmentation for the 3965 * future allocation of hugepages at runtime.
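* Worked example (editor's illustration): with 4 KiB pages and pageblock_order == 9, a zone whose min watermark is 1024 pages yields roundup(1024, 512) >> 9 = 2 reserve blocks, which the min(2, reserve) clamp below leaves unchanged.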
3966 */ 3967 reserve = min(2, reserve); 3968 old_reserve = zone->nr_migrate_reserve_block; 3969 3970 /* On memory hot-add, we almost always need to do nothing */ 3971 if (reserve == old_reserve) 3972 return; 3973 zone->nr_migrate_reserve_block = reserve; 3974 3975 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 3976 if (!pfn_valid(pfn)) 3977 continue; 3978 page = pfn_to_page(pfn); 3979 3980 /* Watch out for overlapping nodes */ 3981 if (page_to_nid(page) != zone_to_nid(zone)) 3982 continue; 3983 3984 block_migratetype = get_pageblock_migratetype(page); 3985 3986 /* Only test what is necessary when the reserves are not met */ 3987 if (reserve > 0) { 3988 /* 3989 * Blocks with reserved pages will never be freed; 3990 * skip them. 3991 */ 3992 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn); 3993 if (pageblock_is_reserved(pfn, block_end_pfn)) 3994 continue; 3995 3996 /* If this block is reserved, account for it */ 3997 if (block_migratetype == MIGRATE_RESERVE) { 3998 reserve--; 3999 continue; 4000 } 4001 4002 /* Suitable for reserving if this block is movable */ 4003 if (block_migratetype == MIGRATE_MOVABLE) { 4004 set_pageblock_migratetype(page, 4005 MIGRATE_RESERVE); 4006 move_freepages_block(zone, page, 4007 MIGRATE_RESERVE); 4008 reserve--; 4009 continue; 4010 } 4011 } else if (!old_reserve) { 4012 /* 4013 * At boot time we don't need to scan the whole zone 4014 * for turning off MIGRATE_RESERVE. 4015 */ 4016 break; 4017 } 4018 4019 /* 4020 * If the reserve is met and this is a previously reserved block, 4021 * take it back 4022 */ 4023 if (block_migratetype == MIGRATE_RESERVE) { 4024 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4025 move_freepages_block(zone, page, MIGRATE_MOVABLE); 4026 } 4027 } 4028 } 4029 4030 /* 4031 * Initially all pages are reserved - free ones are freed 4032 * up by free_all_bootmem() once the early boot process is 4033 * done. Non-atomic initialization, single-pass. 4034 */ 4035 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 4036 unsigned long start_pfn, enum memmap_context context) 4037 { 4038 struct page *page; 4039 unsigned long end_pfn = start_pfn + size; 4040 unsigned long pfn; 4041 struct zone *z; 4042 4043 if (highest_memmap_pfn < end_pfn - 1) 4044 highest_memmap_pfn = end_pfn - 1; 4045 4046 z = &NODE_DATA(nid)->node_zones[zone]; 4047 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4048 /* 4049 * There can be holes in boot-time mem_map[]s 4050 * handed to this function. They do not 4051 * exist on hotplugged memory. 4052 */ 4053 if (context == MEMMAP_EARLY) { 4054 if (!early_pfn_valid(pfn)) 4055 continue; 4056 if (!early_pfn_in_nid(pfn, nid)) 4057 continue; 4058 } 4059 page = pfn_to_page(pfn); 4060 set_page_links(page, zone, nid, pfn); 4061 mminit_verify_page_links(page, zone, nid, pfn); 4062 init_page_count(page); 4063 page_mapcount_reset(page); 4064 page_cpupid_reset_last(page); 4065 SetPageReserved(page); 4066 /* 4067 * Mark the block movable so that blocks are reserved for 4068 * movable allocations at startup. This will force kernel 4069 * allocations to reserve their blocks rather than leaking 4070 * throughout the address space during boot, when many 4071 * long-lived kernel allocations are made. Later some blocks 4072 * near the start are marked MIGRATE_RESERVE by 4073 * setup_zone_migrate_reserve(). 4074 * 4075 * The pageblock bitmap is created for the zone's valid pfn range,
but the memmap 4076 * may be created for invalid pages too (for alignment), so 4077 * check here that set_pageblock_migratetype() is not called 4078 * on a pfn outside the zone. 4079 */ 4080 if ((z->zone_start_pfn <= pfn) 4081 && (pfn < zone_end_pfn(z)) 4082 && !(pfn & (pageblock_nr_pages - 1))) 4083 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4084 4085 INIT_LIST_HEAD(&page->lru); 4086 #ifdef WANT_PAGE_VIRTUAL 4087 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 4088 if (!is_highmem_idx(zone)) 4089 set_page_address(page, __va(pfn << PAGE_SHIFT)); 4090 #endif 4091 } 4092 } 4093 4094 static void __meminit zone_init_free_lists(struct zone *zone) 4095 { 4096 int order, t; 4097 for_each_migratetype_order(order, t) { 4098 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 4099 zone->free_area[order].nr_free = 0; 4100 } 4101 } 4102 4103 #ifndef __HAVE_ARCH_MEMMAP_INIT 4104 #define memmap_init(size, nid, zone, start_pfn) \ 4105 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 4106 #endif 4107 4108 static int __meminit zone_batchsize(struct zone *zone) 4109 { 4110 #ifdef CONFIG_MMU 4111 int batch; 4112 4113 /* 4114 * The per-cpu-pages pools are set to around 1/1000th of the 4115 * size of the zone, but no more than half a megabyte. 4116 * 4117 * OK, so we don't know how big the cache is. So guess. 4118 */ 4119 batch = zone->managed_pages / 1024; 4120 if (batch * PAGE_SIZE > 512 * 1024) 4121 batch = (512 * 1024) / PAGE_SIZE; 4122 batch /= 4; /* We effectively *= 4 below */ 4123 if (batch < 1) 4124 batch = 1; 4125 4126 /* 4127 * Clamp the batch to a 2^n - 1 value. Having a power 4128 * of 2 value was found to be more likely to have 4129 * suboptimal cache aliasing properties in some cases. 4130 * 4131 * For example if 2 tasks are alternately allocating 4132 * batches of pages, one task can end up with a lot 4133 * of pages of one half of the possible page colors 4134 * and the other with pages of the other colors. 4135 */ 4136 batch = rounddown_pow_of_two(batch + batch/2) - 1; 4137 4138 return batch; 4139 4140 #else 4141 /* The deferral and batching of frees should be suppressed under NOMMU 4142 * conditions. 4143 * 4144 * The problem is that NOMMU needs to be able to allocate large chunks 4145 * of contiguous memory as there's no hardware page translation to 4146 * assemble apparent contiguous memory from discontiguous pages. 4147 * 4148 * Queueing large contiguous runs of pages for batching, however, 4149 * causes the pages to actually be freed in smaller chunks. As there 4150 * can be a significant delay between the individual batches being 4151 * recycled, this leads to the once large chunks of space being 4152 * fragmented and becoming unavailable for high-order allocations. 4153 */ 4154 return 0; 4155 #endif 4156 } 4157 4158 /* 4159 * pcp->high and pcp->batch values are related and dependent on one another: 4160 * ->batch must never be higher than ->high. 4161 * The following function updates them in a safe manner without read side 4162 * locking. 4163 * 4164 * Any new users of pcp->batch and pcp->high should ensure they can cope with 4165 * those fields changing asynchronously (according to the above rule). 4166 * 4167 * mutex_is_locked(&pcp_batch_high_lock) is required when calling this function 4168 * outside of boot time (or some other assurance that no concurrent updaters 4169 * exist).
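* Worked example (editor's illustration): shrinking a pageset from (high = 186, batch = 31) to (high = 90, batch = 15) passes through the transient states (186, 1) and (90, 1), so a reader never observes a batch larger than high.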
4170 */ 4171 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 4172 unsigned long batch) 4173 { 4174 /* start with a fail safe value for batch */ 4175 pcp->batch = 1; 4176 smp_wmb(); 4177 4178 /* Update high, then batch, in order */ 4179 pcp->high = high; 4180 smp_wmb(); 4181 4182 pcp->batch = batch; 4183 } 4184 4185 /* a companion to pageset_set_high() */ 4186 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) 4187 { 4188 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); 4189 } 4190 4191 static void pageset_init(struct per_cpu_pageset *p) 4192 { 4193 struct per_cpu_pages *pcp; 4194 int migratetype; 4195 4196 memset(p, 0, sizeof(*p)); 4197 4198 pcp = &p->pcp; 4199 pcp->count = 0; 4200 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 4201 INIT_LIST_HEAD(&pcp->lists[migratetype]); 4202 } 4203 4204 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 4205 { 4206 pageset_init(p); 4207 pageset_set_batch(p, batch); 4208 } 4209 4210 /* 4211 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist 4212 * to the value high for the pageset p. 4213 */ 4214 static void pageset_set_high(struct per_cpu_pageset *p, 4215 unsigned long high) 4216 { 4217 unsigned long batch = max(1UL, high / 4); 4218 if ((high / 4) > (PAGE_SHIFT * 8)) 4219 batch = PAGE_SHIFT * 8; 4220 4221 pageset_update(&p->pcp, high, batch); 4222 } 4223 4224 static void __meminit pageset_set_high_and_batch(struct zone *zone, 4225 struct per_cpu_pageset *pcp) 4226 { 4227 if (percpu_pagelist_fraction) 4228 pageset_set_high(pcp, 4229 (zone->managed_pages / 4230 percpu_pagelist_fraction)); 4231 else 4232 pageset_set_batch(pcp, zone_batchsize(zone)); 4233 } 4234 4235 static void __meminit zone_pageset_init(struct zone *zone, int cpu) 4236 { 4237 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); 4238 4239 pageset_init(pcp); 4240 pageset_set_high_and_batch(zone, pcp); 4241 } 4242 4243 static void __meminit setup_zone_pageset(struct zone *zone) 4244 { 4245 int cpu; 4246 zone->pageset = alloc_percpu(struct per_cpu_pageset); 4247 for_each_possible_cpu(cpu) 4248 zone_pageset_init(zone, cpu); 4249 } 4250 4251 /* 4252 * Allocate per cpu pagesets and initialize them. 4253 * Before this call only boot pagesets were available. 4254 */ 4255 void __init setup_per_cpu_pageset(void) 4256 { 4257 struct zone *zone; 4258 4259 for_each_populated_zone(zone) 4260 setup_zone_pageset(zone); 4261 } 4262 4263 static noinline __init_refok 4264 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 4265 { 4266 int i; 4267 size_t alloc_size; 4268 4269 /* 4270 * The per-page waitqueue mechanism uses hashed waitqueues 4271 * per zone. 4272 */ 4273 zone->wait_table_hash_nr_entries = 4274 wait_table_hash_nr_entries(zone_size_pages); 4275 zone->wait_table_bits = 4276 wait_table_bits(zone->wait_table_hash_nr_entries); 4277 alloc_size = zone->wait_table_hash_nr_entries 4278 * sizeof(wait_queue_head_t); 4279 4280 if (!slab_is_available()) { 4281 zone->wait_table = (wait_queue_head_t *) 4282 memblock_virt_alloc_node_nopanic( 4283 alloc_size, zone->zone_pgdat->node_id); 4284 } else { 4285 /* 4286 * This case means that a zone whose size was 0 gets new memory 4287 * via memory hot-add. 4288 * But it may be the case that a new node was hot-added. In 4289 * this case vmalloc() will not be able to use this new node's 4290 * memory - this wait_table must be initialized to use this new 4291 * node itself as well. 
4292 * To use this new node's memory, further consideration will be 4293 * necessary. 4294 */ 4295 zone->wait_table = vmalloc(alloc_size); 4296 } 4297 if (!zone->wait_table) 4298 return -ENOMEM; 4299 4300 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) 4301 init_waitqueue_head(zone->wait_table + i); 4302 4303 return 0; 4304 } 4305 4306 static __meminit void zone_pcp_init(struct zone *zone) 4307 { 4308 /* 4309 * per cpu subsystem is not up at this point. The following code 4310 * relies on the ability of the linker to provide the 4311 * offset of a (static) per cpu variable into the per cpu area. 4312 */ 4313 zone->pageset = &boot_pageset; 4314 4315 if (populated_zone(zone)) 4316 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 4317 zone->name, zone->present_pages, 4318 zone_batchsize(zone)); 4319 } 4320 4321 int __meminit init_currently_empty_zone(struct zone *zone, 4322 unsigned long zone_start_pfn, 4323 unsigned long size, 4324 enum memmap_context context) 4325 { 4326 struct pglist_data *pgdat = zone->zone_pgdat; 4327 int ret; 4328 ret = zone_wait_table_init(zone, size); 4329 if (ret) 4330 return ret; 4331 pgdat->nr_zones = zone_idx(zone) + 1; 4332 4333 zone->zone_start_pfn = zone_start_pfn; 4334 4335 mminit_dprintk(MMINIT_TRACE, "memmap_init", 4336 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 4337 pgdat->node_id, 4338 (unsigned long)zone_idx(zone), 4339 zone_start_pfn, (zone_start_pfn + size)); 4340 4341 zone_init_free_lists(zone); 4342 4343 return 0; 4344 } 4345 4346 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4347 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 4348 /* 4349 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 4350 * Architectures may implement their own version but if add_active_range() 4351 * was used and there are no special requirements, this is a convenient 4352 * alternative 4353 */ 4354 int __meminit __early_pfn_to_nid(unsigned long pfn) 4355 { 4356 unsigned long start_pfn, end_pfn; 4357 int nid; 4358 /* 4359 * NOTE: The following SMP-unsafe globals are only used early in boot 4360 * when the kernel is running single-threaded. 4361 */ 4362 static unsigned long __meminitdata last_start_pfn, last_end_pfn; 4363 static int __meminitdata last_nid; 4364 4365 if (last_start_pfn <= pfn && pfn < last_end_pfn) 4366 return last_nid; 4367 4368 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 4369 if (nid != -1) { 4370 last_start_pfn = start_pfn; 4371 last_end_pfn = end_pfn; 4372 last_nid = nid; 4373 } 4374 4375 return nid; 4376 } 4377 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 4378 4379 int __meminit early_pfn_to_nid(unsigned long pfn) 4380 { 4381 int nid; 4382 4383 nid = __early_pfn_to_nid(pfn); 4384 if (nid >= 0) 4385 return nid; 4386 /* just returns 0 */ 4387 return 0; 4388 } 4389 4390 #ifdef CONFIG_NODES_SPAN_OTHER_NODES 4391 bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 4392 { 4393 int nid; 4394 4395 nid = __early_pfn_to_nid(pfn); 4396 if (nid >= 0 && nid != node) 4397 return false; 4398 return true; 4399 } 4400 #endif 4401 4402 /** 4403 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range 4404 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 
4405 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid 4406 * 4407 * If an architecture guarantees that all ranges registered with 4408 * add_active_ranges() contain no holes and may be freed, this 4409 * function may be used instead of calling memblock_free_early_nid() 4410 * manually. 4411 */ 4412 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) 4413 { 4414 unsigned long start_pfn, end_pfn; 4415 int i, this_nid; 4416 4417 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { 4418 start_pfn = min(start_pfn, max_low_pfn); 4419 end_pfn = min(end_pfn, max_low_pfn); 4420 4421 if (start_pfn < end_pfn) 4422 memblock_free_early_nid(PFN_PHYS(start_pfn), 4423 (end_pfn - start_pfn) << PAGE_SHIFT, 4424 this_nid); 4425 } 4426 } 4427 4428 /** 4429 * sparse_memory_present_with_active_regions - Call memory_present for each active range 4430 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 4431 * 4432 * If an architecture guarantees that all ranges registered with 4433 * add_active_ranges() contain no holes and may be freed, this 4434 * function may be used instead of calling memory_present() manually. 4435 */ 4436 void __init sparse_memory_present_with_active_regions(int nid) 4437 { 4438 unsigned long start_pfn, end_pfn; 4439 int i, this_nid; 4440 4441 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 4442 memory_present(this_nid, start_pfn, end_pfn); 4443 } 4444 4445 /** 4446 * get_pfn_range_for_nid - Return the start and end page frames for a node 4447 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 4448 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 4449 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 4450 * 4451 * It returns the start and end page frame of a node based on information 4452 * provided by an arch calling add_active_range(). If called for a node 4453 * with no available memory, a warning is printed and the start and end 4454 * PFNs will be 0. 4455 */ 4456 void __meminit get_pfn_range_for_nid(unsigned int nid, 4457 unsigned long *start_pfn, unsigned long *end_pfn) 4458 { 4459 unsigned long this_start_pfn, this_end_pfn; 4460 int i; 4461 4462 *start_pfn = -1UL; 4463 *end_pfn = 0; 4464 4465 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 4466 *start_pfn = min(*start_pfn, this_start_pfn); 4467 *end_pfn = max(*end_pfn, this_end_pfn); 4468 } 4469 4470 if (*start_pfn == -1UL) 4471 *start_pfn = 0; 4472 } 4473 4474 /* 4475 * This finds a zone that can be used for ZONE_MOVABLE pages. The 4476 * assumption is made that zones within a node are ordered in monotonically 4477 * increasing memory addresses, so that the "highest" populated zone is used. 4478 */ 4479 static void __init find_usable_zone_for_movable(void) 4480 { 4481 int zone_index; 4482 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 4483 if (zone_index == ZONE_MOVABLE) 4484 continue; 4485 4486 if (arch_zone_highest_possible_pfn[zone_index] > 4487 arch_zone_lowest_possible_pfn[zone_index]) 4488 break; 4489 } 4490 4491 VM_BUG_ON(zone_index == -1); 4492 movable_zone = zone_index; 4493 } 4494 4495 /* 4496 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 4497 * because it is sized independently of the architecture. Unlike the other 4498 * zones, the starting point for ZONE_MOVABLE is not fixed.
It may be different 4499 * in each node depending on the size of each node and how evenly kernelcore 4500 * is distributed. This helper function adjusts the zone ranges 4501 * provided by the architecture for a given node by using the end of the 4502 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 4503 * zones within a node are in order of monotonically increasing memory addresses. 4504 */ 4505 static void __meminit adjust_zone_range_for_zone_movable(int nid, 4506 unsigned long zone_type, 4507 unsigned long node_start_pfn, 4508 unsigned long node_end_pfn, 4509 unsigned long *zone_start_pfn, 4510 unsigned long *zone_end_pfn) 4511 { 4512 /* Only adjust if ZONE_MOVABLE is on this node */ 4513 if (zone_movable_pfn[nid]) { 4514 /* Size ZONE_MOVABLE */ 4515 if (zone_type == ZONE_MOVABLE) { 4516 *zone_start_pfn = zone_movable_pfn[nid]; 4517 *zone_end_pfn = min(node_end_pfn, 4518 arch_zone_highest_possible_pfn[movable_zone]); 4519 4520 /* Adjust for ZONE_MOVABLE starting within this range */ 4521 } else if (*zone_start_pfn < zone_movable_pfn[nid] && 4522 *zone_end_pfn > zone_movable_pfn[nid]) { 4523 *zone_end_pfn = zone_movable_pfn[nid]; 4524 4525 /* Check if this whole range is within ZONE_MOVABLE */ 4526 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 4527 *zone_start_pfn = *zone_end_pfn; 4528 } 4529 } 4530 4531 /* 4532 * Return the number of pages a zone spans in a node, including holes 4533 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 4534 */ 4535 static unsigned long __meminit zone_spanned_pages_in_node(int nid, 4536 unsigned long zone_type, 4537 unsigned long node_start_pfn, 4538 unsigned long node_end_pfn, 4539 unsigned long *ignored) 4540 { 4541 unsigned long zone_start_pfn, zone_end_pfn; 4542 4543 /* Get the start and end of the zone */ 4544 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 4545 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 4546 adjust_zone_range_for_zone_movable(nid, zone_type, 4547 node_start_pfn, node_end_pfn, 4548 &zone_start_pfn, &zone_end_pfn); 4549 4550 /* Check that this node has pages within the zone's required range */ 4551 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 4552 return 0; 4553 4554 /* Move the zone boundaries inside the node if necessary */ 4555 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 4556 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 4557 4558 /* Return the spanned pages */ 4559 return zone_end_pfn - zone_start_pfn; 4560 } 4561 4562 /* 4563 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 4564 * then all holes in the requested range will be accounted for. 4565 */ 4566 unsigned long __meminit __absent_pages_in_range(int nid, 4567 unsigned long range_start_pfn, 4568 unsigned long range_end_pfn) 4569 { 4570 unsigned long nr_absent = range_end_pfn - range_start_pfn; 4571 unsigned long start_pfn, end_pfn; 4572 int i; 4573 4574 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 4575 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 4576 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 4577 nr_absent -= end_pfn - start_pfn; 4578 } 4579 return nr_absent; 4580 } 4581 4582 /** 4583 * absent_pages_in_range - Return number of page frames in holes within a range 4584 * @start_pfn: The start PFN to start searching for holes 4585 * @end_pfn: The end PFN to stop searching for holes 4586 * 4587 * It returns the number of page frames in memory holes within a range.
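* Worked example (editor's illustration): for the PFN range [0, 1000) with memory registered at [0, 700) and [800, 1000), nr_absent starts at 1000 and has 700 and 200 subtracted, so 100 page frames are reported as holes.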
4588 */ 4589 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 4590 unsigned long end_pfn) 4591 { 4592 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 4593 } 4594 4595 /* Return the number of page frames in holes in a zone on a node */ 4596 static unsigned long __meminit zone_absent_pages_in_node(int nid, 4597 unsigned long zone_type, 4598 unsigned long node_start_pfn, 4599 unsigned long node_end_pfn, 4600 unsigned long *ignored) 4601 { 4602 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 4603 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 4604 unsigned long zone_start_pfn, zone_end_pfn; 4605 4606 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 4607 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 4608 4609 adjust_zone_range_for_zone_movable(nid, zone_type, 4610 node_start_pfn, node_end_pfn, 4611 &zone_start_pfn, &zone_end_pfn); 4612 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 4613 } 4614 4615 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 4616 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 4617 unsigned long zone_type, 4618 unsigned long node_start_pfn, 4619 unsigned long node_end_pfn, 4620 unsigned long *zones_size) 4621 { 4622 return zones_size[zone_type]; 4623 } 4624 4625 static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 4626 unsigned long zone_type, 4627 unsigned long node_start_pfn, 4628 unsigned long node_end_pfn, 4629 unsigned long *zholes_size) 4630 { 4631 if (!zholes_size) 4632 return 0; 4633 4634 return zholes_size[zone_type]; 4635 } 4636 4637 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 4638 4639 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 4640 unsigned long node_start_pfn, 4641 unsigned long node_end_pfn, 4642 unsigned long *zones_size, 4643 unsigned long *zholes_size) 4644 { 4645 unsigned long realtotalpages, totalpages = 0; 4646 enum zone_type i; 4647 4648 for (i = 0; i < MAX_NR_ZONES; i++) 4649 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 4650 node_start_pfn, 4651 node_end_pfn, 4652 zones_size); 4653 pgdat->node_spanned_pages = totalpages; 4654 4655 realtotalpages = totalpages; 4656 for (i = 0; i < MAX_NR_ZONES; i++) 4657 realtotalpages -= 4658 zone_absent_pages_in_node(pgdat->node_id, i, 4659 node_start_pfn, node_end_pfn, 4660 zholes_size); 4661 pgdat->node_present_pages = realtotalpages; 4662 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 4663 realtotalpages); 4664 } 4665 4666 #ifndef CONFIG_SPARSEMEM 4667 /* 4668 * Calculate the size of the zone->blockflags bitmap, rounded to an unsigned 4669 * long. Start by making sure zonesize is a multiple of pageblock_order by 4670 * rounding up, then use NR_PAGEBLOCK_BITS worth of bits per pageblock, 4671 * round what is now in bits up to the nearest long in bits, and finally 4672 * return it in bytes.
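* Worked example (editor's illustration, assuming NR_PAGEBLOCK_BITS == 4, 64-bit longs, 4 KiB pages and pageblock_order == 9): a 1 GiB zone starting on a pageblock boundary spans 262144 pages = 512 pageblocks = 2048 bits, already a multiple of 64, so 2048 / 8 = 256 bytes are allocated.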
4673 */ 4674 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 4675 { 4676 unsigned long usemapsize; 4677 4678 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 4679 usemapsize = roundup(zonesize, pageblock_nr_pages); 4680 usemapsize = usemapsize >> pageblock_order; 4681 usemapsize *= NR_PAGEBLOCK_BITS; 4682 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 4683 4684 return usemapsize / 8; 4685 } 4686 4687 static void __init setup_usemap(struct pglist_data *pgdat, 4688 struct zone *zone, 4689 unsigned long zone_start_pfn, 4690 unsigned long zonesize) 4691 { 4692 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); 4693 zone->pageblock_flags = NULL; 4694 if (usemapsize) 4695 zone->pageblock_flags = 4696 memblock_virt_alloc_node_nopanic(usemapsize, 4697 pgdat->node_id); 4698 } 4699 #else 4700 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, 4701 unsigned long zone_start_pfn, unsigned long zonesize) {} 4702 #endif /* CONFIG_SPARSEMEM */ 4703 4704 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 4705 4706 /* Initialise pageblock_order: the number of pages represented by one set of NR_PAGEBLOCK_BITS */ 4707 void __paginginit set_pageblock_order(void) 4708 { 4709 unsigned int order; 4710 4711 /* Check that pageblock_order has not already been set up */ 4712 if (pageblock_order) 4713 return; 4714 4715 if (HPAGE_SHIFT > PAGE_SHIFT) 4716 order = HUGETLB_PAGE_ORDER; 4717 else 4718 order = MAX_ORDER - 1; 4719 4720 /* 4721 * Assume the largest contiguous order of interest is a huge page. 4722 * This value may be variable depending on boot parameters on IA64 and 4723 * powerpc. 4724 */ 4725 pageblock_order = order; 4726 } 4727 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4728 4729 /* 4730 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 4731 * is unused as pageblock_order is set at compile-time. See 4732 * include/linux/pageblock-flags.h for the values of pageblock_order based on 4733 * the kernel config 4734 */ 4735 void __paginginit set_pageblock_order(void) 4736 { 4737 } 4738 4739 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4740 4741 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, 4742 unsigned long present_pages) 4743 { 4744 unsigned long pages = spanned_pages; 4745 4746 /* 4747 * Provide a more accurate estimation if there are holes within 4748 * the zone and SPARSEMEM is in use. If there are holes within the 4749 * zone, each populated memory region may cost us one or two extra 4750 * memmap pages due to alignment, because the memmap pages for each 4751 * populated region may not be naturally aligned on a page boundary. 4752 * So the (present_pages >> 4) heuristic is a tradeoff for that. 4753 */ 4754 if (spanned_pages > present_pages + (present_pages >> 4) && 4755 IS_ENABLED(CONFIG_SPARSEMEM)) 4756 pages = present_pages; 4757 4758 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 4759 } 4760 4761 /* 4762 * Set up the zone data structures: 4763 * - mark all pages reserved 4764 * - mark all memory queues empty 4765 * - clear the memory bitmaps 4766 * 4767 * NOTE: pgdat should get zeroed by caller.
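* For scale (editor's illustration): the memmap accounted below costs sizeof(struct page) per page; assuming a 64-byte struct page and 4 KiB pages, that is about 1.6% of a zone, e.g. roughly 16384 pages (64 MiB) of memmap for a fully populated 4 GiB zone.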
4768 */ 4769 static void __paginginit free_area_init_core(struct pglist_data *pgdat, 4770 unsigned long node_start_pfn, unsigned long node_end_pfn, 4771 unsigned long *zones_size, unsigned long *zholes_size) 4772 { 4773 enum zone_type j; 4774 int nid = pgdat->node_id; 4775 unsigned long zone_start_pfn = pgdat->node_start_pfn; 4776 int ret; 4777 4778 pgdat_resize_init(pgdat); 4779 #ifdef CONFIG_NUMA_BALANCING 4780 spin_lock_init(&pgdat->numabalancing_migrate_lock); 4781 pgdat->numabalancing_migrate_nr_pages = 0; 4782 pgdat->numabalancing_migrate_next_window = jiffies; 4783 #endif 4784 init_waitqueue_head(&pgdat->kswapd_wait); 4785 init_waitqueue_head(&pgdat->pfmemalloc_wait); 4786 pgdat_page_cgroup_init(pgdat); 4787 4788 for (j = 0; j < MAX_NR_ZONES; j++) { 4789 struct zone *zone = pgdat->node_zones + j; 4790 unsigned long size, realsize, freesize, memmap_pages; 4791 4792 size = zone_spanned_pages_in_node(nid, j, node_start_pfn, 4793 node_end_pfn, zones_size); 4794 realsize = freesize = size - zone_absent_pages_in_node(nid, j, 4795 node_start_pfn, 4796 node_end_pfn, 4797 zholes_size); 4798 4799 /* 4800 * Adjust freesize so that it accounts for how much memory 4801 * is used by this zone for memmap. This affects the watermark 4802 * and per-cpu initialisations 4803 */ 4804 memmap_pages = calc_memmap_size(size, realsize); 4805 if (freesize >= memmap_pages) { 4806 freesize -= memmap_pages; 4807 if (memmap_pages) 4808 printk(KERN_DEBUG 4809 " %s zone: %lu pages used for memmap\n", 4810 zone_names[j], memmap_pages); 4811 } else 4812 printk(KERN_WARNING 4813 " %s zone: %lu pages exceeds freesize %lu\n", 4814 zone_names[j], memmap_pages, freesize); 4815 4816 /* Account for reserved pages */ 4817 if (j == 0 && freesize > dma_reserve) { 4818 freesize -= dma_reserve; 4819 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 4820 zone_names[0], dma_reserve); 4821 } 4822 4823 if (!is_highmem_idx(j)) 4824 nr_kernel_pages += freesize; 4825 /* Charge for highmem memmap if there are enough kernel pages */ 4826 else if (nr_kernel_pages > memmap_pages * 2) 4827 nr_kernel_pages -= memmap_pages; 4828 nr_all_pages += freesize; 4829 4830 zone->spanned_pages = size; 4831 zone->present_pages = realsize; 4832 /* 4833 * Set an approximate value for lowmem here, it will be adjusted 4834 * when the bootmem allocator frees pages into the buddy system. 4835 * And all highmem pages will be managed by the buddy system. 4836 */ 4837 zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; 4838 #ifdef CONFIG_NUMA 4839 zone->node = nid; 4840 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) 4841 / 100; 4842 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; 4843 #endif 4844 zone->name = zone_names[j]; 4845 spin_lock_init(&zone->lock); 4846 spin_lock_init(&zone->lru_lock); 4847 zone_seqlock_init(zone); 4848 zone->zone_pgdat = pgdat; 4849 zone_pcp_init(zone); 4850 4851 /* For bootup, initialized properly in watermark setup */ 4852 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); 4853 4854 lruvec_init(&zone->lruvec); 4855 if (!size) 4856 continue; 4857 4858 set_pageblock_order(); 4859 setup_usemap(pgdat, zone, zone_start_pfn, size); 4860 ret = init_currently_empty_zone(zone, zone_start_pfn, 4861 size, MEMMAP_EARLY); 4862 BUG_ON(ret); 4863 memmap_init(size, nid, j, zone_start_pfn); 4864 zone_start_pfn += size; 4865 } 4866 } 4867 4868 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 4869 { 4870 /* Skip empty nodes */ 4871 if (!pgdat->node_spanned_pages) 4872 return; 4873 4874 #ifdef CONFIG_FLAT_NODE_MEM_MAP 4875 /* ia64 gets its own node_mem_map, before this, without bootmem */ 4876 if (!pgdat->node_mem_map) { 4877 unsigned long size, start, end; 4878 struct page *map; 4879 4880 /* 4881 * The zone's endpoints aren't required to be MAX_ORDER 4882 * aligned but the node_mem_map endpoints must be in order 4883 * for the buddy allocator to function correctly. 4884 */ 4885 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 4886 end = pgdat_end_pfn(pgdat); 4887 end = ALIGN(end, MAX_ORDER_NR_PAGES); 4888 size = (end - start) * sizeof(struct page); 4889 map = alloc_remap(pgdat->node_id, size); 4890 if (!map) 4891 map = memblock_virt_alloc_node_nopanic(size, 4892 pgdat->node_id); 4893 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 4894 } 4895 #ifndef CONFIG_NEED_MULTIPLE_NODES 4896 /* 4897 * With no DISCONTIG, the global mem_map is just set as node 0's 4898 */ 4899 if (pgdat == NODE_DATA(0)) { 4900 mem_map = NODE_DATA(0)->node_mem_map; 4901 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4902 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 4903 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 4904 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 4905 } 4906 #endif 4907 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 4908 } 4909 4910 void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 4911 unsigned long node_start_pfn, unsigned long *zholes_size) 4912 { 4913 pg_data_t *pgdat = NODE_DATA(nid); 4914 unsigned long start_pfn = 0; 4915 unsigned long end_pfn = 0; 4916 4917 /* pg_data_t should be reset to zero when it's allocated */ 4918 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); 4919 4920 pgdat->node_id = nid; 4921 pgdat->node_start_pfn = node_start_pfn; 4922 init_zone_allows_reclaim(nid); 4923 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4924 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 4925 #endif 4926 calculate_node_totalpages(pgdat, start_pfn, end_pfn, 4927 zones_size, zholes_size); 4928 4929 alloc_node_mem_map(pgdat); 4930 #ifdef CONFIG_FLAT_NODE_MEM_MAP 4931 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 4932 nid, (unsigned long)pgdat, 4933 (unsigned long)pgdat->node_mem_map); 4934 #endif 4935 4936 free_area_init_core(pgdat, start_pfn, end_pfn, 4937 zones_size, zholes_size); 4938 } 4939 4940 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4941 4942 #if MAX_NUMNODES > 1 4943 /* 4944 * Figure out the number of possible node ids. 
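* Example (editor's illustration): if node_possible_map contains nodes {0, 1, 3}, the loop below leaves highest == 3 and nr_node_ids becomes 4; sparse node numbering still counts the gap.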
4945 */ 4946 void __init setup_nr_node_ids(void) 4947 { 4948 unsigned int node; 4949 unsigned int highest = 0; 4950 4951 for_each_node_mask(node, node_possible_map) 4952 highest = node; 4953 nr_node_ids = highest + 1; 4954 } 4955 #endif 4956 4957 /** 4958 * node_map_pfn_alignment - determine the maximum internode alignment 4959 * 4960 * This function should be called after node map is populated and sorted. 4961 * It calculates the maximum power of two alignment which can distinguish 4962 * all the nodes. 4963 * 4964 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 4965 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 4966 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 4967 * shifted, 1GiB is enough and this function will indicate so. 4968 * 4969 * This is used to test whether pfn -> nid mapping of the chosen memory 4970 * model has fine enough granularity to avoid incorrect mapping for the 4971 * populated node map. 4972 * 4973 * Returns the determined alignment in pfn's. 0 if there is no alignment 4974 * requirement (single node). 4975 */ 4976 unsigned long __init node_map_pfn_alignment(void) 4977 { 4978 unsigned long accl_mask = 0, last_end = 0; 4979 unsigned long start, end, mask; 4980 int last_nid = -1; 4981 int i, nid; 4982 4983 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 4984 if (!start || last_nid < 0 || last_nid == nid) { 4985 last_nid = nid; 4986 last_end = end; 4987 continue; 4988 } 4989 4990 /* 4991 * Start with a mask granular enough to pin-point to the 4992 * start pfn and tick off bits one-by-one until it becomes 4993 * too coarse to separate the current node from the last. 4994 */ 4995 mask = ~((1 << __ffs(start)) - 1); 4996 while (mask && last_end <= (start & (mask << 1))) 4997 mask <<= 1; 4998 4999 /* accumulate all internode masks */ 5000 accl_mask |= mask; 5001 } 5002 5003 /* convert mask to number of pages */ 5004 return ~accl_mask + 1; 5005 } 5006 5007 /* Find the lowest pfn for a node */ 5008 static unsigned long __init find_min_pfn_for_node(int nid) 5009 { 5010 unsigned long min_pfn = ULONG_MAX; 5011 unsigned long start_pfn; 5012 int i; 5013 5014 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) 5015 min_pfn = min(min_pfn, start_pfn); 5016 5017 if (min_pfn == ULONG_MAX) { 5018 printk(KERN_WARNING 5019 "Could not find start_pfn for node %d\n", nid); 5020 return 0; 5021 } 5022 5023 return min_pfn; 5024 } 5025 5026 /** 5027 * find_min_pfn_with_active_regions - Find the minimum PFN registered 5028 * 5029 * It returns the minimum PFN based on information provided via 5030 * add_active_range(). 5031 */ 5032 unsigned long __init find_min_pfn_with_active_regions(void) 5033 { 5034 return find_min_pfn_for_node(MAX_NUMNODES); 5035 } 5036 5037 /* 5038 * early_calculate_totalpages() 5039 * Sum pages in active regions for movable zone. 5040 * Populate N_MEMORY for calculating usable_nodes. 5041 */ 5042 static unsigned long __init early_calculate_totalpages(void) 5043 { 5044 unsigned long totalpages = 0; 5045 unsigned long start_pfn, end_pfn; 5046 int i, nid; 5047 5048 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 5049 unsigned long pages = end_pfn - start_pfn; 5050 5051 totalpages += pages; 5052 if (pages) 5053 node_set_state(nid, N_MEMORY); 5054 } 5055 return totalpages; 5056 } 5057 5058 /* 5059 * Find the PFN the Movable zone begins in each node. Kernel memory 5060 * is spread evenly between nodes as long as the nodes have enough 5061 * memory. 
When they don't, some nodes will have more kernelcore than 5062 * others 5063 */ 5064 static void __init find_zone_movable_pfns_for_nodes(void) 5065 { 5066 int i, nid; 5067 unsigned long usable_startpfn; 5068 unsigned long kernelcore_node, kernelcore_remaining; 5069 /* save the state before borrowing the nodemask */ 5070 nodemask_t saved_node_state = node_states[N_MEMORY]; 5071 unsigned long totalpages = early_calculate_totalpages(); 5072 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 5073 struct memblock_type *type = &memblock.memory; 5074 5075 /* Need to find movable_zone earlier when movable_node is specified. */ 5076 find_usable_zone_for_movable(); 5077 5078 /* 5079 * If movable_node is specified, ignore kernelcore and movablecore 5080 * options. 5081 */ 5082 if (movable_node_is_enabled()) { 5083 for (i = 0; i < type->cnt; i++) { 5084 if (!memblock_is_hotpluggable(&type->regions[i])) 5085 continue; 5086 5087 nid = type->regions[i].nid; 5088 5089 usable_startpfn = PFN_DOWN(type->regions[i].base); 5090 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 5091 min(usable_startpfn, zone_movable_pfn[nid]) : 5092 usable_startpfn; 5093 } 5094 5095 goto out2; 5096 } 5097 5098 /* 5099 * If movablecore=nn[KMG] was specified, calculate the corresponding 5100 * size of kernelcore so that memory usable for any allocation type 5101 * is spread evenly. If both kernelcore 5102 * and movablecore are specified, then the value of kernelcore 5103 * will be used for required_kernelcore if it's greater than 5104 * what movablecore would have allowed. 5105 */ 5106 if (required_movablecore) { 5107 unsigned long corepages; 5108 5109 /* 5110 * Round-up so that ZONE_MOVABLE is at least as large as what 5111 * was requested by the user 5112 */ 5113 required_movablecore = 5114 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 5115 corepages = totalpages - required_movablecore; 5116 5117 required_kernelcore = max(required_kernelcore, corepages); 5118 } 5119 5120 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 5121 if (!required_kernelcore) 5122 goto out; 5123 5124 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 5125 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 5126 5127 restart: 5128 /* Spread kernelcore memory as evenly as possible throughout nodes */ 5129 kernelcore_node = required_kernelcore / usable_nodes; 5130 for_each_node_state(nid, N_MEMORY) { 5131 unsigned long start_pfn, end_pfn; 5132 5133 /* 5134 * Recalculate kernelcore_node if the division per node 5135 * now exceeds what is necessary to satisfy the requested 5136 * amount of memory for the kernel 5137 */ 5138 if (required_kernelcore < kernelcore_node) 5139 kernelcore_node = required_kernelcore / usable_nodes; 5140 5141 /* 5142 * As the map is walked, we track how much memory is usable 5143 * by the kernel using kernelcore_remaining.
When it is 5144 * 0, the rest of the node is usable by ZONE_MOVABLE 5145 */ 5146 kernelcore_remaining = kernelcore_node; 5147 5148 /* Go through each range of PFNs within this node */ 5149 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 5150 unsigned long size_pages; 5151 5152 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 5153 if (start_pfn >= end_pfn) 5154 continue; 5155 5156 /* Account for what is only usable for kernelcore */ 5157 if (start_pfn < usable_startpfn) { 5158 unsigned long kernel_pages; 5159 kernel_pages = min(end_pfn, usable_startpfn) 5160 - start_pfn; 5161 5162 kernelcore_remaining -= min(kernel_pages, 5163 kernelcore_remaining); 5164 required_kernelcore -= min(kernel_pages, 5165 required_kernelcore); 5166 5167 /* Continue if range is now fully accounted */ 5168 if (end_pfn <= usable_startpfn) { 5169 5170 /* 5171 * Push zone_movable_pfn to the end so 5172 * that if we have to rebalance 5173 * kernelcore across nodes, we will 5174 * not double account here 5175 */ 5176 zone_movable_pfn[nid] = end_pfn; 5177 continue; 5178 } 5179 start_pfn = usable_startpfn; 5180 } 5181 5182 /* 5183 * The usable PFN range for ZONE_MOVABLE is from 5184 * start_pfn->end_pfn. Calculate size_pages as the 5185 * number of pages used as kernelcore 5186 */ 5187 size_pages = end_pfn - start_pfn; 5188 if (size_pages > kernelcore_remaining) 5189 size_pages = kernelcore_remaining; 5190 zone_movable_pfn[nid] = start_pfn + size_pages; 5191 5192 /* 5193 * Some of the kernelcore target has been met; update the 5194 * counts and break if the kernelcore for this node has 5195 * been satisfied 5196 */ 5197 required_kernelcore -= min(required_kernelcore, 5198 size_pages); 5199 kernelcore_remaining -= size_pages; 5200 if (!kernelcore_remaining) 5201 break; 5202 } 5203 } 5204 5205 /* 5206 * If there is still required_kernelcore, we do another pass with one 5207 * less node in the count. This will push zone_movable_pfn[nid] further 5208 * along on the nodes that still have memory until kernelcore is 5209 * satisfied 5210 */ 5211 usable_nodes--; 5212 if (usable_nodes && required_kernelcore > usable_nodes) 5213 goto restart; 5214 5215 out2: 5216 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 5217 for (nid = 0; nid < MAX_NUMNODES; nid++) 5218 zone_movable_pfn[nid] = 5219 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 5220 5221 out: 5222 /* restore the node_state */ 5223 node_states[N_MEMORY] = saved_node_state; 5224 } 5225 5226 /* Any regular or high memory on that node? */ 5227 static void check_for_memory(pg_data_t *pgdat, int nid) 5228 { 5229 enum zone_type zone_type; 5230 5231 if (N_MEMORY == N_NORMAL_MEMORY) 5232 return; 5233 5234 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 5235 struct zone *zone = &pgdat->node_zones[zone_type]; 5236 if (populated_zone(zone)) { 5237 node_set_state(nid, N_HIGH_MEMORY); 5238 if (N_NORMAL_MEMORY != N_HIGH_MEMORY && 5239 zone_type <= ZONE_NORMAL) 5240 node_set_state(nid, N_NORMAL_MEMORY); 5241 break; 5242 } 5243 } 5244 } 5245 5246 /** 5247 * free_area_init_nodes - Initialise all pg_data_t and zone data 5248 * @max_zone_pfn: an array of max PFNs for each zone 5249 * 5250 * This will call free_area_init_node() for each active node in the system. 5251 * Using the page ranges provided by add_active_range(), the size of each 5252 * zone in each node and their holes is calculated. If the maximum PFNs 5253 * of two adjacent zones match, the higher zone is assumed to be empty.
5254 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 5255 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 5256 * starts where the previous one ended. For example, ZONE_DMA32 starts 5257 * at arch_max_dma_pfn. 5258 */ 5259 void __init free_area_init_nodes(unsigned long *max_zone_pfn) 5260 { 5261 unsigned long start_pfn, end_pfn; 5262 int i, nid; 5263 5264 /* Record where the zone boundaries are */ 5265 memset(arch_zone_lowest_possible_pfn, 0, 5266 sizeof(arch_zone_lowest_possible_pfn)); 5267 memset(arch_zone_highest_possible_pfn, 0, 5268 sizeof(arch_zone_highest_possible_pfn)); 5269 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 5270 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 5271 for (i = 1; i < MAX_NR_ZONES; i++) { 5272 if (i == ZONE_MOVABLE) 5273 continue; 5274 arch_zone_lowest_possible_pfn[i] = 5275 arch_zone_highest_possible_pfn[i-1]; 5276 arch_zone_highest_possible_pfn[i] = 5277 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 5278 } 5279 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 5280 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 5281 5282 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 5283 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 5284 find_zone_movable_pfns_for_nodes(); 5285 5286 /* Print out the zone ranges */ 5287 printk("Zone ranges:\n"); 5288 for (i = 0; i < MAX_NR_ZONES; i++) { 5289 if (i == ZONE_MOVABLE) 5290 continue; 5291 printk(KERN_CONT " %-8s ", zone_names[i]); 5292 if (arch_zone_lowest_possible_pfn[i] == 5293 arch_zone_highest_possible_pfn[i]) 5294 printk(KERN_CONT "empty\n"); 5295 else 5296 printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n", 5297 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT, 5298 (arch_zone_highest_possible_pfn[i] 5299 << PAGE_SHIFT) - 1); 5300 } 5301 5302 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 5303 printk("Movable zone start for each node\n"); 5304 for (i = 0; i < MAX_NUMNODES; i++) { 5305 if (zone_movable_pfn[i]) 5306 printk(" Node %d: %#010lx\n", i, 5307 zone_movable_pfn[i] << PAGE_SHIFT); 5308 } 5309 5310 /* Print out the early node map */ 5311 printk("Early memory node ranges\n"); 5312 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) 5313 printk(" node %3d: [mem %#010lx-%#010lx]\n", nid, 5314 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); 5315 5316 /* Initialise every node */ 5317 mminit_verify_pageflags_layout(); 5318 setup_nr_node_ids(); 5319 for_each_online_node(nid) { 5320 pg_data_t *pgdat = NODE_DATA(nid); 5321 free_area_init_node(nid, NULL, 5322 find_min_pfn_for_node(nid), NULL); 5323 5324 /* Any memory on that node */ 5325 if (pgdat->node_present_pages) 5326 node_set_state(nid, N_MEMORY); 5327 check_for_memory(pgdat, nid); 5328 } 5329 } 5330 5331 static int __init cmdline_parse_core(char *p, unsigned long *core) 5332 { 5333 unsigned long long coremem; 5334 if (!p) 5335 return -EINVAL; 5336 5337 coremem = memparse(p, &p); 5338 *core = coremem >> PAGE_SHIFT; 5339 5340 /* Paranoid check that UL is enough for the coremem value */ 5341 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 5342 5343 return 0; 5344 } 5345 5346 /* 5347 * kernelcore=size sets the amount of memory set aside for allocations 5348 * that cannot be reclaimed or migrated.
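* Usage example (editor's illustration): booting with kernelcore=512M on a 2 GiB machine keeps roughly 512 MiB usable for unmovable kernel allocations and lets find_zone_movable_pfns_for_nodes() place the remaining ~1.5 GiB in ZONE_MOVABLE.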
5349 */ 5350 static int __init cmdline_parse_kernelcore(char *p) 5351 { 5352 return cmdline_parse_core(p, &required_kernelcore); 5353 } 5354 5355 /* 5356 * movablecore=size sets the amount of memory set aside for allocations 5357 * that can be reclaimed or migrated. 5358 */ 5359 static int __init cmdline_parse_movablecore(char *p) 5360 { 5361 return cmdline_parse_core(p, &required_movablecore); 5362 } 5363 5364 early_param("kernelcore", cmdline_parse_kernelcore); 5365 early_param("movablecore", cmdline_parse_movablecore); 5366 5367 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5368 5369 void adjust_managed_page_count(struct page *page, long count) 5370 { 5371 spin_lock(&managed_page_count_lock); 5372 page_zone(page)->managed_pages += count; 5373 totalram_pages += count; 5374 #ifdef CONFIG_HIGHMEM 5375 if (PageHighMem(page)) 5376 totalhigh_pages += count; 5377 #endif 5378 spin_unlock(&managed_page_count_lock); 5379 } 5380 EXPORT_SYMBOL(adjust_managed_page_count); 5381 5382 unsigned long free_reserved_area(void *start, void *end, int poison, char *s) 5383 { 5384 void *pos; 5385 unsigned long pages = 0; 5386 5387 start = (void *)PAGE_ALIGN((unsigned long)start); 5388 end = (void *)((unsigned long)end & PAGE_MASK); 5389 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5390 if ((unsigned int)poison <= 0xFF) 5391 memset(pos, poison, PAGE_SIZE); 5392 free_reserved_page(virt_to_page(pos)); 5393 } 5394 5395 if (pages && s) 5396 pr_info("Freeing %s memory: %ldK (%p - %p)\n", 5397 s, pages << (PAGE_SHIFT - 10), start, end); 5398 5399 return pages; 5400 } 5401 EXPORT_SYMBOL(free_reserved_area); 5402 5403 #ifdef CONFIG_HIGHMEM 5404 void free_highmem_page(struct page *page) 5405 { 5406 __free_reserved_page(page); 5407 totalram_pages++; 5408 page_zone(page)->managed_pages++; 5409 totalhigh_pages++; 5410 } 5411 #endif 5412 5413 5414 void __init mem_init_print_info(const char *str) 5415 { 5416 unsigned long physpages, codesize, datasize, rosize, bss_size; 5417 unsigned long init_code_size, init_data_size; 5418 5419 physpages = get_num_physpages(); 5420 codesize = _etext - _stext; 5421 datasize = _edata - _sdata; 5422 rosize = __end_rodata - __start_rodata; 5423 bss_size = __bss_stop - __bss_start; 5424 init_data_size = __init_end - __init_begin; 5425 init_code_size = _einittext - _sinittext; 5426 5427 /* 5428 * Detect special cases and adjust section sizes accordingly: 5429 * 1) .init.* may be embedded into .data sections 5430 * 2) .init.text.* may be out of [__init_begin, __init_end], 5431 * please refer to arch/tile/kernel/vmlinux.lds.S. 5432 * 3) .rodata.* may be embedded into .text or .data sections.
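* Example (editor's illustration): if _sinittext lies inside [_stext, _etext), the init code was counted in codesize above, so adj_init_size() below subtracts init_code_size from codesize to avoid reporting it twice.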
5433 */ 5434 #define adj_init_size(start, end, size, pos, adj) \ 5435 do { \ 5436 if (start <= pos && pos < end && size > adj) \ 5437 size -= adj; \ 5438 } while (0) 5439 5440 adj_init_size(__init_begin, __init_end, init_data_size, 5441 _sinittext, init_code_size); 5442 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 5443 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 5444 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 5445 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 5446 5447 #undef adj_init_size 5448 5449 printk("Memory: %luK/%luK available " 5450 "(%luK kernel code, %luK rwdata, %luK rodata, " 5451 "%luK init, %luK bss, %luK reserved" 5452 #ifdef CONFIG_HIGHMEM 5453 ", %luK highmem" 5454 #endif 5455 "%s%s)\n", 5456 nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10), 5457 codesize >> 10, datasize >> 10, rosize >> 10, 5458 (init_data_size + init_code_size) >> 10, bss_size >> 10, 5459 (physpages - totalram_pages) << (PAGE_SHIFT-10), 5460 #ifdef CONFIG_HIGHMEM 5461 totalhigh_pages << (PAGE_SHIFT-10), 5462 #endif 5463 str ? ", " : "", str ? str : ""); 5464 } 5465 5466 /** 5467 * set_dma_reserve - set the specified number of pages reserved in the first zone 5468 * @new_dma_reserve: The number of pages to mark reserved 5469 * 5470 * The per-cpu batchsize and zone watermarks are determined by present_pages. 5471 * In the DMA zone, a significant percentage may be consumed by kernel image 5472 * and other unfreeable allocations which can skew the watermarks badly. This 5473 * function may optionally be used to account for unfreeable pages in the 5474 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 5475 * smaller per-cpu batchsize. 5476 */ 5477 void __init set_dma_reserve(unsigned long new_dma_reserve) 5478 { 5479 dma_reserve = new_dma_reserve; 5480 } 5481 5482 void __init free_area_init(unsigned long *zones_size) 5483 { 5484 free_area_init_node(0, zones_size, 5485 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 5486 } 5487 5488 static int page_alloc_cpu_notify(struct notifier_block *self, 5489 unsigned long action, void *hcpu) 5490 { 5491 int cpu = (unsigned long)hcpu; 5492 5493 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 5494 lru_add_drain_cpu(cpu); 5495 drain_pages(cpu); 5496 5497 /* 5498 * Spill the event counters of the dead processor 5499 * into the current processor's event counters. 5500 * This artificially elevates the count of the current 5501 * processor. 5502 */ 5503 vm_events_fold_cpu(cpu); 5504 5505 /* 5506 * Zero the differential counters of the dead processor 5507 * so that the vm statistics are consistent. 5508 * 5509 * This is only okay since the processor is dead and cannot 5510 * race with what we are doing. 5511 */ 5512 cpu_vm_stats_fold(cpu); 5513 } 5514 return NOTIFY_OK; 5515 } 5516 5517 void __init page_alloc_init(void) 5518 { 5519 hotcpu_notifier(page_alloc_cpu_notify, 0); 5520 } 5521 5522 /* 5523 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 5524 * or min_free_kbytes changes.
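* Worked example (editor's illustration): a zone with a high watermark of 1024 pages whose largest lowmem_reserve[] entry is 8192 pages contributes min(8192 + 1024, managed_pages) = 9216 pages to totalreserve_pages.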
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			unsigned long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > zone->managed_pages)
				max = zone->managed_pages;
			reserve_pages += max;
			/*
			 * Lowmem reserves are not available to
			 * GFP_HIGHUSER page cache allocations and
			 * kswapd tries to balance zones to their high
			 * watermark.  As a result, neither should be
			 * regarded as dirtyable memory, to prevent a
			 * situation where reclaim has to clean pages
			 * in order to balance the zones.
			 */
			zone->dirty_balance_reserve = max;
		}
	}
	dirty_balance_reserve = reserve_pages;
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long managed_pages = zone->managed_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = managed_pages /
					sysctl_lowmem_reserve_ratio[idx];
				managed_pages += lower_zone->managed_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->managed_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->managed_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The (WMARK_HIGH - WMARK_LOW) and
			 * (WMARK_LOW - WMARK_MIN) deltas control async page
			 * reclaim, and so should not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone->managed_pages / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
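			 *
			 * Worked example (illustrative numbers, 4KB pages):
			 * with min_free_kbytes = 4096 and a single 1GB zone,
			 * pages_min = tmp = 1024 pages, so the assignments
			 * below yield WMARK_MIN = 1024, WMARK_LOW = 1280 and
			 * WMARK_HIGH = 1536.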
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}

		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);

		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
				      high_wmark_pages(zone) -
				      low_wmark_pages(zone) -
				      zone_page_state(zone, NR_ALLOC_BATCH));

		setup_zone_migrate_reserve(zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	mutex_lock(&zonelists_mutex);
	__setup_per_zone_wmarks();
	mutex_unlock(&zonelists_mutex);
}

/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code.  A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
{
	unsigned int gb, ratio;

	/* Zone size in gigabytes */
	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;

	zone->inactive_ratio = ratio;
}

static void __meminit setup_per_zone_inactive_ratio(void)
{
	struct zone *zone;

	for_each_zone(zone)
		calculate_zone_inactive_ratio(zone);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.
 * We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
			new_min_free_kbytes, user_min_free_kbytes);
	}
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
module_init(init_per_zone_wmark_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around
 *	proc_dointvec_minmax() so that we can call setup_per_zone_wmarks()
 *	whenever min_free_kbytes changes.
 */
int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->managed_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->managed_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec_minmax() so that we can call
 *	setup_per_zone_lowmem_reserve() whenever
 *	sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks.  The lowmem reserve ratio only makes sense as a
 * function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	/* Propagate parse errors instead of silently ignoring them. */
	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.
 * It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can hold before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	unsigned int cpu;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || (ret < 0))
		return ret;

	/* A written value of 0 would divide by zero below; reject it. */
	if (!percpu_pagelist_fraction)
		return -EINVAL;

	mutex_lock(&pcp_batch_high_lock);
	for_each_populated_zone(zone) {
		unsigned long high;

		high = zone->managed_pages / percpu_pagelist_fraction;
		for_each_possible_cpu(cpu)
			pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
					 high);
	}
	mutex_unlock(&pcp_batch_high_lock);
	return 0;
}

int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation..
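		 * i.e. numentries * bucketsize >= PAGE_SIZE, so the table
		 * spans at least one whole page.  HASH_SMALL instead enforces
		 * a floor of 1 << *_hash_shift entries.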
		 */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = memblock_virt_alloc_nopanic(size, 0);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() handles automatically.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %lu (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
					unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest to retrieve
 * @end_bitidx: The last bit of interest
 * returns pageblock_bits flags
 */
unsigned long get_pageblock_flags_group(struct page *page,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long flags = 0;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (test_bit(bitidx + start_bitidx, bitmap))
			flags |= value;

	return flags;
}

/**
 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block
 * of pages
 * @page: The page within the block of interest
 * @start_bitidx: The first bit of interest
 * @end_bitidx: The last bit of interest
 * @flags: The flags to set
 */
void set_pageblock_flags_group(struct page *page, unsigned long flags,
					int start_bitidx, int end_bitidx)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long pfn, bitidx;
	unsigned long value = 1;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);

	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
		if (flags & value)
			__set_bit(bitidx + start_bitidx, bitmap);
		else
			__clear_bit(bitidx + start_bitidx, bitmap);
}

/*
 * This function checks whether the pageblock includes unmovable pages or not.
 * If @count is not zero, it is okay to include fewer than @count unmovable
 * pages.
 *
 * A PageLRU check without isolation or lru_lock can race, so a
 * MIGRATE_MOVABLE block might include unmovable pages.  This means you
 * can't expect this function to be exact.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 bool skip_hwpoisoned_pages)
{
	unsigned long pfn, iter, found;
	int mt;

	/*
	 * To avoid noisy data, lru_add_drain_all() should be called first.
	 * A ZONE_MOVABLE zone never contains unmovable pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return false;
	mt = get_pageblock_migratetype(page);
	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
		return false;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page)) {
			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
			continue;
		}

		/*
		 * We can't use page_count() without pinning the page,
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * because their page->_count is zero at all times.
		 */
		if (!atomic_read(&page->_count)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (skip_hwpoisoned_pages && PageHWPoison(page))
			continue;

		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them.
		 * But for now, memory offline itself doesn't call
		 * shrink_slab(), and this still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0, so we
		 * need no further checks: this is a _used_ unmovable page.
		 *
		 * The problematic thing here is PG_reserved pages.
		 * PG_reserved is set on both memory hole pages and _used_
		 * kernel pages at boot.
6134 */ 6135 if (found > count) 6136 return true; 6137 } 6138 return false; 6139 } 6140 6141 bool is_pageblock_removable_nolock(struct page *page) 6142 { 6143 struct zone *zone; 6144 unsigned long pfn; 6145 6146 /* 6147 * We have to be careful here because we are iterating over memory 6148 * sections which are not zone aware so we might end up outside of 6149 * the zone but still within the section. 6150 * We have to take care about the node as well. If the node is offline 6151 * its NODE_DATA will be NULL - see page_zone. 6152 */ 6153 if (!node_online(page_to_nid(page))) 6154 return false; 6155 6156 zone = page_zone(page); 6157 pfn = page_to_pfn(page); 6158 if (!zone_spans_pfn(zone, pfn)) 6159 return false; 6160 6161 return !has_unmovable_pages(zone, page, 0, true); 6162 } 6163 6164 #ifdef CONFIG_CMA 6165 6166 static unsigned long pfn_max_align_down(unsigned long pfn) 6167 { 6168 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, 6169 pageblock_nr_pages) - 1); 6170 } 6171 6172 static unsigned long pfn_max_align_up(unsigned long pfn) 6173 { 6174 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, 6175 pageblock_nr_pages)); 6176 } 6177 6178 /* [start, end) must belong to a single zone. */ 6179 static int __alloc_contig_migrate_range(struct compact_control *cc, 6180 unsigned long start, unsigned long end) 6181 { 6182 /* This function is based on compact_zone() from compaction.c. */ 6183 unsigned long nr_reclaimed; 6184 unsigned long pfn = start; 6185 unsigned int tries = 0; 6186 int ret = 0; 6187 6188 migrate_prep(); 6189 6190 while (pfn < end || !list_empty(&cc->migratepages)) { 6191 if (fatal_signal_pending(current)) { 6192 ret = -EINTR; 6193 break; 6194 } 6195 6196 if (list_empty(&cc->migratepages)) { 6197 cc->nr_migratepages = 0; 6198 pfn = isolate_migratepages_range(cc->zone, cc, 6199 pfn, end, true); 6200 if (!pfn) { 6201 ret = -EINTR; 6202 break; 6203 } 6204 tries = 0; 6205 } else if (++tries == 5) { 6206 ret = ret < 0 ? ret : -EBUSY; 6207 break; 6208 } 6209 6210 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6211 &cc->migratepages); 6212 cc->nr_migratepages -= nr_reclaimed; 6213 6214 ret = migrate_pages(&cc->migratepages, alloc_migrate_target, 6215 0, MIGRATE_SYNC, MR_CMA); 6216 } 6217 if (ret < 0) { 6218 putback_movable_pages(&cc->migratepages); 6219 return ret; 6220 } 6221 return 0; 6222 } 6223 6224 /** 6225 * alloc_contig_range() -- tries to allocate given range of pages 6226 * @start: start PFN to allocate 6227 * @end: one-past-the-last PFN to allocate 6228 * @migratetype: migratetype of the underlaying pageblocks (either 6229 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6230 * in range must have the same migratetype and it must 6231 * be either of the two. 6232 * 6233 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES 6234 * aligned, however it's the caller's responsibility to guarantee that 6235 * we are the only thread that changes migrate type of pageblocks the 6236 * pages fall in. 6237 * 6238 * The PFN range must belong to a single zone. 6239 * 6240 * Returns zero on success or negative error code. On success all 6241 * pages which PFN is in [start, end) are allocated for the caller and 6242 * need to be freed with free_contig_range(). 
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype)
{
	unsigned long outer_start, outer_end;
	int ret = 0, order;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.sync = true,
		.ignore_skip_hint = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is mark all pageblocks in the range as
	 * MIGRATE_ISOLATE.  Because pageblocks and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the bigger of the two so that
	 * the page allocator won't try to merge buddies from different
	 * pageblocks and change MIGRATE_ISOLATE to some other migration
	 * type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from the unaligned range (ie. the pages
	 * that we are interested in).  This puts all the pages in the
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in the range from the
	 * page allocator, removing them from the buddy system.  This
	 * way the page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that the buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype,
				       false);
	if (ret)
		return ret;

	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret)
		goto done;

	/*
	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
	 * more, all pages in [start, end) are free in the page allocator.
	 * What we are going to do is allocate all pages from
	 * [start, end) (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with pages
	 * that the page allocator holds, ie. they can be part of higher
	 * order pages.  Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated and thus won't get removed from the buddy system.
	 */

	lru_add_drain_all();
	drain_all_pages();

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			ret = -EBUSY;
			goto done;
		}
		outer_start &= ~0UL << order;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
			outer_start, end);
		ret = -EBUSY;
		goto done;
	}


	/* Grab isolated pages from freelists.
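	 * isolate_freepages_range() is expected to return the PFN one past
	 * the last page it grabbed, or 0 if a page in the range could not
	 * be isolated.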
	 */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and
 * percpu page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;
	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
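		 * Such a page cannot sit on a free list, so mark it
		 * reserved and skip over it.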
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			SetPageReserved(page);
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#endif

static const struct trace_print_flags pageflag_names[] = {
	{1UL << PG_locked,		"locked"	},
	{1UL << PG_error,		"error"		},
	{1UL << PG_referenced,		"referenced"	},
	{1UL << PG_uptodate,		"uptodate"	},
	{1UL << PG_dirty,		"dirty"		},
	{1UL << PG_lru,			"lru"		},
	{1UL << PG_active,		"active"	},
	{1UL << PG_slab,		"slab"		},
	{1UL << PG_owner_priv_1,	"owner_priv_1"	},
	{1UL << PG_arch_1,		"arch_1"	},
	{1UL << PG_reserved,		"reserved"	},
	{1UL << PG_private,		"private"	},
	{1UL << PG_private_2,		"private_2"	},
	{1UL << PG_writeback,		"writeback"	},
#ifdef CONFIG_PAGEFLAGS_EXTENDED
	{1UL << PG_head,		"head"		},
	{1UL << PG_tail,		"tail"		},
#else
	{1UL << PG_compound,		"compound"	},
#endif
	{1UL << PG_swapcache,		"swapcache"	},
	{1UL << PG_mappedtodisk,	"mappedtodisk"	},
	{1UL << PG_reclaim,		"reclaim"	},
	{1UL << PG_swapbacked,		"swapbacked"	},
	{1UL << PG_unevictable,		"unevictable"	},
#ifdef CONFIG_MMU
	{1UL << PG_mlocked,		"mlocked"	},
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	{1UL << PG_uncached,		"uncached"	},
#endif
#ifdef CONFIG_MEMORY_FAILURE
	{1UL << PG_hwpoison,		"hwpoison"	},
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	{1UL << PG_compound_lock,	"compound_lock"	},
#endif
};

static void dump_page_flags(unsigned long flags)
{
	const char *delim = "";
	unsigned long mask;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);

	printk(KERN_ALERT "page flags: %#lx(", flags);

	/* remove zone id */
	flags &= (1UL << NR_PAGEFLAGS) - 1;

	for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {

		mask = pageflag_names[i].mask;
		if ((flags & mask) != mask)
			continue;

		flags &= ~mask;
		printk("%s%s", delim, pageflag_names[i].name);
		delim = "|";
	}

	/* check for left over flags */
	if (flags)
		printk("%s%#lx", delim, flags);

	printk(")\n");
}

void dump_page_badflags(struct page *page, char *reason, unsigned long badflags)
{
	printk(KERN_ALERT
	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
	       page, atomic_read(&page->_count), page_mapcount(page),
	       page->mapping, page->index);
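	/* Decode and print the symbolic page flags as well. */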
	dump_page_flags(page->flags);
	if (reason)
		pr_alert("page dumped because: %s\n", reason);
	if (page->flags & badflags) {
		pr_alert("bad because of flags:\n");
		dump_page_flags(page->flags & badflags);
	}
	mem_cgroup_print_bad_page(page);
}

void dump_page(struct page *page, char *reason)
{
	dump_page_badflags(page, reason, 0);
}
EXPORT_SYMBOL_GPL(dump_page);