1 /* 2 * linux/mm/page_alloc.c 3 * 4 * Manages the free list, the system allocates free pages here. 5 * Note that kmalloc() lives in slab.c 6 * 7 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 8 * Swap reorganised 29.12.95, Stephen Tweedie 9 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 10 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 11 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 12 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 14 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 15 */ 16 17 #include <linux/stddef.h> 18 #include <linux/mm.h> 19 #include <linux/swap.h> 20 #include <linux/interrupt.h> 21 #include <linux/pagemap.h> 22 #include <linux/jiffies.h> 23 #include <linux/bootmem.h> 24 #include <linux/memblock.h> 25 #include <linux/compiler.h> 26 #include <linux/kernel.h> 27 #include <linux/kmemcheck.h> 28 #include <linux/module.h> 29 #include <linux/suspend.h> 30 #include <linux/pagevec.h> 31 #include <linux/blkdev.h> 32 #include <linux/slab.h> 33 #include <linux/ratelimit.h> 34 #include <linux/oom.h> 35 #include <linux/notifier.h> 36 #include <linux/topology.h> 37 #include <linux/sysctl.h> 38 #include <linux/cpu.h> 39 #include <linux/cpuset.h> 40 #include <linux/memory_hotplug.h> 41 #include <linux/nodemask.h> 42 #include <linux/vmalloc.h> 43 #include <linux/vmstat.h> 44 #include <linux/mempolicy.h> 45 #include <linux/stop_machine.h> 46 #include <linux/sort.h> 47 #include <linux/pfn.h> 48 #include <linux/backing-dev.h> 49 #include <linux/fault-inject.h> 50 #include <linux/page-isolation.h> 51 #include <linux/page_cgroup.h> 52 #include <linux/debugobjects.h> 53 #include <linux/kmemleak.h> 54 #include <linux/compaction.h> 55 #include <trace/events/kmem.h> 56 #include <linux/ftrace_event.h> 57 #include <linux/memcontrol.h> 58 #include <linux/prefetch.h> 59 #include <linux/mm_inline.h> 60 #include <linux/migrate.h> 61 #include <linux/page-debug-flags.h> 62 #include <linux/hugetlb.h> 63 #include <linux/sched/rt.h> 64 65 #include <asm/sections.h> 66 #include <asm/tlbflush.h> 67 #include <asm/div64.h> 68 #include "internal.h" 69 70 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 71 static DEFINE_MUTEX(pcp_batch_high_lock); 72 73 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 74 DEFINE_PER_CPU(int, numa_node); 75 EXPORT_PER_CPU_SYMBOL(numa_node); 76 #endif 77 78 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 79 /* 80 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 81 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 82 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 83 * defined in <linux/topology.h>. 84 */ 85 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 86 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 87 #endif 88 89 /* 90 * Array of node states. 
91 */ 92 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 93 [N_POSSIBLE] = NODE_MASK_ALL, 94 [N_ONLINE] = { { [0] = 1UL } }, 95 #ifndef CONFIG_NUMA 96 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 97 #ifdef CONFIG_HIGHMEM 98 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 99 #endif 100 #ifdef CONFIG_MOVABLE_NODE 101 [N_MEMORY] = { { [0] = 1UL } }, 102 #endif 103 [N_CPU] = { { [0] = 1UL } }, 104 #endif /* NUMA */ 105 }; 106 EXPORT_SYMBOL(node_states); 107 108 /* Protect totalram_pages and zone->managed_pages */ 109 static DEFINE_SPINLOCK(managed_page_count_lock); 110 111 unsigned long totalram_pages __read_mostly; 112 unsigned long totalreserve_pages __read_mostly; 113 /* 114 * When calculating the number of globally allowed dirty pages, there 115 * is a certain number of per-zone reserves that should not be 116 * considered dirtyable memory. This is the sum of those reserves 117 * over all existing zones that contribute dirtyable memory. 118 */ 119 unsigned long dirty_balance_reserve __read_mostly; 120 121 int percpu_pagelist_fraction; 122 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 123 124 #ifdef CONFIG_PM_SLEEP 125 /* 126 * The following functions are used by the suspend/hibernate code to temporarily 127 * change gfp_allowed_mask in order to avoid using I/O during memory allocations 128 * while devices are suspended. To avoid races with the suspend/hibernate code, 129 * they should always be called with pm_mutex held (gfp_allowed_mask also should 130 * only be modified with pm_mutex held, unless the suspend/hibernate code is 131 * guaranteed not to run in parallel with that modification). 132 */ 133 134 static gfp_t saved_gfp_mask; 135 136 void pm_restore_gfp_mask(void) 137 { 138 WARN_ON(!mutex_is_locked(&pm_mutex)); 139 if (saved_gfp_mask) { 140 gfp_allowed_mask = saved_gfp_mask; 141 saved_gfp_mask = 0; 142 } 143 } 144 145 void pm_restrict_gfp_mask(void) 146 { 147 WARN_ON(!mutex_is_locked(&pm_mutex)); 148 WARN_ON(saved_gfp_mask); 149 saved_gfp_mask = gfp_allowed_mask; 150 gfp_allowed_mask &= ~GFP_IOFS; 151 } 152 153 bool pm_suspended_storage(void) 154 { 155 if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS) 156 return false; 157 return true; 158 } 159 #endif /* CONFIG_PM_SLEEP */ 160 161 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 162 int pageblock_order __read_mostly; 163 #endif 164 165 static void __free_pages_ok(struct page *page, unsigned int order); 166 167 /* 168 * results with 256, 32 in the lowmem_reserve sysctl: 169 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 170 * 1G machine -> (16M dma, 784M normal, 224M high) 171 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 172 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 173 * HIGHMEM allocation will (224M+784M)/256 of ram reserved in ZONE_DMA 174 * 175 * TBD: should special case ZONE_DMA32 machines here - in those we normally 176 * don't need any ZONE_NORMAL reservation 177 */ 178 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 179 #ifdef CONFIG_ZONE_DMA 180 256, 181 #endif 182 #ifdef CONFIG_ZONE_DMA32 183 256, 184 #endif 185 #ifdef CONFIG_HIGHMEM 186 32, 187 #endif 188 32, 189 }; 190 191 EXPORT_SYMBOL(totalram_pages); 192 193 static char * const zone_names[MAX_NR_ZONES] = { 194 #ifdef CONFIG_ZONE_DMA 195 "DMA", 196 #endif 197 #ifdef CONFIG_ZONE_DMA32 198 "DMA32", 199 #endif 200 "Normal", 201 #ifdef CONFIG_HIGHMEM 202 "HighMem", 203 #endif 204 "Movable", 205 }; 206 207 int min_free_kbytes = 1024; 208 int user_min_free_kbytes = -1; 209 210 static unsigned long __meminitdata 
nr_kernel_pages; 211 static unsigned long __meminitdata nr_all_pages; 212 static unsigned long __meminitdata dma_reserve; 213 214 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 215 static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES]; 216 static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES]; 217 static unsigned long __initdata required_kernelcore; 218 static unsigned long __initdata required_movablecore; 219 static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; 220 221 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 222 int movable_zone; 223 EXPORT_SYMBOL(movable_zone); 224 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 225 226 #if MAX_NUMNODES > 1 227 int nr_node_ids __read_mostly = MAX_NUMNODES; 228 int nr_online_nodes __read_mostly = 1; 229 EXPORT_SYMBOL(nr_node_ids); 230 EXPORT_SYMBOL(nr_online_nodes); 231 #endif 232 233 int page_group_by_mobility_disabled __read_mostly; 234 235 void set_pageblock_migratetype(struct page *page, int migratetype) 236 { 237 if (unlikely(page_group_by_mobility_disabled && 238 migratetype < MIGRATE_PCPTYPES)) 239 migratetype = MIGRATE_UNMOVABLE; 240 241 set_pageblock_flags_group(page, (unsigned long)migratetype, 242 PB_migrate, PB_migrate_end); 243 } 244 245 bool oom_killer_disabled __read_mostly; 246 247 #ifdef CONFIG_DEBUG_VM 248 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 249 { 250 int ret = 0; 251 unsigned seq; 252 unsigned long pfn = page_to_pfn(page); 253 unsigned long sp, start_pfn; 254 255 do { 256 seq = zone_span_seqbegin(zone); 257 start_pfn = zone->zone_start_pfn; 258 sp = zone->spanned_pages; 259 if (!zone_spans_pfn(zone, pfn)) 260 ret = 1; 261 } while (zone_span_seqretry(zone, seq)); 262 263 if (ret) 264 pr_err("page %lu outside zone [ %lu - %lu ]\n", 265 pfn, start_pfn, start_pfn + sp); 266 267 return ret; 268 } 269 270 static int page_is_consistent(struct zone *zone, struct page *page) 271 { 272 if (!pfn_valid_within(page_to_pfn(page))) 273 return 0; 274 if (zone != page_zone(page)) 275 return 0; 276 277 return 1; 278 } 279 /* 280 * Temporary debugging check for pages not lying within a given zone. 281 */ 282 static int bad_range(struct zone *zone, struct page *page) 283 { 284 if (page_outside_zone_boundaries(zone, page)) 285 return 1; 286 if (!page_is_consistent(zone, page)) 287 return 1; 288 289 return 0; 290 } 291 #else 292 static inline int bad_range(struct zone *zone, struct page *page) 293 { 294 return 0; 295 } 296 #endif 297 298 static void bad_page(struct page *page, const char *reason, 299 unsigned long bad_flags) 300 { 301 static unsigned long resume; 302 static unsigned long nr_shown; 303 static unsigned long nr_unshown; 304 305 /* Don't complain about poisoned pages */ 306 if (PageHWPoison(page)) { 307 page_mapcount_reset(page); /* remove PageBuddy */ 308 return; 309 } 310 311 /* 312 * Allow a burst of 60 reports, then keep quiet for that minute; 313 * or allow a steady drip of one report per second. 
314 */ 315 if (nr_shown == 60) { 316 if (time_before(jiffies, resume)) { 317 nr_unshown++; 318 goto out; 319 } 320 if (nr_unshown) { 321 printk(KERN_ALERT 322 "BUG: Bad page state: %lu messages suppressed\n", 323 nr_unshown); 324 nr_unshown = 0; 325 } 326 nr_shown = 0; 327 } 328 if (nr_shown++ == 0) 329 resume = jiffies + 60 * HZ; 330 331 printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n", 332 current->comm, page_to_pfn(page)); 333 dump_page_badflags(page, reason, bad_flags); 334 335 print_modules(); 336 dump_stack(); 337 out: 338 /* Leave bad fields for debug, except PageBuddy could make trouble */ 339 page_mapcount_reset(page); /* remove PageBuddy */ 340 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 341 } 342 343 /* 344 * Higher-order pages are called "compound pages". They are structured thusly: 345 * 346 * The first PAGE_SIZE page is called the "head page". 347 * 348 * The remaining PAGE_SIZE pages are called "tail pages". 349 * 350 * All pages have PG_compound set. All tail pages have their ->first_page 351 * pointing at the head page. 352 * 353 * The first tail page's ->lru.next holds the address of the compound page's 354 * put_page() function. Its ->lru.prev holds the order of allocation. 355 * This usage means that zero-order pages may not be compound. 356 */ 357 358 static void free_compound_page(struct page *page) 359 { 360 __free_pages_ok(page, compound_order(page)); 361 } 362 363 void prep_compound_page(struct page *page, unsigned long order) 364 { 365 int i; 366 int nr_pages = 1 << order; 367 368 set_compound_page_dtor(page, free_compound_page); 369 set_compound_order(page, order); 370 __SetPageHead(page); 371 for (i = 1; i < nr_pages; i++) { 372 struct page *p = page + i; 373 set_page_count(p, 0); 374 p->first_page = page; 375 /* Make sure p->first_page is always valid for PageTail() */ 376 smp_wmb(); 377 __SetPageTail(p); 378 } 379 } 380 381 /* update __split_huge_page_refcount if you change this function */ 382 static int destroy_compound_page(struct page *page, unsigned long order) 383 { 384 int i; 385 int nr_pages = 1 << order; 386 int bad = 0; 387 388 if (unlikely(compound_order(page) != order)) { 389 bad_page(page, "wrong compound order", 0); 390 bad++; 391 } 392 393 __ClearPageHead(page); 394 395 for (i = 1; i < nr_pages; i++) { 396 struct page *p = page + i; 397 398 if (unlikely(!PageTail(p))) { 399 bad_page(page, "PageTail not set", 0); 400 bad++; 401 } else if (unlikely(p->first_page != page)) { 402 bad_page(page, "first_page not consistent", 0); 403 bad++; 404 } 405 __ClearPageTail(p); 406 } 407 408 return bad; 409 } 410 411 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags) 412 { 413 int i; 414 415 /* 416 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO 417 * and __GFP_HIGHMEM from hard or soft interrupt context. 
 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif

static inline void set_page_order(struct page *page, int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * Locate the struct page for both the matching buddy in our
 * pair (buddy1) and the combined O(n+1) page they form (page).
 *
 * 1) Any buddy B1 will have an order O twin B2 which satisfies
 * the following equation:
 *     B2 = B1 ^ (1 << O)
 * For example, if the starting buddy (buddy2) is #8 its order
 * 1 buddy is #10:
 *     B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
 *
 * 2) Any buddy B will have an order O+1 parent P which
 * satisfies the following equation:
 *     P = B & ~(1 << O)
 *
 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
 */
static inline unsigned long
__find_buddy_index(unsigned long page_idx, unsigned int order)
{
	return page_idx ^ (1 << order);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
								int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_zone_id(page) != page_zone_id(buddy))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
531 * The bottom level table contains the map for the smallest allocatable 532 * units of memory (here, pages), and each level above it describes 533 * pairs of units from the levels below, hence, "buddies". 534 * At a high level, all that happens here is marking the table entry 535 * at the bottom level available, and propagating the changes upward 536 * as necessary, plus some accounting needed to play nicely with other 537 * parts of the VM system. 538 * At each level, we keep a list of pages, which are heads of continuous 539 * free pages of length of (1 << order) and marked with _mapcount 540 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page) 541 * field. 542 * So when we are allocating or freeing one, we can derive the state of the 543 * other. That is, if we allocate a small block, and both were 544 * free, the remainder of the region must be split into blocks. 545 * If a block is freed, and its buddy is also free, then this 546 * triggers coalescing into a block of larger size. 547 * 548 * -- nyc 549 */ 550 551 static inline void __free_one_page(struct page *page, 552 struct zone *zone, unsigned int order, 553 int migratetype) 554 { 555 unsigned long page_idx; 556 unsigned long combined_idx; 557 unsigned long uninitialized_var(buddy_idx); 558 struct page *buddy; 559 560 VM_BUG_ON(!zone_is_initialized(zone)); 561 562 if (unlikely(PageCompound(page))) 563 if (unlikely(destroy_compound_page(page, order))) 564 return; 565 566 VM_BUG_ON(migratetype == -1); 567 568 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1); 569 570 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); 571 VM_BUG_ON_PAGE(bad_range(zone, page), page); 572 573 while (order < MAX_ORDER-1) { 574 buddy_idx = __find_buddy_index(page_idx, order); 575 buddy = page + (buddy_idx - page_idx); 576 if (!page_is_buddy(page, buddy, order)) 577 break; 578 /* 579 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 580 * merge with it and move up one order. 581 */ 582 if (page_is_guard(buddy)) { 583 clear_page_guard_flag(buddy); 584 set_page_private(page, 0); 585 __mod_zone_freepage_state(zone, 1 << order, 586 migratetype); 587 } else { 588 list_del(&buddy->lru); 589 zone->free_area[order].nr_free--; 590 rmv_page_order(buddy); 591 } 592 combined_idx = buddy_idx & page_idx; 593 page = page + (combined_idx - page_idx); 594 page_idx = combined_idx; 595 order++; 596 } 597 set_page_order(page, order); 598 599 /* 600 * If this is not the largest possible page, check if the buddy 601 * of the next-highest order is free. If it is, it's possible 602 * that pages are being freed that will coalesce soon. 
If that is happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page
 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;

	spin_lock(&zone->lock);
	zone->pages_scanned = 0;

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around
		 * empty lists.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all.
*/ 692 if (batch_free == MIGRATE_PCPTYPES) 693 batch_free = to_free; 694 695 do { 696 int mt; /* migratetype of the to-be-freed page */ 697 698 page = list_entry(list->prev, struct page, lru); 699 /* must delete as __free_one_page list manipulates */ 700 list_del(&page->lru); 701 mt = get_freepage_migratetype(page); 702 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 703 __free_one_page(page, zone, 0, mt); 704 trace_mm_page_pcpu_drain(page, 0, mt); 705 if (likely(!is_migrate_isolate_page(page))) { 706 __mod_zone_page_state(zone, NR_FREE_PAGES, 1); 707 if (is_migrate_cma(mt)) 708 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1); 709 } 710 } while (--to_free && --batch_free && !list_empty(list)); 711 } 712 spin_unlock(&zone->lock); 713 } 714 715 static void free_one_page(struct zone *zone, struct page *page, int order, 716 int migratetype) 717 { 718 spin_lock(&zone->lock); 719 zone->pages_scanned = 0; 720 721 __free_one_page(page, zone, order, migratetype); 722 if (unlikely(!is_migrate_isolate(migratetype))) 723 __mod_zone_freepage_state(zone, 1 << order, migratetype); 724 spin_unlock(&zone->lock); 725 } 726 727 static bool free_pages_prepare(struct page *page, unsigned int order) 728 { 729 int i; 730 int bad = 0; 731 732 trace_mm_page_free(page, order); 733 kmemcheck_free_shadow(page, order); 734 735 if (PageAnon(page)) 736 page->mapping = NULL; 737 for (i = 0; i < (1 << order); i++) 738 bad += free_pages_check(page + i); 739 if (bad) 740 return false; 741 742 if (!PageHighMem(page)) { 743 debug_check_no_locks_freed(page_address(page), 744 PAGE_SIZE << order); 745 debug_check_no_obj_freed(page_address(page), 746 PAGE_SIZE << order); 747 } 748 arch_free_page(page, order); 749 kernel_map_pages(page, 1 << order, 0); 750 751 return true; 752 } 753 754 static void __free_pages_ok(struct page *page, unsigned int order) 755 { 756 unsigned long flags; 757 int migratetype; 758 759 if (!free_pages_prepare(page, order)) 760 return; 761 762 local_irq_save(flags); 763 __count_vm_events(PGFREE, 1 << order); 764 migratetype = get_pageblock_migratetype(page); 765 set_freepage_migratetype(page, migratetype); 766 free_one_page(page_zone(page), page, order, migratetype); 767 local_irq_restore(flags); 768 } 769 770 void __init __free_pages_bootmem(struct page *page, unsigned int order) 771 { 772 unsigned int nr_pages = 1 << order; 773 struct page *p = page; 774 unsigned int loop; 775 776 prefetchw(p); 777 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { 778 prefetchw(p + 1); 779 __ClearPageReserved(p); 780 set_page_count(p, 0); 781 } 782 __ClearPageReserved(p); 783 set_page_count(p, 0); 784 785 page_zone(page)->managed_pages += nr_pages; 786 set_page_refcounted(page); 787 __free_pages(page, order); 788 } 789 790 #ifdef CONFIG_CMA 791 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ 792 void __init init_cma_reserved_pageblock(struct page *page) 793 { 794 unsigned i = pageblock_nr_pages; 795 struct page *p = page; 796 797 do { 798 __ClearPageReserved(p); 799 set_page_count(p, 0); 800 } while (++p, --i); 801 802 set_page_refcounted(page); 803 set_pageblock_migratetype(page, MIGRATE_CMA); 804 __free_pages(page, pageblock_order); 805 adjust_managed_page_count(page, pageblock_nr_pages); 806 } 807 #endif 808 809 /* 810 * The order of subdivision here is critical for the IO subsystem. 811 * Please do not alter this order without good reasons and regression 812 * testing. 
Specifically, as large blocks of memory are subdivided, 813 * the order in which smaller blocks are delivered depends on the order 814 * they're subdivided in this function. This is the primary factor 815 * influencing the order in which pages are delivered to the IO 816 * subsystem according to empirical testing, and this is also justified 817 * by considering the behavior of a buddy system containing a single 818 * large block of memory acted on by a series of small allocations. 819 * This behavior is a critical factor in sglist merging's success. 820 * 821 * -- nyc 822 */ 823 static inline void expand(struct zone *zone, struct page *page, 824 int low, int high, struct free_area *area, 825 int migratetype) 826 { 827 unsigned long size = 1 << high; 828 829 while (high > low) { 830 area--; 831 high--; 832 size >>= 1; 833 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 834 835 #ifdef CONFIG_DEBUG_PAGEALLOC 836 if (high < debug_guardpage_minorder()) { 837 /* 838 * Mark as guard pages (or page), that will allow to 839 * merge back to allocator when buddy will be freed. 840 * Corresponding page table entries will not be touched, 841 * pages will stay not present in virtual address space 842 */ 843 INIT_LIST_HEAD(&page[size].lru); 844 set_page_guard_flag(&page[size]); 845 set_page_private(&page[size], high); 846 /* Guard pages are not available for any usage */ 847 __mod_zone_freepage_state(zone, -(1 << high), 848 migratetype); 849 continue; 850 } 851 #endif 852 list_add(&page[size].lru, &area->free_list[migratetype]); 853 area->nr_free++; 854 set_page_order(&page[size], high); 855 } 856 } 857 858 /* 859 * This page is about to be returned from the page allocator 860 */ 861 static inline int check_new_page(struct page *page) 862 { 863 const char *bad_reason = NULL; 864 unsigned long bad_flags = 0; 865 866 if (unlikely(page_mapcount(page))) 867 bad_reason = "nonzero mapcount"; 868 if (unlikely(page->mapping != NULL)) 869 bad_reason = "non-NULL mapping"; 870 if (unlikely(atomic_read(&page->_count) != 0)) 871 bad_reason = "nonzero _count"; 872 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { 873 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; 874 bad_flags = PAGE_FLAGS_CHECK_AT_PREP; 875 } 876 if (unlikely(mem_cgroup_bad_page_check(page))) 877 bad_reason = "cgroup check failed"; 878 if (unlikely(bad_reason)) { 879 bad_page(page, bad_reason, bad_flags); 880 return 1; 881 } 882 return 0; 883 } 884 885 static int prep_new_page(struct page *page, int order, gfp_t gfp_flags) 886 { 887 int i; 888 889 for (i = 0; i < (1 << order); i++) { 890 struct page *p = page + i; 891 if (unlikely(check_new_page(p))) 892 return 1; 893 } 894 895 set_page_private(page, 0); 896 set_page_refcounted(page); 897 898 arch_alloc_page(page, order); 899 kernel_map_pages(page, 1 << order, 1); 900 901 if (gfp_flags & __GFP_ZERO) 902 prep_zero_page(page, order, gfp_flags); 903 904 if (order && (gfp_flags & __GFP_COMP)) 905 prep_compound_page(page, order); 906 907 return 0; 908 } 909 910 /* 911 * Go through the free lists for the given migratetype and remove 912 * the smallest available page from the freelists 913 */ 914 static inline 915 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 916 int migratetype) 917 { 918 unsigned int current_order; 919 struct free_area *area; 920 struct page *page; 921 922 /* Find a page of the appropriate size in the preferred list */ 923 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 924 area = &(zone->free_area[current_order]); 925 
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which free lists are fallen back on
 * when the free lists for the desired migrate type are depleted.
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
#endif
};

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block().
 */
int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
978 * Remove at a later date when no bug reports exist related to 979 * grouping pages by mobility 980 */ 981 BUG_ON(page_zone(start_page) != page_zone(end_page)); 982 #endif 983 984 for (page = start_page; page <= end_page;) { 985 /* Make sure we are not inadvertently changing nodes */ 986 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 987 988 if (!pfn_valid_within(page_to_pfn(page))) { 989 page++; 990 continue; 991 } 992 993 if (!PageBuddy(page)) { 994 page++; 995 continue; 996 } 997 998 order = page_order(page); 999 list_move(&page->lru, 1000 &zone->free_area[order].free_list[migratetype]); 1001 set_freepage_migratetype(page, migratetype); 1002 page += 1 << order; 1003 pages_moved += 1 << order; 1004 } 1005 1006 return pages_moved; 1007 } 1008 1009 int move_freepages_block(struct zone *zone, struct page *page, 1010 int migratetype) 1011 { 1012 unsigned long start_pfn, end_pfn; 1013 struct page *start_page, *end_page; 1014 1015 start_pfn = page_to_pfn(page); 1016 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 1017 start_page = pfn_to_page(start_pfn); 1018 end_page = start_page + pageblock_nr_pages - 1; 1019 end_pfn = start_pfn + pageblock_nr_pages - 1; 1020 1021 /* Do not cross zone boundaries */ 1022 if (!zone_spans_pfn(zone, start_pfn)) 1023 start_page = page; 1024 if (!zone_spans_pfn(zone, end_pfn)) 1025 return 0; 1026 1027 return move_freepages(zone, start_page, end_page, migratetype); 1028 } 1029 1030 static void change_pageblock_range(struct page *pageblock_page, 1031 int start_order, int migratetype) 1032 { 1033 int nr_pageblocks = 1 << (start_order - pageblock_order); 1034 1035 while (nr_pageblocks--) { 1036 set_pageblock_migratetype(pageblock_page, migratetype); 1037 pageblock_page += pageblock_nr_pages; 1038 } 1039 } 1040 1041 /* 1042 * If breaking a large block of pages, move all free pages to the preferred 1043 * allocation list. If falling back for a reclaimable kernel allocation, be 1044 * more aggressive about taking ownership of free pages. 1045 * 1046 * On the other hand, never change migration type of MIGRATE_CMA pageblocks 1047 * nor move CMA pages to different free lists. We don't want unmovable pages 1048 * to be allocated from MIGRATE_CMA areas. 1049 * 1050 * Returns the new migratetype of the pageblock (or the same old migratetype 1051 * if it was unchanged). 1052 */ 1053 static int try_to_steal_freepages(struct zone *zone, struct page *page, 1054 int start_type, int fallback_type) 1055 { 1056 int current_order = page_order(page); 1057 1058 /* 1059 * When borrowing from MIGRATE_CMA, we need to release the excess 1060 * buddy pages to CMA itself. 
1061 */ 1062 if (is_migrate_cma(fallback_type)) 1063 return fallback_type; 1064 1065 /* Take ownership for orders >= pageblock_order */ 1066 if (current_order >= pageblock_order) { 1067 change_pageblock_range(page, current_order, start_type); 1068 return start_type; 1069 } 1070 1071 if (current_order >= pageblock_order / 2 || 1072 start_type == MIGRATE_RECLAIMABLE || 1073 page_group_by_mobility_disabled) { 1074 int pages; 1075 1076 pages = move_freepages_block(zone, page, start_type); 1077 1078 /* Claim the whole block if over half of it is free */ 1079 if (pages >= (1 << (pageblock_order-1)) || 1080 page_group_by_mobility_disabled) { 1081 1082 set_pageblock_migratetype(page, start_type); 1083 return start_type; 1084 } 1085 1086 } 1087 1088 return fallback_type; 1089 } 1090 1091 /* Remove an element from the buddy allocator from the fallback list */ 1092 static inline struct page * 1093 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) 1094 { 1095 struct free_area *area; 1096 int current_order; 1097 struct page *page; 1098 int migratetype, new_type, i; 1099 1100 /* Find the largest possible block of pages in the other list */ 1101 for (current_order = MAX_ORDER-1; current_order >= order; 1102 --current_order) { 1103 for (i = 0;; i++) { 1104 migratetype = fallbacks[start_migratetype][i]; 1105 1106 /* MIGRATE_RESERVE handled later if necessary */ 1107 if (migratetype == MIGRATE_RESERVE) 1108 break; 1109 1110 area = &(zone->free_area[current_order]); 1111 if (list_empty(&area->free_list[migratetype])) 1112 continue; 1113 1114 page = list_entry(area->free_list[migratetype].next, 1115 struct page, lru); 1116 area->nr_free--; 1117 1118 new_type = try_to_steal_freepages(zone, page, 1119 start_migratetype, 1120 migratetype); 1121 1122 /* Remove the page from the freelists */ 1123 list_del(&page->lru); 1124 rmv_page_order(page); 1125 1126 expand(zone, page, order, current_order, area, 1127 new_type); 1128 1129 trace_mm_page_alloc_extfrag(page, order, current_order, 1130 start_migratetype, migratetype, new_type); 1131 1132 return page; 1133 } 1134 } 1135 1136 return NULL; 1137 } 1138 1139 /* 1140 * Do the hard work of removing an element from the buddy allocator. 1141 * Call me with the zone->lock already held. 1142 */ 1143 static struct page *__rmqueue(struct zone *zone, unsigned int order, 1144 int migratetype) 1145 { 1146 struct page *page; 1147 1148 retry_reserve: 1149 page = __rmqueue_smallest(zone, order, migratetype); 1150 1151 if (unlikely(!page) && migratetype != MIGRATE_RESERVE) { 1152 page = __rmqueue_fallback(zone, order, migratetype); 1153 1154 /* 1155 * Use MIGRATE_RESERVE rather than fail an allocation. goto 1156 * is used because __rmqueue_smallest is an inline function 1157 * and we want just one call site 1158 */ 1159 if (!page) { 1160 migratetype = MIGRATE_RESERVE; 1161 goto retry_reserve; 1162 } 1163 } 1164 1165 trace_mm_page_alloc_zone_locked(page, order, migratetype); 1166 return page; 1167 } 1168 1169 /* 1170 * Obtain a specified number of elements from the buddy allocator, all under 1171 * a single hold of the lock, for efficiency. Add them to the supplied list. 1172 * Returns the number of new pages which were placed at *list. 
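 * Only the pages actually obtained are subtracted from NR_FREE_PAGES here;
 * the caller adjusts its pcp->count by the return value, as
 * buffered_rmqueue() does.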
1173 */ 1174 static int rmqueue_bulk(struct zone *zone, unsigned int order, 1175 unsigned long count, struct list_head *list, 1176 int migratetype, int cold) 1177 { 1178 int mt = migratetype, i; 1179 1180 spin_lock(&zone->lock); 1181 for (i = 0; i < count; ++i) { 1182 struct page *page = __rmqueue(zone, order, migratetype); 1183 if (unlikely(page == NULL)) 1184 break; 1185 1186 /* 1187 * Split buddy pages returned by expand() are received here 1188 * in physical page order. The page is added to the callers and 1189 * list and the list head then moves forward. From the callers 1190 * perspective, the linked list is ordered by page number in 1191 * some conditions. This is useful for IO devices that can 1192 * merge IO requests if the physical pages are ordered 1193 * properly. 1194 */ 1195 if (likely(cold == 0)) 1196 list_add(&page->lru, list); 1197 else 1198 list_add_tail(&page->lru, list); 1199 if (IS_ENABLED(CONFIG_CMA)) { 1200 mt = get_pageblock_migratetype(page); 1201 if (!is_migrate_cma(mt) && !is_migrate_isolate(mt)) 1202 mt = migratetype; 1203 } 1204 set_freepage_migratetype(page, mt); 1205 list = &page->lru; 1206 if (is_migrate_cma(mt)) 1207 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1208 -(1 << order)); 1209 } 1210 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 1211 spin_unlock(&zone->lock); 1212 return i; 1213 } 1214 1215 #ifdef CONFIG_NUMA 1216 /* 1217 * Called from the vmstat counter updater to drain pagesets of this 1218 * currently executing processor on remote nodes after they have 1219 * expired. 1220 * 1221 * Note that this function must be called with the thread pinned to 1222 * a single processor. 1223 */ 1224 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 1225 { 1226 unsigned long flags; 1227 int to_drain; 1228 unsigned long batch; 1229 1230 local_irq_save(flags); 1231 batch = ACCESS_ONCE(pcp->batch); 1232 if (pcp->count >= batch) 1233 to_drain = batch; 1234 else 1235 to_drain = pcp->count; 1236 if (to_drain > 0) { 1237 free_pcppages_bulk(zone, to_drain, pcp); 1238 pcp->count -= to_drain; 1239 } 1240 local_irq_restore(flags); 1241 } 1242 #endif 1243 1244 /* 1245 * Drain pages of the indicated processor. 1246 * 1247 * The processor must either be the current processor and the 1248 * thread pinned to the current processor or a processor that 1249 * is not online. 1250 */ 1251 static void drain_pages(unsigned int cpu) 1252 { 1253 unsigned long flags; 1254 struct zone *zone; 1255 1256 for_each_populated_zone(zone) { 1257 struct per_cpu_pageset *pset; 1258 struct per_cpu_pages *pcp; 1259 1260 local_irq_save(flags); 1261 pset = per_cpu_ptr(zone->pageset, cpu); 1262 1263 pcp = &pset->pcp; 1264 if (pcp->count) { 1265 free_pcppages_bulk(zone, pcp->count, pcp); 1266 pcp->count = 0; 1267 } 1268 local_irq_restore(flags); 1269 } 1270 } 1271 1272 /* 1273 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 1274 */ 1275 void drain_local_pages(void *arg) 1276 { 1277 drain_pages(smp_processor_id()); 1278 } 1279 1280 /* 1281 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 1282 * 1283 * Note that this code is protected against sending an IPI to an offline 1284 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs: 1285 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but 1286 * nothing keeps CPUs from showing up after we populated the cpumask and 1287 * before the call to on_each_cpu_mask(). 
1288 */ 1289 void drain_all_pages(void) 1290 { 1291 int cpu; 1292 struct per_cpu_pageset *pcp; 1293 struct zone *zone; 1294 1295 /* 1296 * Allocate in the BSS so we wont require allocation in 1297 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 1298 */ 1299 static cpumask_t cpus_with_pcps; 1300 1301 /* 1302 * We don't care about racing with CPU hotplug event 1303 * as offline notification will cause the notified 1304 * cpu to drain that CPU pcps and on_each_cpu_mask 1305 * disables preemption as part of its processing 1306 */ 1307 for_each_online_cpu(cpu) { 1308 bool has_pcps = false; 1309 for_each_populated_zone(zone) { 1310 pcp = per_cpu_ptr(zone->pageset, cpu); 1311 if (pcp->pcp.count) { 1312 has_pcps = true; 1313 break; 1314 } 1315 } 1316 if (has_pcps) 1317 cpumask_set_cpu(cpu, &cpus_with_pcps); 1318 else 1319 cpumask_clear_cpu(cpu, &cpus_with_pcps); 1320 } 1321 on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1); 1322 } 1323 1324 #ifdef CONFIG_HIBERNATION 1325 1326 void mark_free_pages(struct zone *zone) 1327 { 1328 unsigned long pfn, max_zone_pfn; 1329 unsigned long flags; 1330 int order, t; 1331 struct list_head *curr; 1332 1333 if (zone_is_empty(zone)) 1334 return; 1335 1336 spin_lock_irqsave(&zone->lock, flags); 1337 1338 max_zone_pfn = zone_end_pfn(zone); 1339 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 1340 if (pfn_valid(pfn)) { 1341 struct page *page = pfn_to_page(pfn); 1342 1343 if (!swsusp_page_is_forbidden(page)) 1344 swsusp_unset_page_free(page); 1345 } 1346 1347 for_each_migratetype_order(order, t) { 1348 list_for_each(curr, &zone->free_area[order].free_list[t]) { 1349 unsigned long i; 1350 1351 pfn = page_to_pfn(list_entry(curr, struct page, lru)); 1352 for (i = 0; i < (1UL << order); i++) 1353 swsusp_set_page_free(pfn_to_page(pfn + i)); 1354 } 1355 } 1356 spin_unlock_irqrestore(&zone->lock, flags); 1357 } 1358 #endif /* CONFIG_PM */ 1359 1360 /* 1361 * Free a 0-order page 1362 * cold == 1 ? free a cold page : free a hot page 1363 */ 1364 void free_hot_cold_page(struct page *page, int cold) 1365 { 1366 struct zone *zone = page_zone(page); 1367 struct per_cpu_pages *pcp; 1368 unsigned long flags; 1369 int migratetype; 1370 1371 if (!free_pages_prepare(page, 0)) 1372 return; 1373 1374 migratetype = get_pageblock_migratetype(page); 1375 set_freepage_migratetype(page, migratetype); 1376 local_irq_save(flags); 1377 __count_vm_event(PGFREE); 1378 1379 /* 1380 * We only track unmovable, reclaimable and movable on pcp lists. 1381 * Free ISOLATE pages back to the allocator because they are being 1382 * offlined but treat RESERVE as movable pages so we can get those 1383 * areas back if necessary. 
Otherwise, we may have to free 1384 * excessively into the page allocator 1385 */ 1386 if (migratetype >= MIGRATE_PCPTYPES) { 1387 if (unlikely(is_migrate_isolate(migratetype))) { 1388 free_one_page(zone, page, 0, migratetype); 1389 goto out; 1390 } 1391 migratetype = MIGRATE_MOVABLE; 1392 } 1393 1394 pcp = &this_cpu_ptr(zone->pageset)->pcp; 1395 if (cold) 1396 list_add_tail(&page->lru, &pcp->lists[migratetype]); 1397 else 1398 list_add(&page->lru, &pcp->lists[migratetype]); 1399 pcp->count++; 1400 if (pcp->count >= pcp->high) { 1401 unsigned long batch = ACCESS_ONCE(pcp->batch); 1402 free_pcppages_bulk(zone, batch, pcp); 1403 pcp->count -= batch; 1404 } 1405 1406 out: 1407 local_irq_restore(flags); 1408 } 1409 1410 /* 1411 * Free a list of 0-order pages 1412 */ 1413 void free_hot_cold_page_list(struct list_head *list, int cold) 1414 { 1415 struct page *page, *next; 1416 1417 list_for_each_entry_safe(page, next, list, lru) { 1418 trace_mm_page_free_batched(page, cold); 1419 free_hot_cold_page(page, cold); 1420 } 1421 } 1422 1423 /* 1424 * split_page takes a non-compound higher-order page, and splits it into 1425 * n (1<<order) sub-pages: page[0..n] 1426 * Each sub-page must be freed individually. 1427 * 1428 * Note: this is probably too low level an operation for use in drivers. 1429 * Please consult with lkml before using this in your driver. 1430 */ 1431 void split_page(struct page *page, unsigned int order) 1432 { 1433 int i; 1434 1435 VM_BUG_ON_PAGE(PageCompound(page), page); 1436 VM_BUG_ON_PAGE(!page_count(page), page); 1437 1438 #ifdef CONFIG_KMEMCHECK 1439 /* 1440 * Split shadow pages too, because free(page[0]) would 1441 * otherwise free the whole shadow. 1442 */ 1443 if (kmemcheck_page_is_tracked(page)) 1444 split_page(virt_to_page(page[0].shadow), order); 1445 #endif 1446 1447 for (i = 1; i < (1 << order); i++) 1448 set_page_refcounted(page + i); 1449 } 1450 EXPORT_SYMBOL_GPL(split_page); 1451 1452 static int __isolate_free_page(struct page *page, unsigned int order) 1453 { 1454 unsigned long watermark; 1455 struct zone *zone; 1456 int mt; 1457 1458 BUG_ON(!PageBuddy(page)); 1459 1460 zone = page_zone(page); 1461 mt = get_pageblock_migratetype(page); 1462 1463 if (!is_migrate_isolate(mt)) { 1464 /* Obey watermarks as if the page was being allocated */ 1465 watermark = low_wmark_pages(zone) + (1 << order); 1466 if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 1467 return 0; 1468 1469 __mod_zone_freepage_state(zone, -(1UL << order), mt); 1470 } 1471 1472 /* Remove page from free list */ 1473 list_del(&page->lru); 1474 zone->free_area[order].nr_free--; 1475 rmv_page_order(page); 1476 1477 /* Set the pageblock if the isolated page is at least a pageblock */ 1478 if (order >= pageblock_order - 1) { 1479 struct page *endpage = page + (1 << order) - 1; 1480 for (; page < endpage; page += pageblock_nr_pages) { 1481 int mt = get_pageblock_migratetype(page); 1482 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)) 1483 set_pageblock_migratetype(page, 1484 MIGRATE_MOVABLE); 1485 } 1486 } 1487 1488 return 1UL << order; 1489 } 1490 1491 /* 1492 * Similar to split_page except the page is already free. As this is only 1493 * being used for migration, the migratetype of the block also changes. 1494 * As this is called with interrupts disabled, the caller is responsible 1495 * for calling arch_alloc_page() and kernel_map_page() after interrupts 1496 * are enabled. 1497 * 1498 * Note: this is probably too low level an operation for use in drivers. 
1499 * Please consult with lkml before using this in your driver. 1500 */ 1501 int split_free_page(struct page *page) 1502 { 1503 unsigned int order; 1504 int nr_pages; 1505 1506 order = page_order(page); 1507 1508 nr_pages = __isolate_free_page(page, order); 1509 if (!nr_pages) 1510 return 0; 1511 1512 /* Split into individual pages */ 1513 set_page_refcounted(page); 1514 split_page(page, order); 1515 return nr_pages; 1516 } 1517 1518 /* 1519 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But 1520 * we cheat by calling it from here, in the order > 0 path. Saves a branch 1521 * or two. 1522 */ 1523 static inline 1524 struct page *buffered_rmqueue(struct zone *preferred_zone, 1525 struct zone *zone, int order, gfp_t gfp_flags, 1526 int migratetype) 1527 { 1528 unsigned long flags; 1529 struct page *page; 1530 int cold = !!(gfp_flags & __GFP_COLD); 1531 1532 again: 1533 if (likely(order == 0)) { 1534 struct per_cpu_pages *pcp; 1535 struct list_head *list; 1536 1537 local_irq_save(flags); 1538 pcp = &this_cpu_ptr(zone->pageset)->pcp; 1539 list = &pcp->lists[migratetype]; 1540 if (list_empty(list)) { 1541 pcp->count += rmqueue_bulk(zone, 0, 1542 pcp->batch, list, 1543 migratetype, cold); 1544 if (unlikely(list_empty(list))) 1545 goto failed; 1546 } 1547 1548 if (cold) 1549 page = list_entry(list->prev, struct page, lru); 1550 else 1551 page = list_entry(list->next, struct page, lru); 1552 1553 list_del(&page->lru); 1554 pcp->count--; 1555 } else { 1556 if (unlikely(gfp_flags & __GFP_NOFAIL)) { 1557 /* 1558 * __GFP_NOFAIL is not to be used in new code. 1559 * 1560 * All __GFP_NOFAIL callers should be fixed so that they 1561 * properly detect and handle allocation failures. 1562 * 1563 * We most definitely don't want callers attempting to 1564 * allocate greater than order-1 page units with 1565 * __GFP_NOFAIL. 
1566 */ 1567 WARN_ON_ONCE(order > 1); 1568 } 1569 spin_lock_irqsave(&zone->lock, flags); 1570 page = __rmqueue(zone, order, migratetype); 1571 spin_unlock(&zone->lock); 1572 if (!page) 1573 goto failed; 1574 __mod_zone_freepage_state(zone, -(1 << order), 1575 get_pageblock_migratetype(page)); 1576 } 1577 1578 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); 1579 1580 __count_zone_vm_events(PGALLOC, zone, 1 << order); 1581 zone_statistics(preferred_zone, zone, gfp_flags); 1582 local_irq_restore(flags); 1583 1584 VM_BUG_ON_PAGE(bad_range(zone, page), page); 1585 if (prep_new_page(page, order, gfp_flags)) 1586 goto again; 1587 return page; 1588 1589 failed: 1590 local_irq_restore(flags); 1591 return NULL; 1592 } 1593 1594 #ifdef CONFIG_FAIL_PAGE_ALLOC 1595 1596 static struct { 1597 struct fault_attr attr; 1598 1599 u32 ignore_gfp_highmem; 1600 u32 ignore_gfp_wait; 1601 u32 min_order; 1602 } fail_page_alloc = { 1603 .attr = FAULT_ATTR_INITIALIZER, 1604 .ignore_gfp_wait = 1, 1605 .ignore_gfp_highmem = 1, 1606 .min_order = 1, 1607 }; 1608 1609 static int __init setup_fail_page_alloc(char *str) 1610 { 1611 return setup_fault_attr(&fail_page_alloc.attr, str); 1612 } 1613 __setup("fail_page_alloc=", setup_fail_page_alloc); 1614 1615 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1616 { 1617 if (order < fail_page_alloc.min_order) 1618 return false; 1619 if (gfp_mask & __GFP_NOFAIL) 1620 return false; 1621 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 1622 return false; 1623 if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT)) 1624 return false; 1625 1626 return should_fail(&fail_page_alloc.attr, 1 << order); 1627 } 1628 1629 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 1630 1631 static int __init fail_page_alloc_debugfs(void) 1632 { 1633 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 1634 struct dentry *dir; 1635 1636 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 1637 &fail_page_alloc.attr); 1638 if (IS_ERR(dir)) 1639 return PTR_ERR(dir); 1640 1641 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, 1642 &fail_page_alloc.ignore_gfp_wait)) 1643 goto fail; 1644 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir, 1645 &fail_page_alloc.ignore_gfp_highmem)) 1646 goto fail; 1647 if (!debugfs_create_u32("min-order", mode, dir, 1648 &fail_page_alloc.min_order)) 1649 goto fail; 1650 1651 return 0; 1652 fail: 1653 debugfs_remove_recursive(dir); 1654 1655 return -ENOMEM; 1656 } 1657 1658 late_initcall(fail_page_alloc_debugfs); 1659 1660 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 1661 1662 #else /* CONFIG_FAIL_PAGE_ALLOC */ 1663 1664 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 1665 { 1666 return false; 1667 } 1668 1669 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 1670 1671 /* 1672 * Return true if free pages are above 'mark'. This takes into account the order 1673 * of the allocation. 
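 * For each order below the requested order, the pages on that free list are
 * discounted from the free count and the required minimum is halved, so a
 * high-order request only succeeds if enough memory remains free at every
 * intermediate order.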
 */
static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags, long free_pages)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	long lowmem_reserve = z->lowmem_reserve[classzone_idx];
	int o;
	long free_cma = 0;

	free_pages -= (1 << order) - 1;
	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;
#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	if (free_pages - free_cma <= min + lowmem_reserve)
		return false;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return false;
	}
	return true;
}

bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}

bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
								free_pages);
}

#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_MEMORY];
	return allowednodes;
}

/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 *  1) Check that the zone isn't thought to be full (doesn't have its
 *     bit set in the zonelist_cache fullzones BITMAP).
 *  2) Check that the zone's node (obtained from the zonelist_cache
 *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}

/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
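 * The bit stays set until zlc_setup() zaps the fullzones bitmap (roughly
 * once a second) or until zlc_clear_zones_full() clears it after direct
 * reclaim makes progress.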
1816 */ 1817 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1818 { 1819 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1820 int i; /* index of *z in zonelist zones */ 1821 1822 zlc = zonelist->zlcache_ptr; 1823 if (!zlc) 1824 return; 1825 1826 i = z - zonelist->_zonerefs; 1827 1828 set_bit(i, zlc->fullzones); 1829 } 1830 1831 /* 1832 * clear all zones full, called after direct reclaim makes progress so that 1833 * a zone that was recently full is not skipped over for up to a second 1834 */ 1835 static void zlc_clear_zones_full(struct zonelist *zonelist) 1836 { 1837 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1838 1839 zlc = zonelist->zlcache_ptr; 1840 if (!zlc) 1841 return; 1842 1843 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1844 } 1845 1846 static bool zone_local(struct zone *local_zone, struct zone *zone) 1847 { 1848 return local_zone->node == zone->node; 1849 } 1850 1851 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 1852 { 1853 return node_isset(local_zone->node, zone->zone_pgdat->reclaim_nodes); 1854 } 1855 1856 static void __paginginit init_zone_allows_reclaim(int nid) 1857 { 1858 int i; 1859 1860 for_each_node_state(i, N_MEMORY) 1861 if (node_distance(nid, i) <= RECLAIM_DISTANCE) 1862 node_set(i, NODE_DATA(nid)->reclaim_nodes); 1863 else 1864 zone_reclaim_mode = 1; 1865 } 1866 1867 #else /* CONFIG_NUMA */ 1868 1869 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1870 { 1871 return NULL; 1872 } 1873 1874 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1875 nodemask_t *allowednodes) 1876 { 1877 return 1; 1878 } 1879 1880 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1881 { 1882 } 1883 1884 static void zlc_clear_zones_full(struct zonelist *zonelist) 1885 { 1886 } 1887 1888 static bool zone_local(struct zone *local_zone, struct zone *zone) 1889 { 1890 return true; 1891 } 1892 1893 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 1894 { 1895 return true; 1896 } 1897 1898 static inline void init_zone_allows_reclaim(int nid) 1899 { 1900 } 1901 #endif /* CONFIG_NUMA */ 1902 1903 /* 1904 * get_page_from_freelist goes through the zonelist trying to allocate 1905 * a page. 1906 */ 1907 static struct page * 1908 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, 1909 struct zonelist *zonelist, int high_zoneidx, int alloc_flags, 1910 struct zone *preferred_zone, int migratetype) 1911 { 1912 struct zoneref *z; 1913 struct page *page = NULL; 1914 int classzone_idx; 1915 struct zone *zone; 1916 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 1917 int zlc_active = 0; /* set if using zonelist_cache */ 1918 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 1919 1920 classzone_idx = zone_idx(preferred_zone); 1921 zonelist_scan: 1922 /* 1923 * Scan zonelist, looking for a zone with enough free. 1924 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c. 
1925 */ 1926 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1927 high_zoneidx, nodemask) { 1928 unsigned long mark; 1929 1930 if (IS_ENABLED(CONFIG_NUMA) && zlc_active && 1931 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 1932 continue; 1933 if ((alloc_flags & ALLOC_CPUSET) && 1934 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 1935 continue; 1936 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 1937 if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS)) 1938 goto try_this_zone; 1939 /* 1940 * Distribute pages in proportion to the individual 1941 * zone size to ensure fair page aging. The zone a 1942 * page was allocated in should have no effect on the 1943 * time the page has in memory before being reclaimed. 1944 */ 1945 if (alloc_flags & ALLOC_FAIR) { 1946 if (!zone_local(preferred_zone, zone)) 1947 continue; 1948 if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) 1949 continue; 1950 } 1951 /* 1952 * When allocating a page cache page for writing, we 1953 * want to get it from a zone that is within its dirty 1954 * limit, such that no single zone holds more than its 1955 * proportional share of globally allowed dirty pages. 1956 * The dirty limits take into account the zone's 1957 * lowmem reserves and high watermark so that kswapd 1958 * should be able to balance it without having to 1959 * write pages from its LRU list. 1960 * 1961 * This may look like it could increase pressure on 1962 * lower zones by failing allocations in higher zones 1963 * before they are full. But the pages that do spill 1964 * over are limited as the lower zones are protected 1965 * by this very same mechanism. It should not become 1966 * a practical burden to them. 1967 * 1968 * XXX: For now, allow allocations to potentially 1969 * exceed the per-zone dirty limit in the slowpath 1970 * (ALLOC_WMARK_LOW unset) before going into reclaim, 1971 * which is important when on a NUMA setup the allowed 1972 * zones are together not big enough to reach the 1973 * global limit. The proper fix for these situations 1974 * will require awareness of zones in the 1975 * dirty-throttling and the flusher threads. 1976 */ 1977 if ((alloc_flags & ALLOC_WMARK_LOW) && 1978 (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone)) 1979 goto this_zone_full; 1980 1981 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 1982 if (!zone_watermark_ok(zone, order, mark, 1983 classzone_idx, alloc_flags)) { 1984 int ret; 1985 1986 if (IS_ENABLED(CONFIG_NUMA) && 1987 !did_zlc_setup && nr_online_nodes > 1) { 1988 /* 1989 * we do zlc_setup if there are multiple nodes 1990 * and before considering the first zone allowed 1991 * by the cpuset. 1992 */ 1993 allowednodes = zlc_setup(zonelist, alloc_flags); 1994 zlc_active = 1; 1995 did_zlc_setup = 1; 1996 } 1997 1998 if (zone_reclaim_mode == 0 || 1999 !zone_allows_reclaim(preferred_zone, zone)) 2000 goto this_zone_full; 2001 2002 /* 2003 * As we may have just activated ZLC, check if the first 2004 * eligible zone has failed zone_reclaim recently. 2005 */ 2006 if (IS_ENABLED(CONFIG_NUMA) && zlc_active && 2007 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 2008 continue; 2009 2010 ret = zone_reclaim(zone, gfp_mask, order); 2011 switch (ret) { 2012 case ZONE_RECLAIM_NOSCAN: 2013 /* did not scan */ 2014 continue; 2015 case ZONE_RECLAIM_FULL: 2016 /* scanned but unreclaimable */ 2017 continue; 2018 default: 2019 /* did we reclaim enough */ 2020 if (zone_watermark_ok(zone, order, mark, 2021 classzone_idx, alloc_flags)) 2022 goto try_this_zone; 2023 2024 /* 2025 * Failed to reclaim enough to meet watermark. 
2026 * Only mark the zone full if checking the min 2027 * watermark or if we failed to reclaim just 2028 * 1<<order pages or else the page allocator 2029 * fastpath will prematurely mark zones full 2030 * when the watermark is between the low and 2031 * min watermarks. 2032 */ 2033 if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) || 2034 ret == ZONE_RECLAIM_SOME) 2035 goto this_zone_full; 2036 2037 continue; 2038 } 2039 } 2040 2041 try_this_zone: 2042 page = buffered_rmqueue(preferred_zone, zone, order, 2043 gfp_mask, migratetype); 2044 if (page) 2045 break; 2046 this_zone_full: 2047 if (IS_ENABLED(CONFIG_NUMA)) 2048 zlc_mark_zone_full(zonelist, z); 2049 } 2050 2051 if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) { 2052 /* Disable zlc cache for second zonelist scan */ 2053 zlc_active = 0; 2054 goto zonelist_scan; 2055 } 2056 2057 if (page) 2058 /* 2059 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was 2060 * necessary to allocate the page. The expectation is 2061 * that the caller is taking steps that will free more 2062 * memory. The caller should avoid the page being used 2063 * for !PFMEMALLOC purposes. 2064 */ 2065 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); 2066 2067 return page; 2068 } 2069 2070 /* 2071 * Large machines with many possible nodes should not always dump per-node 2072 * meminfo in irq context. 2073 */ 2074 static inline bool should_suppress_show_mem(void) 2075 { 2076 bool ret = false; 2077 2078 #if NODES_SHIFT > 8 2079 ret = in_interrupt(); 2080 #endif 2081 return ret; 2082 } 2083 2084 static DEFINE_RATELIMIT_STATE(nopage_rs, 2085 DEFAULT_RATELIMIT_INTERVAL, 2086 DEFAULT_RATELIMIT_BURST); 2087 2088 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) 2089 { 2090 unsigned int filter = SHOW_MEM_FILTER_NODES; 2091 2092 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || 2093 debug_guardpage_minorder() > 0) 2094 return; 2095 2096 /* 2097 * This documents exceptions given to allocations in certain 2098 * contexts that are allowed to allocate outside current's set 2099 * of allowed nodes. 2100 */ 2101 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2102 if (test_thread_flag(TIF_MEMDIE) || 2103 (current->flags & (PF_MEMALLOC | PF_EXITING))) 2104 filter &= ~SHOW_MEM_FILTER_NODES; 2105 if (in_interrupt() || !(gfp_mask & __GFP_WAIT)) 2106 filter &= ~SHOW_MEM_FILTER_NODES; 2107 2108 if (fmt) { 2109 struct va_format vaf; 2110 va_list args; 2111 2112 va_start(args, fmt); 2113 2114 vaf.fmt = fmt; 2115 vaf.va = &args; 2116 2117 pr_warn("%pV", &vaf); 2118 2119 va_end(args); 2120 } 2121 2122 pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n", 2123 current->comm, order, gfp_mask); 2124 2125 dump_stack(); 2126 if (!should_suppress_show_mem()) 2127 show_mem(filter); 2128 } 2129 2130 static inline int 2131 should_alloc_retry(gfp_t gfp_mask, unsigned int order, 2132 unsigned long did_some_progress, 2133 unsigned long pages_reclaimed) 2134 { 2135 /* Do not loop if specifically requested */ 2136 if (gfp_mask & __GFP_NORETRY) 2137 return 0; 2138 2139 /* Always retry if specifically requested */ 2140 if (gfp_mask & __GFP_NOFAIL) 2141 return 1; 2142 2143 /* 2144 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim 2145 * making forward progress without invoking OOM. Suspend also disables 2146 * storage devices so kswapd will not help. Bail if we are suspending. 
2147 */ 2148 if (!did_some_progress && pm_suspended_storage()) 2149 return 0; 2150 2151 /* 2152 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER 2153 * means __GFP_NOFAIL, but that may not be true in other 2154 * implementations. 2155 */ 2156 if (order <= PAGE_ALLOC_COSTLY_ORDER) 2157 return 1; 2158 2159 /* 2160 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is 2161 * specified, then we retry until we no longer reclaim any pages 2162 * (above), or we've reclaimed an order of pages at least as 2163 * large as the allocation's order. In both cases, if the 2164 * allocation still fails, we stop retrying. 2165 */ 2166 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) 2167 return 1; 2168 2169 return 0; 2170 } 2171 2172 static inline struct page * 2173 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 2174 struct zonelist *zonelist, enum zone_type high_zoneidx, 2175 nodemask_t *nodemask, struct zone *preferred_zone, 2176 int migratetype) 2177 { 2178 struct page *page; 2179 2180 /* Acquire the OOM killer lock for the zones in zonelist */ 2181 if (!try_set_zonelist_oom(zonelist, gfp_mask)) { 2182 schedule_timeout_uninterruptible(1); 2183 return NULL; 2184 } 2185 2186 /* 2187 * Go through the zonelist yet one more time, keep very high watermark 2188 * here, this is only to catch a parallel oom killing, we must fail if 2189 * we're still under heavy pressure. 2190 */ 2191 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, 2192 order, zonelist, high_zoneidx, 2193 ALLOC_WMARK_HIGH|ALLOC_CPUSET, 2194 preferred_zone, migratetype); 2195 if (page) 2196 goto out; 2197 2198 if (!(gfp_mask & __GFP_NOFAIL)) { 2199 /* The OOM killer will not help higher order allocs */ 2200 if (order > PAGE_ALLOC_COSTLY_ORDER) 2201 goto out; 2202 /* The OOM killer does not needlessly kill tasks for lowmem */ 2203 if (high_zoneidx < ZONE_NORMAL) 2204 goto out; 2205 /* 2206 * GFP_THISNODE contains __GFP_NORETRY and we never hit this. 2207 * Sanity check for bare calls of __GFP_THISNODE, not real OOM. 2208 * The caller should handle page allocation failure by itself if 2209 * it specifies __GFP_THISNODE. 2210 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER. 
2211 */ 2212 if (gfp_mask & __GFP_THISNODE) 2213 goto out; 2214 } 2215 /* Exhausted what can be done so it's blamo time */ 2216 out_of_memory(zonelist, gfp_mask, order, nodemask, false); 2217 2218 out: 2219 clear_zonelist_oom(zonelist, gfp_mask); 2220 return page; 2221 } 2222 2223 #ifdef CONFIG_COMPACTION 2224 /* Try memory compaction for high-order allocations before reclaim */ 2225 static struct page * 2226 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2227 struct zonelist *zonelist, enum zone_type high_zoneidx, 2228 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2229 int migratetype, bool sync_migration, 2230 bool *contended_compaction, bool *deferred_compaction, 2231 unsigned long *did_some_progress) 2232 { 2233 if (!order) 2234 return NULL; 2235 2236 if (compaction_deferred(preferred_zone, order)) { 2237 *deferred_compaction = true; 2238 return NULL; 2239 } 2240 2241 current->flags |= PF_MEMALLOC; 2242 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, 2243 nodemask, sync_migration, 2244 contended_compaction); 2245 current->flags &= ~PF_MEMALLOC; 2246 2247 if (*did_some_progress != COMPACT_SKIPPED) { 2248 struct page *page; 2249 2250 /* Page migration frees to the PCP lists but we want merging */ 2251 drain_pages(get_cpu()); 2252 put_cpu(); 2253 2254 page = get_page_from_freelist(gfp_mask, nodemask, 2255 order, zonelist, high_zoneidx, 2256 alloc_flags & ~ALLOC_NO_WATERMARKS, 2257 preferred_zone, migratetype); 2258 if (page) { 2259 preferred_zone->compact_blockskip_flush = false; 2260 compaction_defer_reset(preferred_zone, order, true); 2261 count_vm_event(COMPACTSUCCESS); 2262 return page; 2263 } 2264 2265 /* 2266 * It's bad if compaction run occurs and fails. 2267 * The most likely reason is that pages exist, 2268 * but not enough to satisfy watermarks. 2269 */ 2270 count_vm_event(COMPACTFAIL); 2271 2272 /* 2273 * As async compaction considers a subset of pageblocks, only 2274 * defer if the failure was a sync compaction failure. 
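 * (defer_compaction() bumps the zone's deferral state, so the
 * compaction_deferred() check at the top of this function will skip
 * compaction entirely for a while on later attempts at this order.)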
2275 */ 2276 if (sync_migration) 2277 defer_compaction(preferred_zone, order); 2278 2279 cond_resched(); 2280 } 2281 2282 return NULL; 2283 } 2284 #else 2285 static inline struct page * 2286 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2287 struct zonelist *zonelist, enum zone_type high_zoneidx, 2288 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2289 int migratetype, bool sync_migration, 2290 bool *contended_compaction, bool *deferred_compaction, 2291 unsigned long *did_some_progress) 2292 { 2293 return NULL; 2294 } 2295 #endif /* CONFIG_COMPACTION */ 2296 2297 /* Perform direct synchronous page reclaim */ 2298 static int 2299 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, 2300 nodemask_t *nodemask) 2301 { 2302 struct reclaim_state reclaim_state; 2303 int progress; 2304 2305 cond_resched(); 2306 2307 /* We now go into synchronous reclaim */ 2308 cpuset_memory_pressure_bump(); 2309 current->flags |= PF_MEMALLOC; 2310 lockdep_set_current_reclaim_state(gfp_mask); 2311 reclaim_state.reclaimed_slab = 0; 2312 current->reclaim_state = &reclaim_state; 2313 2314 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); 2315 2316 current->reclaim_state = NULL; 2317 lockdep_clear_current_reclaim_state(); 2318 current->flags &= ~PF_MEMALLOC; 2319 2320 cond_resched(); 2321 2322 return progress; 2323 } 2324 2325 /* The really slow allocator path where we enter direct reclaim */ 2326 static inline struct page * 2327 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 2328 struct zonelist *zonelist, enum zone_type high_zoneidx, 2329 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2330 int migratetype, unsigned long *did_some_progress) 2331 { 2332 struct page *page = NULL; 2333 bool drained = false; 2334 2335 *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist, 2336 nodemask); 2337 if (unlikely(!(*did_some_progress))) 2338 return NULL; 2339 2340 /* After successful reclaim, reconsider all zones for allocation */ 2341 if (IS_ENABLED(CONFIG_NUMA)) 2342 zlc_clear_zones_full(zonelist); 2343 2344 retry: 2345 page = get_page_from_freelist(gfp_mask, nodemask, order, 2346 zonelist, high_zoneidx, 2347 alloc_flags & ~ALLOC_NO_WATERMARKS, 2348 preferred_zone, migratetype); 2349 2350 /* 2351 * If an allocation failed after direct reclaim, it could be because 2352 * pages are pinned on the per-cpu lists. 
Drain them and try again 2353 */ 2354 if (!page && !drained) { 2355 drain_all_pages(); 2356 drained = true; 2357 goto retry; 2358 } 2359 2360 return page; 2361 } 2362 2363 /* 2364 * This is called in the allocator slow-path if the allocation request is of 2365 * sufficient urgency to ignore watermarks and take other desperate measures 2366 */ 2367 static inline struct page * 2368 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, 2369 struct zonelist *zonelist, enum zone_type high_zoneidx, 2370 nodemask_t *nodemask, struct zone *preferred_zone, 2371 int migratetype) 2372 { 2373 struct page *page; 2374 2375 do { 2376 page = get_page_from_freelist(gfp_mask, nodemask, order, 2377 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS, 2378 preferred_zone, migratetype); 2379 2380 if (!page && gfp_mask & __GFP_NOFAIL) 2381 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 2382 } while (!page && (gfp_mask & __GFP_NOFAIL)); 2383 2384 return page; 2385 } 2386 2387 static void reset_alloc_batches(struct zonelist *zonelist, 2388 enum zone_type high_zoneidx, 2389 struct zone *preferred_zone) 2390 { 2391 struct zoneref *z; 2392 struct zone *zone; 2393 2394 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 2395 /* 2396 * Only reset the batches of zones that were actually 2397 * considered in the fairness pass, we don't want to 2398 * trash fairness information for zones that are not 2399 * actually part of this zonelist's round-robin cycle. 2400 */ 2401 if (!zone_local(preferred_zone, zone)) 2402 continue; 2403 mod_zone_page_state(zone, NR_ALLOC_BATCH, 2404 high_wmark_pages(zone) - low_wmark_pages(zone) - 2405 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); 2406 } 2407 } 2408 2409 static void wake_all_kswapds(unsigned int order, 2410 struct zonelist *zonelist, 2411 enum zone_type high_zoneidx, 2412 struct zone *preferred_zone) 2413 { 2414 struct zoneref *z; 2415 struct zone *zone; 2416 2417 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) 2418 wakeup_kswapd(zone, order, zone_idx(preferred_zone)); 2419 } 2420 2421 static inline int 2422 gfp_to_alloc_flags(gfp_t gfp_mask) 2423 { 2424 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 2425 const gfp_t wait = gfp_mask & __GFP_WAIT; 2426 2427 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 2428 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 2429 2430 /* 2431 * The caller may dip into page reserves a bit more if the caller 2432 * cannot run direct reclaim, or if the caller has realtime scheduling 2433 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 2434 * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH). 2435 */ 2436 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 2437 2438 if (!wait) { 2439 /* 2440 * Not worth trying to allocate harder for 2441 * __GFP_NOMEMALLOC even if it can't schedule. 2442 */ 2443 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2444 alloc_flags |= ALLOC_HARDER; 2445 /* 2446 * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc. 2447 * See also cpuset_zone_allowed() comment in kernel/cpuset.c. 
2448 */ 2449 alloc_flags &= ~ALLOC_CPUSET; 2450 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2451 alloc_flags |= ALLOC_HARDER; 2452 2453 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 2454 if (gfp_mask & __GFP_MEMALLOC) 2455 alloc_flags |= ALLOC_NO_WATERMARKS; 2456 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 2457 alloc_flags |= ALLOC_NO_WATERMARKS; 2458 else if (!in_interrupt() && 2459 ((current->flags & PF_MEMALLOC) || 2460 unlikely(test_thread_flag(TIF_MEMDIE)))) 2461 alloc_flags |= ALLOC_NO_WATERMARKS; 2462 } 2463 #ifdef CONFIG_CMA 2464 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 2465 alloc_flags |= ALLOC_CMA; 2466 #endif 2467 return alloc_flags; 2468 } 2469 2470 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 2471 { 2472 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS); 2473 } 2474 2475 static inline struct page * 2476 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 2477 struct zonelist *zonelist, enum zone_type high_zoneidx, 2478 nodemask_t *nodemask, struct zone *preferred_zone, 2479 int migratetype) 2480 { 2481 const gfp_t wait = gfp_mask & __GFP_WAIT; 2482 struct page *page = NULL; 2483 int alloc_flags; 2484 unsigned long pages_reclaimed = 0; 2485 unsigned long did_some_progress; 2486 bool sync_migration = false; 2487 bool deferred_compaction = false; 2488 bool contended_compaction = false; 2489 2490 /* 2491 * In the slowpath, we sanity check order to avoid ever trying to 2492 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 2493 * be using allocators in order of preference for an area that is 2494 * too large. 2495 */ 2496 if (order >= MAX_ORDER) { 2497 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 2498 return NULL; 2499 } 2500 2501 /* 2502 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 2503 * __GFP_NOWARN set) should not cause reclaim since the subsystem 2504 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 2505 * using a larger set of nodes after it has established that the 2506 * allowed per node queues are empty and that nodes are 2507 * over allocated. 2508 */ 2509 if (IS_ENABLED(CONFIG_NUMA) && 2510 (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 2511 goto nopage; 2512 2513 restart: 2514 if (!(gfp_mask & __GFP_NO_KSWAPD)) 2515 wake_all_kswapds(order, zonelist, high_zoneidx, preferred_zone); 2516 2517 /* 2518 * OK, we're below the kswapd watermark and have kicked background 2519 * reclaim. Now things get more complex, so set up alloc_flags according 2520 * to how we want to proceed. 2521 */ 2522 alloc_flags = gfp_to_alloc_flags(gfp_mask); 2523 2524 /* 2525 * Find the true preferred zone if the allocation is unconstrained by 2526 * cpusets. 2527 */ 2528 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) 2529 first_zones_zonelist(zonelist, high_zoneidx, NULL, 2530 &preferred_zone); 2531 2532 rebalance: 2533 /* This is the last chance, in general, before the goto nopage. 
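	 * This attempt still honors watermarks: ALLOC_NO_WATERMARKS is
	 * masked out here and handled separately in the block just below.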
*/ 2534 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, 2535 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, 2536 preferred_zone, migratetype); 2537 if (page) 2538 goto got_pg; 2539 2540 /* Allocate without watermarks if the context allows */ 2541 if (alloc_flags & ALLOC_NO_WATERMARKS) { 2542 /* 2543 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds 2544 * the allocation is high priority and these type of 2545 * allocations are system rather than user orientated 2546 */ 2547 zonelist = node_zonelist(numa_node_id(), gfp_mask); 2548 2549 page = __alloc_pages_high_priority(gfp_mask, order, 2550 zonelist, high_zoneidx, nodemask, 2551 preferred_zone, migratetype); 2552 if (page) { 2553 goto got_pg; 2554 } 2555 } 2556 2557 /* Atomic allocations - we can't balance anything */ 2558 if (!wait) { 2559 /* 2560 * All existing users of the deprecated __GFP_NOFAIL are 2561 * blockable, so warn of any new users that actually allow this 2562 * type of allocation to fail. 2563 */ 2564 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL); 2565 goto nopage; 2566 } 2567 2568 /* Avoid recursion of direct reclaim */ 2569 if (current->flags & PF_MEMALLOC) 2570 goto nopage; 2571 2572 /* Avoid allocations with no watermarks from looping endlessly */ 2573 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) 2574 goto nopage; 2575 2576 /* 2577 * Try direct compaction. The first pass is asynchronous. Subsequent 2578 * attempts after direct reclaim are synchronous 2579 */ 2580 page = __alloc_pages_direct_compact(gfp_mask, order, 2581 zonelist, high_zoneidx, 2582 nodemask, 2583 alloc_flags, preferred_zone, 2584 migratetype, sync_migration, 2585 &contended_compaction, 2586 &deferred_compaction, 2587 &did_some_progress); 2588 if (page) 2589 goto got_pg; 2590 sync_migration = true; 2591 2592 /* 2593 * If compaction is deferred for high-order allocations, it is because 2594 * sync compaction recently failed. In this is the case and the caller 2595 * requested a movable allocation that does not heavily disrupt the 2596 * system then fail the allocation instead of entering direct reclaim. 2597 */ 2598 if ((deferred_compaction || contended_compaction) && 2599 (gfp_mask & __GFP_NO_KSWAPD)) 2600 goto nopage; 2601 2602 /* Try direct reclaim and then allocating */ 2603 page = __alloc_pages_direct_reclaim(gfp_mask, order, 2604 zonelist, high_zoneidx, 2605 nodemask, 2606 alloc_flags, preferred_zone, 2607 migratetype, &did_some_progress); 2608 if (page) 2609 goto got_pg; 2610 2611 /* 2612 * If we failed to make any progress reclaiming, then we are 2613 * running out of options and have to consider going OOM 2614 */ 2615 if (!did_some_progress) { 2616 if (oom_gfp_allowed(gfp_mask)) { 2617 if (oom_killer_disabled) 2618 goto nopage; 2619 /* Coredumps can quickly deplete all memory reserves */ 2620 if ((current->flags & PF_DUMPCORE) && 2621 !(gfp_mask & __GFP_NOFAIL)) 2622 goto nopage; 2623 page = __alloc_pages_may_oom(gfp_mask, order, 2624 zonelist, high_zoneidx, 2625 nodemask, preferred_zone, 2626 migratetype); 2627 if (page) 2628 goto got_pg; 2629 2630 if (!(gfp_mask & __GFP_NOFAIL)) { 2631 /* 2632 * The oom killer is not called for high-order 2633 * allocations that may fail, so if no progress 2634 * is being made, there are no other options and 2635 * retrying is unlikely to help. 2636 */ 2637 if (order > PAGE_ALLOC_COSTLY_ORDER) 2638 goto nopage; 2639 /* 2640 * The oom killer is not called for lowmem 2641 * allocations to prevent needlessly killing 2642 * innocent tasks. 
2643 */ 2644 if (high_zoneidx < ZONE_NORMAL) 2645 goto nopage; 2646 } 2647 2648 goto restart; 2649 } 2650 } 2651 2652 /* Check if we should retry the allocation */ 2653 pages_reclaimed += did_some_progress; 2654 if (should_alloc_retry(gfp_mask, order, did_some_progress, 2655 pages_reclaimed)) { 2656 /* Wait for some write requests to complete then retry */ 2657 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 2658 goto rebalance; 2659 } else { 2660 /* 2661 * High-order allocations do not necessarily loop after 2662 * direct reclaim and reclaim/compaction depends on compaction 2663 * being called after reclaim so call directly if necessary 2664 */ 2665 page = __alloc_pages_direct_compact(gfp_mask, order, 2666 zonelist, high_zoneidx, 2667 nodemask, 2668 alloc_flags, preferred_zone, 2669 migratetype, sync_migration, 2670 &contended_compaction, 2671 &deferred_compaction, 2672 &did_some_progress); 2673 if (page) 2674 goto got_pg; 2675 } 2676 2677 nopage: 2678 warn_alloc_failed(gfp_mask, order, NULL); 2679 return page; 2680 got_pg: 2681 if (kmemcheck_enabled) 2682 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 2683 2684 return page; 2685 } 2686 2687 /* 2688 * This is the 'heart' of the zoned buddy allocator. 2689 */ 2690 struct page * 2691 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 2692 struct zonelist *zonelist, nodemask_t *nodemask) 2693 { 2694 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 2695 struct zone *preferred_zone; 2696 struct page *page = NULL; 2697 int migratetype = allocflags_to_migratetype(gfp_mask); 2698 unsigned int cpuset_mems_cookie; 2699 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; 2700 struct mem_cgroup *memcg = NULL; 2701 2702 gfp_mask &= gfp_allowed_mask; 2703 2704 lockdep_trace_alloc(gfp_mask); 2705 2706 might_sleep_if(gfp_mask & __GFP_WAIT); 2707 2708 if (should_fail_alloc_page(gfp_mask, order)) 2709 return NULL; 2710 2711 /* 2712 * Check the zones suitable for the gfp_mask contain at least one 2713 * valid zone. It's possible to have an empty zonelist as a result 2714 * of GFP_THISNODE and a memoryless node 2715 */ 2716 if (unlikely(!zonelist->_zonerefs->zone)) 2717 return NULL; 2718 2719 /* 2720 * Will only have any effect when __GFP_KMEMCG is set. This is 2721 * verified in the (always inline) callee 2722 */ 2723 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order)) 2724 return NULL; 2725 2726 retry_cpuset: 2727 cpuset_mems_cookie = read_mems_allowed_begin(); 2728 2729 /* The preferred zone is used for statistics later */ 2730 first_zones_zonelist(zonelist, high_zoneidx, 2731 nodemask ? : &cpuset_current_mems_allowed, 2732 &preferred_zone); 2733 if (!preferred_zone) 2734 goto out; 2735 2736 #ifdef CONFIG_CMA 2737 if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 2738 alloc_flags |= ALLOC_CMA; 2739 #endif 2740 retry: 2741 /* First allocation attempt */ 2742 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 2743 zonelist, high_zoneidx, alloc_flags, 2744 preferred_zone, migratetype); 2745 if (unlikely(!page)) { 2746 /* 2747 * The first pass makes sure allocations are spread 2748 * fairly within the local node. However, the local 2749 * node might have free pages left after the fairness 2750 * batches are exhausted, and remote zones haven't 2751 * even been considered yet. Try once more without 2752 * fairness, and include remote zones now, before 2753 * entering the slowpath and waking kswapd: prefer 2754 * spilling to a remote zone over swapping locally. 
2755 */ 2756 if (alloc_flags & ALLOC_FAIR) { 2757 reset_alloc_batches(zonelist, high_zoneidx, 2758 preferred_zone); 2759 alloc_flags &= ~ALLOC_FAIR; 2760 goto retry; 2761 } 2762 /* 2763 * Runtime PM, block IO and its error handling path 2764 * can deadlock because I/O on the device might not 2765 * complete. 2766 */ 2767 gfp_mask = memalloc_noio_flags(gfp_mask); 2768 page = __alloc_pages_slowpath(gfp_mask, order, 2769 zonelist, high_zoneidx, nodemask, 2770 preferred_zone, migratetype); 2771 } 2772 2773 trace_mm_page_alloc(page, order, gfp_mask, migratetype); 2774 2775 out: 2776 /* 2777 * When updating a task's mems_allowed, it is possible to race with 2778 * parallel threads in such a way that an allocation can fail while 2779 * the mask is being updated. If a page allocation is about to fail, 2780 * check if the cpuset changed during allocation and if so, retry. 2781 */ 2782 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2783 goto retry_cpuset; 2784 2785 memcg_kmem_commit_charge(page, memcg, order); 2786 2787 return page; 2788 } 2789 EXPORT_SYMBOL(__alloc_pages_nodemask); 2790 2791 /* 2792 * Common helper functions. 2793 */ 2794 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 2795 { 2796 struct page *page; 2797 2798 /* 2799 * __get_free_pages() returns a 32-bit address, which cannot represent 2800 * a highmem page 2801 */ 2802 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 2803 2804 page = alloc_pages(gfp_mask, order); 2805 if (!page) 2806 return 0; 2807 return (unsigned long) page_address(page); 2808 } 2809 EXPORT_SYMBOL(__get_free_pages); 2810 2811 unsigned long get_zeroed_page(gfp_t gfp_mask) 2812 { 2813 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 2814 } 2815 EXPORT_SYMBOL(get_zeroed_page); 2816 2817 void __free_pages(struct page *page, unsigned int order) 2818 { 2819 if (put_page_testzero(page)) { 2820 if (order == 0) 2821 free_hot_cold_page(page, 0); 2822 else 2823 __free_pages_ok(page, order); 2824 } 2825 } 2826 2827 EXPORT_SYMBOL(__free_pages); 2828 2829 void free_pages(unsigned long addr, unsigned int order) 2830 { 2831 if (addr != 0) { 2832 VM_BUG_ON(!virt_addr_valid((void *)addr)); 2833 __free_pages(virt_to_page((void *)addr), order); 2834 } 2835 } 2836 2837 EXPORT_SYMBOL(free_pages); 2838 2839 /* 2840 * __free_memcg_kmem_pages and free_memcg_kmem_pages will free 2841 * pages allocated with __GFP_KMEMCG. 2842 * 2843 * Those pages are accounted to a particular memcg, embedded in the 2844 * corresponding page_cgroup. To avoid adding a hit in the allocator to search 2845 * for that information only to find out that it is NULL for users who have no 2846 * interest in that whatsoever, we provide these functions. 2847 * 2848 * The caller knows better which flags it relies on. 
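 *
 * Minimal usage sketch (illustrative only, not a caller in this file):
 * a page obtained with __GFP_KMEMCG is returned through these helpers so
 * that the memcg charge is released along with the page:
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_KMEMCG, order);
 *	...
 *	__free_memcg_kmem_pages(page, order);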
2849 */ 2850 void __free_memcg_kmem_pages(struct page *page, unsigned int order) 2851 { 2852 memcg_kmem_uncharge_pages(page, order); 2853 __free_pages(page, order); 2854 } 2855 2856 void free_memcg_kmem_pages(unsigned long addr, unsigned int order) 2857 { 2858 if (addr != 0) { 2859 VM_BUG_ON(!virt_addr_valid((void *)addr)); 2860 __free_memcg_kmem_pages(virt_to_page((void *)addr), order); 2861 } 2862 } 2863 2864 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size) 2865 { 2866 if (addr) { 2867 unsigned long alloc_end = addr + (PAGE_SIZE << order); 2868 unsigned long used = addr + PAGE_ALIGN(size); 2869 2870 split_page(virt_to_page((void *)addr), order); 2871 while (used < alloc_end) { 2872 free_page(used); 2873 used += PAGE_SIZE; 2874 } 2875 } 2876 return (void *)addr; 2877 } 2878 2879 /** 2880 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 2881 * @size: the number of bytes to allocate 2882 * @gfp_mask: GFP flags for the allocation 2883 * 2884 * This function is similar to alloc_pages(), except that it allocates the 2885 * minimum number of pages to satisfy the request. alloc_pages() can only 2886 * allocate memory in power-of-two pages. 2887 * 2888 * This function is also limited by MAX_ORDER. 2889 * 2890 * Memory allocated by this function must be released by free_pages_exact(). 2891 */ 2892 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 2893 { 2894 unsigned int order = get_order(size); 2895 unsigned long addr; 2896 2897 addr = __get_free_pages(gfp_mask, order); 2898 return make_alloc_exact(addr, order, size); 2899 } 2900 EXPORT_SYMBOL(alloc_pages_exact); 2901 2902 /** 2903 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 2904 * pages on a node. 2905 * @nid: the preferred node ID where memory should be allocated 2906 * @size: the number of bytes to allocate 2907 * @gfp_mask: GFP flags for the allocation 2908 * 2909 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 2910 * back. 2911 * Note this is not alloc_pages_exact_node() which allocates on a specific node, 2912 * but is not exact. 2913 */ 2914 void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 2915 { 2916 unsigned order = get_order(size); 2917 struct page *p = alloc_pages_node(nid, gfp_mask, order); 2918 if (!p) 2919 return NULL; 2920 return make_alloc_exact((unsigned long)page_address(p), order, size); 2921 } 2922 EXPORT_SYMBOL(alloc_pages_exact_nid); 2923 2924 /** 2925 * free_pages_exact - release memory allocated via alloc_pages_exact() 2926 * @virt: the value returned by alloc_pages_exact. 2927 * @size: size of allocation, same value as passed to alloc_pages_exact(). 2928 * 2929 * Release the memory allocated by a previous call to alloc_pages_exact. 2930 */ 2931 void free_pages_exact(void *virt, size_t size) 2932 { 2933 unsigned long addr = (unsigned long)virt; 2934 unsigned long end = addr + PAGE_ALIGN(size); 2935 2936 while (addr < end) { 2937 free_page(addr); 2938 addr += PAGE_SIZE; 2939 } 2940 } 2941 EXPORT_SYMBOL(free_pages_exact); 2942 2943 /** 2944 * nr_free_zone_pages - count number of pages beyond high watermark 2945 * @offset: The zone index of the highest zone 2946 * 2947 * nr_free_zone_pages() counts the number of counts pages which are beyond the 2948 * high watermark within all zones at or below a given zone index. 
For each 2949 * zone, the number of pages is calculated as: 2950 * managed_pages - high_pages 2951 */ 2952 static unsigned long nr_free_zone_pages(int offset) 2953 { 2954 struct zoneref *z; 2955 struct zone *zone; 2956 2957 /* Just pick one node, since fallback list is circular */ 2958 unsigned long sum = 0; 2959 2960 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 2961 2962 for_each_zone_zonelist(zone, z, zonelist, offset) { 2963 unsigned long size = zone->managed_pages; 2964 unsigned long high = high_wmark_pages(zone); 2965 if (size > high) 2966 sum += size - high; 2967 } 2968 2969 return sum; 2970 } 2971 2972 /** 2973 * nr_free_buffer_pages - count number of pages beyond high watermark 2974 * 2975 * nr_free_buffer_pages() counts the number of pages which are beyond the high 2976 * watermark within ZONE_DMA and ZONE_NORMAL. 2977 */ 2978 unsigned long nr_free_buffer_pages(void) 2979 { 2980 return nr_free_zone_pages(gfp_zone(GFP_USER)); 2981 } 2982 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 2983 2984 /** 2985 * nr_free_pagecache_pages - count number of pages beyond high watermark 2986 * 2987 * nr_free_pagecache_pages() counts the number of pages which are beyond the 2988 * high watermark within all zones. 2989 */ 2990 unsigned long nr_free_pagecache_pages(void) 2991 { 2992 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 2993 } 2994 2995 static inline void show_node(struct zone *zone) 2996 { 2997 if (IS_ENABLED(CONFIG_NUMA)) 2998 printk("Node %d ", zone_to_nid(zone)); 2999 } 3000 3001 void si_meminfo(struct sysinfo *val) 3002 { 3003 val->totalram = totalram_pages; 3004 val->sharedram = 0; 3005 val->freeram = global_page_state(NR_FREE_PAGES); 3006 val->bufferram = nr_blockdev_pages(); 3007 val->totalhigh = totalhigh_pages; 3008 val->freehigh = nr_free_highpages(); 3009 val->mem_unit = PAGE_SIZE; 3010 } 3011 3012 EXPORT_SYMBOL(si_meminfo); 3013 3014 #ifdef CONFIG_NUMA 3015 void si_meminfo_node(struct sysinfo *val, int nid) 3016 { 3017 int zone_type; /* needs to be signed */ 3018 unsigned long managed_pages = 0; 3019 pg_data_t *pgdat = NODE_DATA(nid); 3020 3021 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 3022 managed_pages += pgdat->node_zones[zone_type].managed_pages; 3023 val->totalram = managed_pages; 3024 val->freeram = node_page_state(nid, NR_FREE_PAGES); 3025 #ifdef CONFIG_HIGHMEM 3026 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages; 3027 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 3028 NR_FREE_PAGES); 3029 #else 3030 val->totalhigh = 0; 3031 val->freehigh = 0; 3032 #endif 3033 val->mem_unit = PAGE_SIZE; 3034 } 3035 #endif 3036 3037 /* 3038 * Determine whether the node should be displayed or not, depending on whether 3039 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 
3040 */ 3041 bool skip_free_areas_node(unsigned int flags, int nid) 3042 { 3043 bool ret = false; 3044 unsigned int cpuset_mems_cookie; 3045 3046 if (!(flags & SHOW_MEM_FILTER_NODES)) 3047 goto out; 3048 3049 do { 3050 cpuset_mems_cookie = read_mems_allowed_begin(); 3051 ret = !node_isset(nid, cpuset_current_mems_allowed); 3052 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 3053 out: 3054 return ret; 3055 } 3056 3057 #define K(x) ((x) << (PAGE_SHIFT-10)) 3058 3059 static void show_migration_types(unsigned char type) 3060 { 3061 static const char types[MIGRATE_TYPES] = { 3062 [MIGRATE_UNMOVABLE] = 'U', 3063 [MIGRATE_RECLAIMABLE] = 'E', 3064 [MIGRATE_MOVABLE] = 'M', 3065 [MIGRATE_RESERVE] = 'R', 3066 #ifdef CONFIG_CMA 3067 [MIGRATE_CMA] = 'C', 3068 #endif 3069 #ifdef CONFIG_MEMORY_ISOLATION 3070 [MIGRATE_ISOLATE] = 'I', 3071 #endif 3072 }; 3073 char tmp[MIGRATE_TYPES + 1]; 3074 char *p = tmp; 3075 int i; 3076 3077 for (i = 0; i < MIGRATE_TYPES; i++) { 3078 if (type & (1 << i)) 3079 *p++ = types[i]; 3080 } 3081 3082 *p = '\0'; 3083 printk("(%s) ", tmp); 3084 } 3085 3086 /* 3087 * Show free area list (used inside shift_scroll-lock stuff) 3088 * We also calculate the percentage fragmentation. We do this by counting the 3089 * memory on each free list with the exception of the first item on the list. 3090 * Suppresses nodes that are not allowed by current's cpuset if 3091 * SHOW_MEM_FILTER_NODES is passed. 3092 */ 3093 void show_free_areas(unsigned int filter) 3094 { 3095 int cpu; 3096 struct zone *zone; 3097 3098 for_each_populated_zone(zone) { 3099 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3100 continue; 3101 show_node(zone); 3102 printk("%s per-cpu:\n", zone->name); 3103 3104 for_each_online_cpu(cpu) { 3105 struct per_cpu_pageset *pageset; 3106 3107 pageset = per_cpu_ptr(zone->pageset, cpu); 3108 3109 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 3110 cpu, pageset->pcp.high, 3111 pageset->pcp.batch, pageset->pcp.count); 3112 } 3113 } 3114 3115 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 3116 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 3117 " unevictable:%lu" 3118 " dirty:%lu writeback:%lu unstable:%lu\n" 3119 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" 3120 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 3121 " free_cma:%lu\n", 3122 global_page_state(NR_ACTIVE_ANON), 3123 global_page_state(NR_INACTIVE_ANON), 3124 global_page_state(NR_ISOLATED_ANON), 3125 global_page_state(NR_ACTIVE_FILE), 3126 global_page_state(NR_INACTIVE_FILE), 3127 global_page_state(NR_ISOLATED_FILE), 3128 global_page_state(NR_UNEVICTABLE), 3129 global_page_state(NR_FILE_DIRTY), 3130 global_page_state(NR_WRITEBACK), 3131 global_page_state(NR_UNSTABLE_NFS), 3132 global_page_state(NR_FREE_PAGES), 3133 global_page_state(NR_SLAB_RECLAIMABLE), 3134 global_page_state(NR_SLAB_UNRECLAIMABLE), 3135 global_page_state(NR_FILE_MAPPED), 3136 global_page_state(NR_SHMEM), 3137 global_page_state(NR_PAGETABLE), 3138 global_page_state(NR_BOUNCE), 3139 global_page_state(NR_FREE_CMA_PAGES)); 3140 3141 for_each_populated_zone(zone) { 3142 int i; 3143 3144 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3145 continue; 3146 show_node(zone); 3147 printk("%s" 3148 " free:%lukB" 3149 " min:%lukB" 3150 " low:%lukB" 3151 " high:%lukB" 3152 " active_anon:%lukB" 3153 " inactive_anon:%lukB" 3154 " active_file:%lukB" 3155 " inactive_file:%lukB" 3156 " unevictable:%lukB" 3157 " isolated(anon):%lukB" 3158 " isolated(file):%lukB" 3159 " present:%lukB" 3160 " managed:%lukB" 3161 " 
mlocked:%lukB" 3162 " dirty:%lukB" 3163 " writeback:%lukB" 3164 " mapped:%lukB" 3165 " shmem:%lukB" 3166 " slab_reclaimable:%lukB" 3167 " slab_unreclaimable:%lukB" 3168 " kernel_stack:%lukB" 3169 " pagetables:%lukB" 3170 " unstable:%lukB" 3171 " bounce:%lukB" 3172 " free_cma:%lukB" 3173 " writeback_tmp:%lukB" 3174 " pages_scanned:%lu" 3175 " all_unreclaimable? %s" 3176 "\n", 3177 zone->name, 3178 K(zone_page_state(zone, NR_FREE_PAGES)), 3179 K(min_wmark_pages(zone)), 3180 K(low_wmark_pages(zone)), 3181 K(high_wmark_pages(zone)), 3182 K(zone_page_state(zone, NR_ACTIVE_ANON)), 3183 K(zone_page_state(zone, NR_INACTIVE_ANON)), 3184 K(zone_page_state(zone, NR_ACTIVE_FILE)), 3185 K(zone_page_state(zone, NR_INACTIVE_FILE)), 3186 K(zone_page_state(zone, NR_UNEVICTABLE)), 3187 K(zone_page_state(zone, NR_ISOLATED_ANON)), 3188 K(zone_page_state(zone, NR_ISOLATED_FILE)), 3189 K(zone->present_pages), 3190 K(zone->managed_pages), 3191 K(zone_page_state(zone, NR_MLOCK)), 3192 K(zone_page_state(zone, NR_FILE_DIRTY)), 3193 K(zone_page_state(zone, NR_WRITEBACK)), 3194 K(zone_page_state(zone, NR_FILE_MAPPED)), 3195 K(zone_page_state(zone, NR_SHMEM)), 3196 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 3197 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 3198 zone_page_state(zone, NR_KERNEL_STACK) * 3199 THREAD_SIZE / 1024, 3200 K(zone_page_state(zone, NR_PAGETABLE)), 3201 K(zone_page_state(zone, NR_UNSTABLE_NFS)), 3202 K(zone_page_state(zone, NR_BOUNCE)), 3203 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), 3204 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 3205 zone->pages_scanned, 3206 (!zone_reclaimable(zone) ? "yes" : "no") 3207 ); 3208 printk("lowmem_reserve[]:"); 3209 for (i = 0; i < MAX_NR_ZONES; i++) 3210 printk(" %lu", zone->lowmem_reserve[i]); 3211 printk("\n"); 3212 } 3213 3214 for_each_populated_zone(zone) { 3215 unsigned long nr[MAX_ORDER], flags, order, total = 0; 3216 unsigned char types[MAX_ORDER]; 3217 3218 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3219 continue; 3220 show_node(zone); 3221 printk("%s: ", zone->name); 3222 3223 spin_lock_irqsave(&zone->lock, flags); 3224 for (order = 0; order < MAX_ORDER; order++) { 3225 struct free_area *area = &zone->free_area[order]; 3226 int type; 3227 3228 nr[order] = area->nr_free; 3229 total += nr[order] << order; 3230 3231 types[order] = 0; 3232 for (type = 0; type < MIGRATE_TYPES; type++) { 3233 if (!list_empty(&area->free_list[type])) 3234 types[order] |= 1 << type; 3235 } 3236 } 3237 spin_unlock_irqrestore(&zone->lock, flags); 3238 for (order = 0; order < MAX_ORDER; order++) { 3239 printk("%lu*%lukB ", nr[order], K(1UL) << order); 3240 if (nr[order]) 3241 show_migration_types(types[order]); 3242 } 3243 printk("= %lukB\n", K(total)); 3244 } 3245 3246 hugetlb_show_meminfo(); 3247 3248 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 3249 3250 show_swap_cache_info(); 3251 } 3252 3253 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 3254 { 3255 zoneref->zone = zone; 3256 zoneref->zone_idx = zone_idx(zone); 3257 } 3258 3259 /* 3260 * Builds allocation fallback zone lists. 3261 * 3262 * Add all populated zones of a node to the zonelist. 
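 *
 * Zones are added from the highest populated zone downwards, so on a
 * node with (say) only DMA, Normal and HighMem populated the new
 * zonerefs read HighMem, Normal, DMA in that order.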
3263 */ 3264 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 3265 int nr_zones) 3266 { 3267 struct zone *zone; 3268 enum zone_type zone_type = MAX_NR_ZONES; 3269 3270 do { 3271 zone_type--; 3272 zone = pgdat->node_zones + zone_type; 3273 if (populated_zone(zone)) { 3274 zoneref_set_zone(zone, 3275 &zonelist->_zonerefs[nr_zones++]); 3276 check_highest_zone(zone_type); 3277 } 3278 } while (zone_type); 3279 3280 return nr_zones; 3281 } 3282 3283 3284 /* 3285 * zonelist_order: 3286 * 0 = automatic detection of better ordering. 3287 * 1 = order by ([node] distance, -zonetype) 3288 * 2 = order by (-zonetype, [node] distance) 3289 * 3290 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 3291 * the same zonelist. So only NUMA can configure this param. 3292 */ 3293 #define ZONELIST_ORDER_DEFAULT 0 3294 #define ZONELIST_ORDER_NODE 1 3295 #define ZONELIST_ORDER_ZONE 2 3296 3297 /* zonelist order in the kernel. 3298 * set_zonelist_order() will set this to NODE or ZONE. 3299 */ 3300 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 3301 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 3302 3303 3304 #ifdef CONFIG_NUMA 3305 /* The value user specified ....changed by config */ 3306 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 3307 /* string for sysctl */ 3308 #define NUMA_ZONELIST_ORDER_LEN 16 3309 char numa_zonelist_order[16] = "default"; 3310 3311 /* 3312 * interface for configure zonelist ordering. 3313 * command line option "numa_zonelist_order" 3314 * = "[dD]efault - default, automatic configuration. 3315 * = "[nN]ode - order by node locality, then by zone within node 3316 * = "[zZ]one - order by zone, then by locality within zone 3317 */ 3318 3319 static int __parse_numa_zonelist_order(char *s) 3320 { 3321 if (*s == 'd' || *s == 'D') { 3322 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 3323 } else if (*s == 'n' || *s == 'N') { 3324 user_zonelist_order = ZONELIST_ORDER_NODE; 3325 } else if (*s == 'z' || *s == 'Z') { 3326 user_zonelist_order = ZONELIST_ORDER_ZONE; 3327 } else { 3328 printk(KERN_WARNING 3329 "Ignoring invalid numa_zonelist_order value: " 3330 "%s\n", s); 3331 return -EINVAL; 3332 } 3333 return 0; 3334 } 3335 3336 static __init int setup_numa_zonelist_order(char *s) 3337 { 3338 int ret; 3339 3340 if (!s) 3341 return 0; 3342 3343 ret = __parse_numa_zonelist_order(s); 3344 if (ret == 0) 3345 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); 3346 3347 return ret; 3348 } 3349 early_param("numa_zonelist_order", setup_numa_zonelist_order); 3350 3351 /* 3352 * sysctl handler for numa_zonelist_order 3353 */ 3354 int numa_zonelist_order_handler(ctl_table *table, int write, 3355 void __user *buffer, size_t *length, 3356 loff_t *ppos) 3357 { 3358 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 3359 int ret; 3360 static DEFINE_MUTEX(zl_order_mutex); 3361 3362 mutex_lock(&zl_order_mutex); 3363 if (write) { 3364 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) { 3365 ret = -EINVAL; 3366 goto out; 3367 } 3368 strcpy(saved_string, (char *)table->data); 3369 } 3370 ret = proc_dostring(table, write, buffer, length, ppos); 3371 if (ret) 3372 goto out; 3373 if (write) { 3374 int oldval = user_zonelist_order; 3375 3376 ret = __parse_numa_zonelist_order((char *)table->data); 3377 if (ret) { 3378 /* 3379 * bogus value. 
restore saved string 3380 */ 3381 strncpy((char *)table->data, saved_string, 3382 NUMA_ZONELIST_ORDER_LEN); 3383 user_zonelist_order = oldval; 3384 } else if (oldval != user_zonelist_order) { 3385 mutex_lock(&zonelists_mutex); 3386 build_all_zonelists(NULL, NULL); 3387 mutex_unlock(&zonelists_mutex); 3388 } 3389 } 3390 out: 3391 mutex_unlock(&zl_order_mutex); 3392 return ret; 3393 } 3394 3395 3396 #define MAX_NODE_LOAD (nr_online_nodes) 3397 static int node_load[MAX_NUMNODES]; 3398 3399 /** 3400 * find_next_best_node - find the next node that should appear in a given node's fallback list 3401 * @node: node whose fallback list we're appending 3402 * @used_node_mask: nodemask_t of already used nodes 3403 * 3404 * We use a number of factors to determine which is the next node that should 3405 * appear on a given node's fallback list. The node should not have appeared 3406 * already in @node's fallback list, and it should be the next closest node 3407 * according to the distance array (which contains arbitrary distance values 3408 * from each node to each node in the system), and should also prefer nodes 3409 * with no CPUs, since presumably they'll have very little allocation pressure 3410 * on them otherwise. 3411 * It returns -1 if no node is found. 3412 */ 3413 static int find_next_best_node(int node, nodemask_t *used_node_mask) 3414 { 3415 int n, val; 3416 int min_val = INT_MAX; 3417 int best_node = NUMA_NO_NODE; 3418 const struct cpumask *tmp = cpumask_of_node(0); 3419 3420 /* Use the local node if we haven't already */ 3421 if (!node_isset(node, *used_node_mask)) { 3422 node_set(node, *used_node_mask); 3423 return node; 3424 } 3425 3426 for_each_node_state(n, N_MEMORY) { 3427 3428 /* Don't want a node to appear more than once */ 3429 if (node_isset(n, *used_node_mask)) 3430 continue; 3431 3432 /* Use the distance array to find the distance */ 3433 val = node_distance(node, n); 3434 3435 /* Penalize nodes under us ("prefer the next node") */ 3436 val += (n < node); 3437 3438 /* Give preference to headless and unused nodes */ 3439 tmp = cpumask_of_node(n); 3440 if (!cpumask_empty(tmp)) 3441 val += PENALTY_FOR_NODE_WITH_CPUS; 3442 3443 /* Slight preference for less loaded node */ 3444 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 3445 val += node_load[n]; 3446 3447 if (val < min_val) { 3448 min_val = val; 3449 best_node = n; 3450 } 3451 } 3452 3453 if (best_node >= 0) 3454 node_set(best_node, *used_node_mask); 3455 3456 return best_node; 3457 } 3458 3459 3460 /* 3461 * Build zonelists ordered by node and zones within node. 3462 * This results in maximum locality--normal zone overflows into local 3463 * DMA zone, if any--but risks exhausting DMA zone. 3464 */ 3465 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 3466 { 3467 int j; 3468 struct zonelist *zonelist; 3469 3470 zonelist = &pgdat->node_zonelists[0]; 3471 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 3472 ; 3473 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3474 zonelist->_zonerefs[j].zone = NULL; 3475 zonelist->_zonerefs[j].zone_idx = 0; 3476 } 3477 3478 /* 3479 * Build gfp_thisnode zonelists 3480 */ 3481 static void build_thisnode_zonelists(pg_data_t *pgdat) 3482 { 3483 int j; 3484 struct zonelist *zonelist; 3485 3486 zonelist = &pgdat->node_zonelists[1]; 3487 j = build_zonelists_node(pgdat, zonelist, 0); 3488 zonelist->_zonerefs[j].zone = NULL; 3489 zonelist->_zonerefs[j].zone_idx = 0; 3490 } 3491 3492 /* 3493 * Build zonelists ordered by zone and nodes within zones. 
3494 * This results in conserving DMA zone[s] until all Normal memory is 3495 * exhausted, but results in overflowing to remote node while memory 3496 * may still exist in local DMA zone. 3497 */ 3498 static int node_order[MAX_NUMNODES]; 3499 3500 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 3501 { 3502 int pos, j, node; 3503 int zone_type; /* needs to be signed */ 3504 struct zone *z; 3505 struct zonelist *zonelist; 3506 3507 zonelist = &pgdat->node_zonelists[0]; 3508 pos = 0; 3509 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 3510 for (j = 0; j < nr_nodes; j++) { 3511 node = node_order[j]; 3512 z = &NODE_DATA(node)->node_zones[zone_type]; 3513 if (populated_zone(z)) { 3514 zoneref_set_zone(z, 3515 &zonelist->_zonerefs[pos++]); 3516 check_highest_zone(zone_type); 3517 } 3518 } 3519 } 3520 zonelist->_zonerefs[pos].zone = NULL; 3521 zonelist->_zonerefs[pos].zone_idx = 0; 3522 } 3523 3524 static int default_zonelist_order(void) 3525 { 3526 int nid, zone_type; 3527 unsigned long low_kmem_size, total_size; 3528 struct zone *z; 3529 int average_size; 3530 /* 3531 * ZONE_DMA and ZONE_DMA32 can be very small area in the system. 3532 * If they are really small and used heavily, the system can fall 3533 * into OOM very easily. 3534 * This function detect ZONE_DMA/DMA32 size and configures zone order. 3535 */ 3536 /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */ 3537 low_kmem_size = 0; 3538 total_size = 0; 3539 for_each_online_node(nid) { 3540 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 3541 z = &NODE_DATA(nid)->node_zones[zone_type]; 3542 if (populated_zone(z)) { 3543 if (zone_type < ZONE_NORMAL) 3544 low_kmem_size += z->managed_pages; 3545 total_size += z->managed_pages; 3546 } else if (zone_type == ZONE_NORMAL) { 3547 /* 3548 * If any node has only lowmem, then node order 3549 * is preferred to allow kernel allocations 3550 * locally; otherwise, they can easily infringe 3551 * on other nodes when there is an abundance of 3552 * lowmem available to allocate from. 3553 */ 3554 return ZONELIST_ORDER_NODE; 3555 } 3556 } 3557 } 3558 if (!low_kmem_size || /* there are no DMA area. */ 3559 low_kmem_size > total_size/2) /* DMA/DMA32 is big. */ 3560 return ZONELIST_ORDER_NODE; 3561 /* 3562 * look into each node's config. 3563 * If there is a node whose DMA/DMA32 memory is very big area on 3564 * local memory, NODE_ORDER may be suitable. 
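 *
 * Worked example of the per-node check below: a node where more than 70%
 * of its memory sits in DMA/DMA32 (say 800MB out of 1GB) switches the
 * whole system to node order, provided that node is larger than the
 * average node size (small nodes are ignored).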
3565 */ 3566 average_size = total_size / 3567 (nodes_weight(node_states[N_MEMORY]) + 1); 3568 for_each_online_node(nid) { 3569 low_kmem_size = 0; 3570 total_size = 0; 3571 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 3572 z = &NODE_DATA(nid)->node_zones[zone_type]; 3573 if (populated_zone(z)) { 3574 if (zone_type < ZONE_NORMAL) 3575 low_kmem_size += z->present_pages; 3576 total_size += z->present_pages; 3577 } 3578 } 3579 if (low_kmem_size && 3580 total_size > average_size && /* ignore small node */ 3581 low_kmem_size > total_size * 70/100) 3582 return ZONELIST_ORDER_NODE; 3583 } 3584 return ZONELIST_ORDER_ZONE; 3585 } 3586 3587 static void set_zonelist_order(void) 3588 { 3589 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 3590 current_zonelist_order = default_zonelist_order(); 3591 else 3592 current_zonelist_order = user_zonelist_order; 3593 } 3594 3595 static void build_zonelists(pg_data_t *pgdat) 3596 { 3597 int j, node, load; 3598 enum zone_type i; 3599 nodemask_t used_mask; 3600 int local_node, prev_node; 3601 struct zonelist *zonelist; 3602 int order = current_zonelist_order; 3603 3604 /* initialize zonelists */ 3605 for (i = 0; i < MAX_ZONELISTS; i++) { 3606 zonelist = pgdat->node_zonelists + i; 3607 zonelist->_zonerefs[0].zone = NULL; 3608 zonelist->_zonerefs[0].zone_idx = 0; 3609 } 3610 3611 /* NUMA-aware ordering of nodes */ 3612 local_node = pgdat->node_id; 3613 load = nr_online_nodes; 3614 prev_node = local_node; 3615 nodes_clear(used_mask); 3616 3617 memset(node_order, 0, sizeof(node_order)); 3618 j = 0; 3619 3620 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 3621 /* 3622 * We don't want to pressure a particular node. 3623 * So adding penalty to the first node in same 3624 * distance group to make it round-robin. 3625 */ 3626 if (node_distance(local_node, node) != 3627 node_distance(local_node, prev_node)) 3628 node_load[node] = load; 3629 3630 prev_node = node; 3631 load--; 3632 if (order == ZONELIST_ORDER_NODE) 3633 build_zonelists_in_node_order(pgdat, node); 3634 else 3635 node_order[j++] = node; /* remember order */ 3636 } 3637 3638 if (order == ZONELIST_ORDER_ZONE) { 3639 /* calculate node order -- i.e., DMA last! */ 3640 build_zonelists_in_zone_order(pgdat, j); 3641 } 3642 3643 build_thisnode_zonelists(pgdat); 3644 } 3645 3646 /* Construct the zonelist performance cache - see further mmzone.h */ 3647 static void build_zonelist_cache(pg_data_t *pgdat) 3648 { 3649 struct zonelist *zonelist; 3650 struct zonelist_cache *zlc; 3651 struct zoneref *z; 3652 3653 zonelist = &pgdat->node_zonelists[0]; 3654 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 3655 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 3656 for (z = zonelist->_zonerefs; z->zone; z++) 3657 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 3658 } 3659 3660 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 3661 /* 3662 * Return node id of node used for "local" allocations. 3663 * I.e., first node id of first zone in arg node's generic zonelist. 3664 * Used for initializing percpu 'numa_mem', which is used primarily 3665 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 
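 * For a memoryless node this resolves to the nearest node that actually
 * has memory (the node of the first zone in its zonelist), which is
 * exactly the value percpu 'numa_mem' is meant to hold.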
3666 */ 3667 int local_memory_node(int node) 3668 { 3669 struct zone *zone; 3670 3671 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 3672 gfp_zone(GFP_KERNEL), 3673 NULL, 3674 &zone); 3675 return zone->node; 3676 } 3677 #endif 3678 3679 #else /* CONFIG_NUMA */ 3680 3681 static void set_zonelist_order(void) 3682 { 3683 current_zonelist_order = ZONELIST_ORDER_ZONE; 3684 } 3685 3686 static void build_zonelists(pg_data_t *pgdat) 3687 { 3688 int node, local_node; 3689 enum zone_type j; 3690 struct zonelist *zonelist; 3691 3692 local_node = pgdat->node_id; 3693 3694 zonelist = &pgdat->node_zonelists[0]; 3695 j = build_zonelists_node(pgdat, zonelist, 0); 3696 3697 /* 3698 * Now we build the zonelist so that it contains the zones 3699 * of all the other nodes. 3700 * We don't want to pressure a particular node, so when 3701 * building the zones for node N, we make sure that the 3702 * zones coming right after the local ones are those from 3703 * node N+1 (modulo N) 3704 */ 3705 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 3706 if (!node_online(node)) 3707 continue; 3708 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3709 } 3710 for (node = 0; node < local_node; node++) { 3711 if (!node_online(node)) 3712 continue; 3713 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3714 } 3715 3716 zonelist->_zonerefs[j].zone = NULL; 3717 zonelist->_zonerefs[j].zone_idx = 0; 3718 } 3719 3720 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 3721 static void build_zonelist_cache(pg_data_t *pgdat) 3722 { 3723 pgdat->node_zonelists[0].zlcache_ptr = NULL; 3724 } 3725 3726 #endif /* CONFIG_NUMA */ 3727 3728 /* 3729 * Boot pageset table. One per cpu which is going to be used for all 3730 * zones and all nodes. The parameters will be set in such a way 3731 * that an item put on a list will immediately be handed over to 3732 * the buddy list. This is safe since pageset manipulation is done 3733 * with interrupts disabled. 3734 * 3735 * The boot_pagesets must be kept even after bootup is complete for 3736 * unused processors and/or zones. They do play a role for bootstrapping 3737 * hotplugged processors. 3738 * 3739 * zoneinfo_show() and maybe other functions do 3740 * not check if the processor is online before following the pageset pointer. 3741 * Other parts of the kernel may not check if the zone is available. 3742 */ 3743 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 3744 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 3745 static void setup_zone_pageset(struct zone *zone); 3746 3747 /* 3748 * Global mutex to protect against size modification of zonelists 3749 * as well as to serialize pageset setup for the new populated zone. 3750 */ 3751 DEFINE_MUTEX(zonelists_mutex); 3752 3753 /* return values int ....just for stop_machine() */ 3754 static int __build_all_zonelists(void *data) 3755 { 3756 int nid; 3757 int cpu; 3758 pg_data_t *self = data; 3759 3760 #ifdef CONFIG_NUMA 3761 memset(node_load, 0, sizeof(node_load)); 3762 #endif 3763 3764 if (self && !node_online(self->node_id)) { 3765 build_zonelists(self); 3766 build_zonelist_cache(self); 3767 } 3768 3769 for_each_online_node(nid) { 3770 pg_data_t *pgdat = NODE_DATA(nid); 3771 3772 build_zonelists(pgdat); 3773 build_zonelist_cache(pgdat); 3774 } 3775 3776 /* 3777 * Initialize the boot_pagesets that are going to be used 3778 * for bootstrapping processors. 
The real pagesets for 3779 * each zone will be allocated later when the per cpu 3780 * allocator is available. 3781 * 3782 * boot_pagesets are used also for bootstrapping offline 3783 * cpus if the system is already booted because the pagesets 3784 * are needed to initialize allocators on a specific cpu too. 3785 * F.e. the percpu allocator needs the page allocator which 3786 * needs the percpu allocator in order to allocate its pagesets 3787 * (a chicken-egg dilemma). 3788 */ 3789 for_each_possible_cpu(cpu) { 3790 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 3791 3792 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 3793 /* 3794 * We now know the "local memory node" for each node-- 3795 * i.e., the node of the first zone in the generic zonelist. 3796 * Set up numa_mem percpu variable for on-line cpus. During 3797 * boot, only the boot cpu should be on-line; we'll init the 3798 * secondary cpus' numa_mem as they come on-line. During 3799 * node/memory hotplug, we'll fixup all on-line cpus. 3800 */ 3801 if (cpu_online(cpu)) 3802 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 3803 #endif 3804 } 3805 3806 return 0; 3807 } 3808 3809 /* 3810 * Called with zonelists_mutex held always 3811 * unless system_state == SYSTEM_BOOTING. 3812 */ 3813 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) 3814 { 3815 set_zonelist_order(); 3816 3817 if (system_state == SYSTEM_BOOTING) { 3818 __build_all_zonelists(NULL); 3819 mminit_verify_zonelist(); 3820 cpuset_init_current_mems_allowed(); 3821 } else { 3822 #ifdef CONFIG_MEMORY_HOTPLUG 3823 if (zone) 3824 setup_zone_pageset(zone); 3825 #endif 3826 /* we have to stop all cpus to guarantee there is no user 3827 of zonelist */ 3828 stop_machine(__build_all_zonelists, pgdat, NULL); 3829 /* cpuset refresh routine should be here */ 3830 } 3831 vm_total_pages = nr_free_pagecache_pages(); 3832 /* 3833 * Disable grouping by mobility if the number of pages in the 3834 * system is too low to allow the mechanism to work. It would be 3835 * more accurate, but expensive to check per-zone. This check is 3836 * made on memory-hotadd so a system can start with mobility 3837 * disabled and enable it later 3838 */ 3839 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 3840 page_group_by_mobility_disabled = 1; 3841 else 3842 page_group_by_mobility_disabled = 0; 3843 3844 printk("Built %i zonelists in %s order, mobility grouping %s. " 3845 "Total pages: %ld\n", 3846 nr_online_nodes, 3847 zonelist_order_name[current_zonelist_order], 3848 page_group_by_mobility_disabled ? "off" : "on", 3849 vm_total_pages); 3850 #ifdef CONFIG_NUMA 3851 printk("Policy zone: %s\n", zone_names[policy_zone]); 3852 #endif 3853 } 3854 3855 /* 3856 * Helper functions to size the waitqueue hash table. 3857 * Essentially these want to choose hash table sizes sufficiently 3858 * large so that collisions trying to wait on pages are rare. 3859 * But in fact, the number of active page waitqueues on typical 3860 * systems is ridiculously low, less than 200. So this is even 3861 * conservative, even though it seems large. 3862 * 3863 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 3864 * waitqueues, i.e. the size of the waitq table given the number of pages. 
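 *
 * A worked example (illustration only): a 1GiB zone with 4KiB pages spans
 * 262144 pages; 262144 / PAGES_PER_WAITQUEUE = 1024, so
 * wait_table_hash_nr_entries() below (in the !MEMORY_HOTPLUG case) rounds
 * up to a 1024-entry table, within the [4, 4096] clamp it applies.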
3865 */ 3866 #define PAGES_PER_WAITQUEUE 256 3867 3868 #ifndef CONFIG_MEMORY_HOTPLUG 3869 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 3870 { 3871 unsigned long size = 1; 3872 3873 pages /= PAGES_PER_WAITQUEUE; 3874 3875 while (size < pages) 3876 size <<= 1; 3877 3878 /* 3879 * Once we have dozens or even hundreds of threads sleeping 3880 * on IO we've got bigger problems than wait queue collision. 3881 * Limit the size of the wait table to a reasonable size. 3882 */ 3883 size = min(size, 4096UL); 3884 3885 return max(size, 4UL); 3886 } 3887 #else 3888 /* 3889 * A zone's size might be changed by hot-add, so it is not possible to determine 3890 * a suitable size for its wait_table. So we use the maximum size now. 3891 * 3892 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 3893 * 3894 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 3895 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 3896 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 3897 * 3898 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 3899 * or more by the traditional way. (See above). It equals: 3900 * 3901 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 3902 * ia64(16K page size) : = ( 8G + 4M)byte. 3903 * powerpc (64K page size) : = (32G +16M)byte. 3904 */ 3905 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 3906 { 3907 return 4096UL; 3908 } 3909 #endif 3910 3911 /* 3912 * This is an integer logarithm so that shifts can be used later 3913 * to extract the more random high bits from the multiplicative 3914 * hash function before the remainder is taken. 3915 */ 3916 static inline unsigned long wait_table_bits(unsigned long size) 3917 { 3918 return ffz(~size); 3919 } 3920 3921 /* 3922 * Check if a pageblock contains reserved pages 3923 */ 3924 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn) 3925 { 3926 unsigned long pfn; 3927 3928 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 3929 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn))) 3930 return 1; 3931 } 3932 return 0; 3933 } 3934 3935 /* 3936 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 3937 * of blocks reserved is based on min_wmark_pages(zone). The memory within 3938 * the reserve will tend to store contiguous free pages. Setting min_free_kbytes 3939 * higher will lead to a bigger reserve which will get freed as contiguous 3940 * blocks as reclaim kicks in 3941 */ 3942 static void setup_zone_migrate_reserve(struct zone *zone) 3943 { 3944 unsigned long start_pfn, pfn, end_pfn, block_end_pfn; 3945 struct page *page; 3946 unsigned long block_migratetype; 3947 int reserve; 3948 int old_reserve; 3949 3950 /* 3951 * Get the start pfn, end pfn and the number of blocks to reserve 3952 * We have to be careful to be aligned to pageblock_nr_pages to 3953 * make sure that we always check pfn_valid for the first page in 3954 * the block. 3955 */ 3956 start_pfn = zone->zone_start_pfn; 3957 end_pfn = zone_end_pfn(zone); 3958 start_pfn = roundup(start_pfn, pageblock_nr_pages); 3959 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> 3960 pageblock_order; 3961 3962 /* 3963 * Reserve blocks are generally in place to help high-order atomic 3964 * allocations that are short-lived. A min_free_kbytes value that 3965 * would result in more than 2 reserve blocks for atomic allocations 3966 * is assumed to be in place to help anti-fragmentation for the 3967 * future allocation of hugepages at runtime. 
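 *
 * A worked example (illustration, with assumed numbers): with 2MiB
 * pageblocks (pageblock_order 9, 512 pages) and min_wmark_pages(zone) of
 * roughly 1500 pages, the calculation above yields
 * roundup(1500, 512) >> 9 = 3 blocks, which the min() below caps at 2.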
3968 */ 3969 reserve = min(2, reserve); 3970 old_reserve = zone->nr_migrate_reserve_block; 3971 3972 /* When memory hot-add, we almost always need to do nothing */ 3973 if (reserve == old_reserve) 3974 return; 3975 zone->nr_migrate_reserve_block = reserve; 3976 3977 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 3978 if (!pfn_valid(pfn)) 3979 continue; 3980 page = pfn_to_page(pfn); 3981 3982 /* Watch out for overlapping nodes */ 3983 if (page_to_nid(page) != zone_to_nid(zone)) 3984 continue; 3985 3986 block_migratetype = get_pageblock_migratetype(page); 3987 3988 /* Only test what is necessary when the reserves are not met */ 3989 if (reserve > 0) { 3990 /* 3991 * Blocks with reserved pages will never free, skip 3992 * them. 3993 */ 3994 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn); 3995 if (pageblock_is_reserved(pfn, block_end_pfn)) 3996 continue; 3997 3998 /* If this block is reserved, account for it */ 3999 if (block_migratetype == MIGRATE_RESERVE) { 4000 reserve--; 4001 continue; 4002 } 4003 4004 /* Suitable for reserving if this block is movable */ 4005 if (block_migratetype == MIGRATE_MOVABLE) { 4006 set_pageblock_migratetype(page, 4007 MIGRATE_RESERVE); 4008 move_freepages_block(zone, page, 4009 MIGRATE_RESERVE); 4010 reserve--; 4011 continue; 4012 } 4013 } else if (!old_reserve) { 4014 /* 4015 * At boot time we don't need to scan the whole zone 4016 * for turning off MIGRATE_RESERVE. 4017 */ 4018 break; 4019 } 4020 4021 /* 4022 * If the reserve is met and this is a previous reserved block, 4023 * take it back 4024 */ 4025 if (block_migratetype == MIGRATE_RESERVE) { 4026 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4027 move_freepages_block(zone, page, MIGRATE_MOVABLE); 4028 } 4029 } 4030 } 4031 4032 /* 4033 * Initially all pages are reserved - free ones are freed 4034 * up by free_all_bootmem() once the early boot process is 4035 * done. Non-atomic initialization, single-pass. 4036 */ 4037 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 4038 unsigned long start_pfn, enum memmap_context context) 4039 { 4040 struct page *page; 4041 unsigned long end_pfn = start_pfn + size; 4042 unsigned long pfn; 4043 struct zone *z; 4044 4045 if (highest_memmap_pfn < end_pfn - 1) 4046 highest_memmap_pfn = end_pfn - 1; 4047 4048 z = &NODE_DATA(nid)->node_zones[zone]; 4049 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4050 /* 4051 * There can be holes in boot-time mem_map[]s 4052 * handed to this function. They do not 4053 * exist on hotplugged memory. 4054 */ 4055 if (context == MEMMAP_EARLY) { 4056 if (!early_pfn_valid(pfn)) 4057 continue; 4058 if (!early_pfn_in_nid(pfn, nid)) 4059 continue; 4060 } 4061 page = pfn_to_page(pfn); 4062 set_page_links(page, zone, nid, pfn); 4063 mminit_verify_page_links(page, zone, nid, pfn); 4064 init_page_count(page); 4065 page_mapcount_reset(page); 4066 page_cpupid_reset_last(page); 4067 SetPageReserved(page); 4068 /* 4069 * Mark the block movable so that blocks are reserved for 4070 * movable at startup. This will force kernel allocations 4071 * to reserve their blocks rather than leaking throughout 4072 * the address space during boot when many long-lived 4073 * kernel allocations are made. Later some blocks near 4074 * the start are marked MIGRATE_RESERVE by 4075 * setup_zone_migrate_reserve() 4076 * 4077 * bitmap is created for zone's valid pfn range. 
but memmap 4078 * can be created for invalid pages (for alignment) 4079 * check here not to call set_pageblock_migratetype() against 4080 * pfn out of zone. 4081 */ 4082 if ((z->zone_start_pfn <= pfn) 4083 && (pfn < zone_end_pfn(z)) 4084 && !(pfn & (pageblock_nr_pages - 1))) 4085 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4086 4087 INIT_LIST_HEAD(&page->lru); 4088 #ifdef WANT_PAGE_VIRTUAL 4089 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 4090 if (!is_highmem_idx(zone)) 4091 set_page_address(page, __va(pfn << PAGE_SHIFT)); 4092 #endif 4093 } 4094 } 4095 4096 static void __meminit zone_init_free_lists(struct zone *zone) 4097 { 4098 int order, t; 4099 for_each_migratetype_order(order, t) { 4100 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 4101 zone->free_area[order].nr_free = 0; 4102 } 4103 } 4104 4105 #ifndef __HAVE_ARCH_MEMMAP_INIT 4106 #define memmap_init(size, nid, zone, start_pfn) \ 4107 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 4108 #endif 4109 4110 static int __meminit zone_batchsize(struct zone *zone) 4111 { 4112 #ifdef CONFIG_MMU 4113 int batch; 4114 4115 /* 4116 * The per-cpu-pages pools are set to around 1/1000th of the 4117 * size of the zone. But no more than 1/2 of a meg. 4118 * 4119 * OK, so we don't know how big the cache is. So guess. 4120 */ 4121 batch = zone->managed_pages / 1024; 4122 if (batch * PAGE_SIZE > 512 * 1024) 4123 batch = (512 * 1024) / PAGE_SIZE; 4124 batch /= 4; /* We effectively *= 4 below */ 4125 if (batch < 1) 4126 batch = 1; 4127 4128 /* 4129 * Clamp the batch to a 2^n - 1 value. Having a power 4130 * of 2 value was found to be more likely to have 4131 * suboptimal cache aliasing properties in some cases. 4132 * 4133 * For example, if 2 tasks are alternately allocating 4134 * batches of pages, one task can end up with a lot 4135 * of pages of one half of the possible page colors 4136 * and the other with pages of the other colors. 4137 */ 4138 batch = rounddown_pow_of_two(batch + batch/2) - 1; 4139 4140 return batch; 4141 4142 #else 4143 /* The deferral and batching of frees should be suppressed under NOMMU 4144 * conditions. 4145 * 4146 * The problem is that NOMMU needs to be able to allocate large chunks 4147 * of contiguous memory as there's no hardware page translation to 4148 * assemble apparent contiguous memory from discontiguous pages. 4149 * 4150 * Queueing large contiguous runs of pages for batching, however, 4151 * causes the pages to actually be freed in smaller chunks. As there 4152 * can be a significant delay between the individual batches being 4153 * recycled, this leads to the once large chunks of space being 4154 * fragmented and becoming unavailable for high-order allocations. 4155 */ 4156 return 0; 4157 #endif 4158 } 4159 4160 /* 4161 * pcp->high and pcp->batch values are related and dependent on one another: 4162 * ->batch must never be higher than ->high. 4163 * The following function updates them in a safe manner without read side 4164 * locking. 4165 * 4166 * Any new users of pcp->batch and pcp->high should ensure they can cope with 4167 * those fields changing asynchronously (according to the above rule). 4168 * 4169 * pcp_batch_high_lock must be held when calling this function 4170 * outside of boot time (or some other assurance that no concurrent updaters 4171 * exist).
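 *
 * A hypothetical reader sketch (illustration only, not code in this file):
 *
 *	unsigned long batch = ACCESS_ONCE(pcp->batch);
 *	unsigned long high = ACCESS_ONCE(pcp->high);
 *
 * Such a snapshot may briefly observe the fail-safe ->batch of 1 that
 * pageset_update() below writes first while an update is in flight.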
4172 */ 4173 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 4174 unsigned long batch) 4175 { 4176 /* start with a fail safe value for batch */ 4177 pcp->batch = 1; 4178 smp_wmb(); 4179 4180 /* Update high, then batch, in order */ 4181 pcp->high = high; 4182 smp_wmb(); 4183 4184 pcp->batch = batch; 4185 } 4186 4187 /* a companion to pageset_set_high() */ 4188 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) 4189 { 4190 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); 4191 } 4192 4193 static void pageset_init(struct per_cpu_pageset *p) 4194 { 4195 struct per_cpu_pages *pcp; 4196 int migratetype; 4197 4198 memset(p, 0, sizeof(*p)); 4199 4200 pcp = &p->pcp; 4201 pcp->count = 0; 4202 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 4203 INIT_LIST_HEAD(&pcp->lists[migratetype]); 4204 } 4205 4206 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 4207 { 4208 pageset_init(p); 4209 pageset_set_batch(p, batch); 4210 } 4211 4212 /* 4213 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist 4214 * to the value high for the pageset p. 4215 */ 4216 static void pageset_set_high(struct per_cpu_pageset *p, 4217 unsigned long high) 4218 { 4219 unsigned long batch = max(1UL, high / 4); 4220 if ((high / 4) > (PAGE_SHIFT * 8)) 4221 batch = PAGE_SHIFT * 8; 4222 4223 pageset_update(&p->pcp, high, batch); 4224 } 4225 4226 static void __meminit pageset_set_high_and_batch(struct zone *zone, 4227 struct per_cpu_pageset *pcp) 4228 { 4229 if (percpu_pagelist_fraction) 4230 pageset_set_high(pcp, 4231 (zone->managed_pages / 4232 percpu_pagelist_fraction)); 4233 else 4234 pageset_set_batch(pcp, zone_batchsize(zone)); 4235 } 4236 4237 static void __meminit zone_pageset_init(struct zone *zone, int cpu) 4238 { 4239 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); 4240 4241 pageset_init(pcp); 4242 pageset_set_high_and_batch(zone, pcp); 4243 } 4244 4245 static void __meminit setup_zone_pageset(struct zone *zone) 4246 { 4247 int cpu; 4248 zone->pageset = alloc_percpu(struct per_cpu_pageset); 4249 for_each_possible_cpu(cpu) 4250 zone_pageset_init(zone, cpu); 4251 } 4252 4253 /* 4254 * Allocate per cpu pagesets and initialize them. 4255 * Before this call only boot pagesets were available. 4256 */ 4257 void __init setup_per_cpu_pageset(void) 4258 { 4259 struct zone *zone; 4260 4261 for_each_populated_zone(zone) 4262 setup_zone_pageset(zone); 4263 } 4264 4265 static noinline __init_refok 4266 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 4267 { 4268 int i; 4269 size_t alloc_size; 4270 4271 /* 4272 * The per-page waitqueue mechanism uses hashed waitqueues 4273 * per zone. 4274 */ 4275 zone->wait_table_hash_nr_entries = 4276 wait_table_hash_nr_entries(zone_size_pages); 4277 zone->wait_table_bits = 4278 wait_table_bits(zone->wait_table_hash_nr_entries); 4279 alloc_size = zone->wait_table_hash_nr_entries 4280 * sizeof(wait_queue_head_t); 4281 4282 if (!slab_is_available()) { 4283 zone->wait_table = (wait_queue_head_t *) 4284 memblock_virt_alloc_node_nopanic( 4285 alloc_size, zone->zone_pgdat->node_id); 4286 } else { 4287 /* 4288 * This case means that a zone whose size was 0 gets new memory 4289 * via memory hot-add. 4290 * But it may be the case that a new node was hot-added. In 4291 * this case vmalloc() will not be able to use this new node's 4292 * memory - this wait_table must be initialized to use this new 4293 * node itself as well. 
4294 * To use this new node's memory, further consideration will be 4295 * necessary. 4296 */ 4297 zone->wait_table = vmalloc(alloc_size); 4298 } 4299 if (!zone->wait_table) 4300 return -ENOMEM; 4301 4302 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) 4303 init_waitqueue_head(zone->wait_table + i); 4304 4305 return 0; 4306 } 4307 4308 static __meminit void zone_pcp_init(struct zone *zone) 4309 { 4310 /* 4311 * per cpu subsystem is not up at this point. The following code 4312 * relies on the ability of the linker to provide the 4313 * offset of a (static) per cpu variable into the per cpu area. 4314 */ 4315 zone->pageset = &boot_pageset; 4316 4317 if (populated_zone(zone)) 4318 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 4319 zone->name, zone->present_pages, 4320 zone_batchsize(zone)); 4321 } 4322 4323 int __meminit init_currently_empty_zone(struct zone *zone, 4324 unsigned long zone_start_pfn, 4325 unsigned long size, 4326 enum memmap_context context) 4327 { 4328 struct pglist_data *pgdat = zone->zone_pgdat; 4329 int ret; 4330 ret = zone_wait_table_init(zone, size); 4331 if (ret) 4332 return ret; 4333 pgdat->nr_zones = zone_idx(zone) + 1; 4334 4335 zone->zone_start_pfn = zone_start_pfn; 4336 4337 mminit_dprintk(MMINIT_TRACE, "memmap_init", 4338 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 4339 pgdat->node_id, 4340 (unsigned long)zone_idx(zone), 4341 zone_start_pfn, (zone_start_pfn + size)); 4342 4343 zone_init_free_lists(zone); 4344 4345 return 0; 4346 } 4347 4348 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4349 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 4350 /* 4351 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 4352 * Architectures may implement their own version but if add_active_range() 4353 * was used and there are no special requirements, this is a convenient 4354 * alternative 4355 */ 4356 int __meminit __early_pfn_to_nid(unsigned long pfn) 4357 { 4358 unsigned long start_pfn, end_pfn; 4359 int nid; 4360 /* 4361 * NOTE: The following SMP-unsafe globals are only used early in boot 4362 * when the kernel is running single-threaded. 4363 */ 4364 static unsigned long __meminitdata last_start_pfn, last_end_pfn; 4365 static int __meminitdata last_nid; 4366 4367 if (last_start_pfn <= pfn && pfn < last_end_pfn) 4368 return last_nid; 4369 4370 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 4371 if (nid != -1) { 4372 last_start_pfn = start_pfn; 4373 last_end_pfn = end_pfn; 4374 last_nid = nid; 4375 } 4376 4377 return nid; 4378 } 4379 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 4380 4381 int __meminit early_pfn_to_nid(unsigned long pfn) 4382 { 4383 int nid; 4384 4385 nid = __early_pfn_to_nid(pfn); 4386 if (nid >= 0) 4387 return nid; 4388 /* just returns 0 */ 4389 return 0; 4390 } 4391 4392 #ifdef CONFIG_NODES_SPAN_OTHER_NODES 4393 bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 4394 { 4395 int nid; 4396 4397 nid = __early_pfn_to_nid(pfn); 4398 if (nid >= 0 && nid != node) 4399 return false; 4400 return true; 4401 } 4402 #endif 4403 4404 /** 4405 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range 4406 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 
4407 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid 4408 * 4409 * If an architecture guarantees that all ranges registered with 4410 * add_active_ranges() contain no holes and may be freed, this 4411 * this function may be used instead of calling memblock_free_early_nid() 4412 * manually. 4413 */ 4414 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) 4415 { 4416 unsigned long start_pfn, end_pfn; 4417 int i, this_nid; 4418 4419 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { 4420 start_pfn = min(start_pfn, max_low_pfn); 4421 end_pfn = min(end_pfn, max_low_pfn); 4422 4423 if (start_pfn < end_pfn) 4424 memblock_free_early_nid(PFN_PHYS(start_pfn), 4425 (end_pfn - start_pfn) << PAGE_SHIFT, 4426 this_nid); 4427 } 4428 } 4429 4430 /** 4431 * sparse_memory_present_with_active_regions - Call memory_present for each active range 4432 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 4433 * 4434 * If an architecture guarantees that all ranges registered with 4435 * add_active_ranges() contain no holes and may be freed, this 4436 * function may be used instead of calling memory_present() manually. 4437 */ 4438 void __init sparse_memory_present_with_active_regions(int nid) 4439 { 4440 unsigned long start_pfn, end_pfn; 4441 int i, this_nid; 4442 4443 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 4444 memory_present(this_nid, start_pfn, end_pfn); 4445 } 4446 4447 /** 4448 * get_pfn_range_for_nid - Return the start and end page frames for a node 4449 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 4450 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 4451 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 4452 * 4453 * It returns the start and end page frame of a node based on information 4454 * provided by an arch calling add_active_range(). If called for a node 4455 * with no available memory, a warning is printed and the start and end 4456 * PFNs will be 0. 4457 */ 4458 void __meminit get_pfn_range_for_nid(unsigned int nid, 4459 unsigned long *start_pfn, unsigned long *end_pfn) 4460 { 4461 unsigned long this_start_pfn, this_end_pfn; 4462 int i; 4463 4464 *start_pfn = -1UL; 4465 *end_pfn = 0; 4466 4467 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 4468 *start_pfn = min(*start_pfn, this_start_pfn); 4469 *end_pfn = max(*end_pfn, this_end_pfn); 4470 } 4471 4472 if (*start_pfn == -1UL) 4473 *start_pfn = 0; 4474 } 4475 4476 /* 4477 * This finds a zone that can be used for ZONE_MOVABLE pages. The 4478 * assumption is made that zones within a node are ordered in monotonic 4479 * increasing memory addresses so that the "highest" populated zone is used 4480 */ 4481 static void __init find_usable_zone_for_movable(void) 4482 { 4483 int zone_index; 4484 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 4485 if (zone_index == ZONE_MOVABLE) 4486 continue; 4487 4488 if (arch_zone_highest_possible_pfn[zone_index] > 4489 arch_zone_lowest_possible_pfn[zone_index]) 4490 break; 4491 } 4492 4493 VM_BUG_ON(zone_index == -1); 4494 movable_zone = zone_index; 4495 } 4496 4497 /* 4498 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 4499 * because it is sized independent of architecture. Unlike the other zones, 4500 * the starting point for ZONE_MOVABLE is not fixed. 
It may be different 4501 * in each node depending on the size of each node and how evenly kernelcore 4502 * is distributed. This helper function adjusts the zone ranges 4503 * provided by the architecture for a given node by using the end of the 4504 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 4505 * zones within a node are in order of monotonic increases memory addresses 4506 */ 4507 static void __meminit adjust_zone_range_for_zone_movable(int nid, 4508 unsigned long zone_type, 4509 unsigned long node_start_pfn, 4510 unsigned long node_end_pfn, 4511 unsigned long *zone_start_pfn, 4512 unsigned long *zone_end_pfn) 4513 { 4514 /* Only adjust if ZONE_MOVABLE is on this node */ 4515 if (zone_movable_pfn[nid]) { 4516 /* Size ZONE_MOVABLE */ 4517 if (zone_type == ZONE_MOVABLE) { 4518 *zone_start_pfn = zone_movable_pfn[nid]; 4519 *zone_end_pfn = min(node_end_pfn, 4520 arch_zone_highest_possible_pfn[movable_zone]); 4521 4522 /* Adjust for ZONE_MOVABLE starting within this range */ 4523 } else if (*zone_start_pfn < zone_movable_pfn[nid] && 4524 *zone_end_pfn > zone_movable_pfn[nid]) { 4525 *zone_end_pfn = zone_movable_pfn[nid]; 4526 4527 /* Check if this whole range is within ZONE_MOVABLE */ 4528 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 4529 *zone_start_pfn = *zone_end_pfn; 4530 } 4531 } 4532 4533 /* 4534 * Return the number of pages a zone spans in a node, including holes 4535 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 4536 */ 4537 static unsigned long __meminit zone_spanned_pages_in_node(int nid, 4538 unsigned long zone_type, 4539 unsigned long node_start_pfn, 4540 unsigned long node_end_pfn, 4541 unsigned long *ignored) 4542 { 4543 unsigned long zone_start_pfn, zone_end_pfn; 4544 4545 /* Get the start and end of the zone */ 4546 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 4547 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 4548 adjust_zone_range_for_zone_movable(nid, zone_type, 4549 node_start_pfn, node_end_pfn, 4550 &zone_start_pfn, &zone_end_pfn); 4551 4552 /* Check that this node has pages within the zone's required range */ 4553 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 4554 return 0; 4555 4556 /* Move the zone boundaries inside the node if necessary */ 4557 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 4558 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 4559 4560 /* Return the spanned pages */ 4561 return zone_end_pfn - zone_start_pfn; 4562 } 4563 4564 /* 4565 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 4566 * then all holes in the requested range will be accounted for. 4567 */ 4568 unsigned long __meminit __absent_pages_in_range(int nid, 4569 unsigned long range_start_pfn, 4570 unsigned long range_end_pfn) 4571 { 4572 unsigned long nr_absent = range_end_pfn - range_start_pfn; 4573 unsigned long start_pfn, end_pfn; 4574 int i; 4575 4576 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 4577 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 4578 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 4579 nr_absent -= end_pfn - start_pfn; 4580 } 4581 return nr_absent; 4582 } 4583 4584 /** 4585 * absent_pages_in_range - Return number of page frames in holes within a range 4586 * @start_pfn: The start PFN to start searching for holes 4587 * @end_pfn: The end PFN to stop searching for holes 4588 * 4589 * It returns the number of pages frames in memory holes within a range. 
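 *
 * A worked example (illustration only): if the range 0..1000 is covered by
 * a single memblock region spanning pfns 0 to 900, the loop in
 * __absent_pages_in_range() below subtracts 900 from 1000 and reports 100
 * absent page frames.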
4590 */ 4591 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 4592 unsigned long end_pfn) 4593 { 4594 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 4595 } 4596 4597 /* Return the number of page frames in holes in a zone on a node */ 4598 static unsigned long __meminit zone_absent_pages_in_node(int nid, 4599 unsigned long zone_type, 4600 unsigned long node_start_pfn, 4601 unsigned long node_end_pfn, 4602 unsigned long *ignored) 4603 { 4604 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 4605 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 4606 unsigned long zone_start_pfn, zone_end_pfn; 4607 4608 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 4609 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 4610 4611 adjust_zone_range_for_zone_movable(nid, zone_type, 4612 node_start_pfn, node_end_pfn, 4613 &zone_start_pfn, &zone_end_pfn); 4614 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 4615 } 4616 4617 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 4618 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 4619 unsigned long zone_type, 4620 unsigned long node_start_pfn, 4621 unsigned long node_end_pfn, 4622 unsigned long *zones_size) 4623 { 4624 return zones_size[zone_type]; 4625 } 4626 4627 static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 4628 unsigned long zone_type, 4629 unsigned long node_start_pfn, 4630 unsigned long node_end_pfn, 4631 unsigned long *zholes_size) 4632 { 4633 if (!zholes_size) 4634 return 0; 4635 4636 return zholes_size[zone_type]; 4637 } 4638 4639 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 4640 4641 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 4642 unsigned long node_start_pfn, 4643 unsigned long node_end_pfn, 4644 unsigned long *zones_size, 4645 unsigned long *zholes_size) 4646 { 4647 unsigned long realtotalpages, totalpages = 0; 4648 enum zone_type i; 4649 4650 for (i = 0; i < MAX_NR_ZONES; i++) 4651 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 4652 node_start_pfn, 4653 node_end_pfn, 4654 zones_size); 4655 pgdat->node_spanned_pages = totalpages; 4656 4657 realtotalpages = totalpages; 4658 for (i = 0; i < MAX_NR_ZONES; i++) 4659 realtotalpages -= 4660 zone_absent_pages_in_node(pgdat->node_id, i, 4661 node_start_pfn, node_end_pfn, 4662 zholes_size); 4663 pgdat->node_present_pages = realtotalpages; 4664 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 4665 realtotalpages); 4666 } 4667 4668 #ifndef CONFIG_SPARSEMEM 4669 /* 4670 * Calculate the size of the zone->blockflags rounded to an unsigned long 4671 * Start by making sure zonesize is a multiple of pageblock_order by rounding 4672 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 4673 * round what is now in bits to nearest long in bits, then return it in 4674 * bytes. 
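 *
 * A worked example (illustration, assuming 4 pageblock flag bits and a zone
 * starting on a pageblock boundary): a 1GiB zone of 4KiB pages with
 * pageblock_order 9 holds 512 pageblocks; 512 * NR_PAGEBLOCK_BITS = 2048
 * bits, already a multiple of the 64-bit long size, so usemap_size() below
 * returns 2048 / 8 = 256 bytes.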
4675 */ 4676 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 4677 { 4678 unsigned long usemapsize; 4679 4680 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 4681 usemapsize = roundup(zonesize, pageblock_nr_pages); 4682 usemapsize = usemapsize >> pageblock_order; 4683 usemapsize *= NR_PAGEBLOCK_BITS; 4684 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 4685 4686 return usemapsize / 8; 4687 } 4688 4689 static void __init setup_usemap(struct pglist_data *pgdat, 4690 struct zone *zone, 4691 unsigned long zone_start_pfn, 4692 unsigned long zonesize) 4693 { 4694 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); 4695 zone->pageblock_flags = NULL; 4696 if (usemapsize) 4697 zone->pageblock_flags = 4698 memblock_virt_alloc_node_nopanic(usemapsize, 4699 pgdat->node_id); 4700 } 4701 #else 4702 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, 4703 unsigned long zone_start_pfn, unsigned long zonesize) {} 4704 #endif /* CONFIG_SPARSEMEM */ 4705 4706 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 4707 4708 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 4709 void __paginginit set_pageblock_order(void) 4710 { 4711 unsigned int order; 4712 4713 /* Check that pageblock_order has not already been set up */ 4714 if (pageblock_order) 4715 return; 4716 4717 if (HPAGE_SHIFT > PAGE_SHIFT) 4718 order = HUGETLB_PAGE_ORDER; 4719 else 4720 order = MAX_ORDER - 1; 4721 4722 /* 4723 * Assume the largest contiguous order of interest is a huge page. 4724 * This value may be variable depending on boot parameters on IA64 and 4725 * powerpc. 4726 */ 4727 pageblock_order = order; 4728 } 4729 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4730 4731 /* 4732 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 4733 * is unused as pageblock_order is set at compile-time. See 4734 * include/linux/pageblock-flags.h for the values of pageblock_order based on 4735 * the kernel config. 4736 */ 4737 void __paginginit set_pageblock_order(void) 4738 { 4739 } 4740 4741 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4742 4743 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, 4744 unsigned long present_pages) 4745 { 4746 unsigned long pages = spanned_pages; 4747 4748 /* 4749 * Provide a more accurate estimation if there are holes within 4750 * the zone and SPARSEMEM is in use. If there are holes within the 4751 * zone, each populated memory region may cost us one or two extra 4752 * memmap pages due to alignment because the memmap pages for each 4753 * populated region may not be naturally aligned on a page boundary. 4754 * So the (present_pages >> 4) heuristic is a tradeoff for that. 4755 */ 4756 if (spanned_pages > present_pages + (present_pages >> 4) && 4757 IS_ENABLED(CONFIG_SPARSEMEM)) 4758 pages = present_pages; 4759 4760 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 4761 } 4762 4763 /* 4764 * Set up the zone data structures: 4765 * - mark all pages reserved 4766 * - mark all memory queues empty 4767 * - clear the memory bitmaps 4768 * 4769 * NOTE: pgdat should get zeroed by caller.
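 *
 * For each zone below, 'size' is the spanned pfn range, 'realsize' excludes
 * holes, and 'freesize' further subtracts the memmap pages (and, for the
 * first zone, dma_reserve) before feeding nr_kernel_pages, nr_all_pages and
 * the zone's managed_pages.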
4770 */ 4771 static void __paginginit free_area_init_core(struct pglist_data *pgdat, 4772 unsigned long node_start_pfn, unsigned long node_end_pfn, 4773 unsigned long *zones_size, unsigned long *zholes_size) 4774 { 4775 enum zone_type j; 4776 int nid = pgdat->node_id; 4777 unsigned long zone_start_pfn = pgdat->node_start_pfn; 4778 int ret; 4779 4780 pgdat_resize_init(pgdat); 4781 #ifdef CONFIG_NUMA_BALANCING 4782 spin_lock_init(&pgdat->numabalancing_migrate_lock); 4783 pgdat->numabalancing_migrate_nr_pages = 0; 4784 pgdat->numabalancing_migrate_next_window = jiffies; 4785 #endif 4786 init_waitqueue_head(&pgdat->kswapd_wait); 4787 init_waitqueue_head(&pgdat->pfmemalloc_wait); 4788 pgdat_page_cgroup_init(pgdat); 4789 4790 for (j = 0; j < MAX_NR_ZONES; j++) { 4791 struct zone *zone = pgdat->node_zones + j; 4792 unsigned long size, realsize, freesize, memmap_pages; 4793 4794 size = zone_spanned_pages_in_node(nid, j, node_start_pfn, 4795 node_end_pfn, zones_size); 4796 realsize = freesize = size - zone_absent_pages_in_node(nid, j, 4797 node_start_pfn, 4798 node_end_pfn, 4799 zholes_size); 4800 4801 /* 4802 * Adjust freesize so that it accounts for how much memory 4803 * is used by this zone for memmap. This affects the watermark 4804 * and per-cpu initialisations 4805 */ 4806 memmap_pages = calc_memmap_size(size, realsize); 4807 if (freesize >= memmap_pages) { 4808 freesize -= memmap_pages; 4809 if (memmap_pages) 4810 printk(KERN_DEBUG 4811 " %s zone: %lu pages used for memmap\n", 4812 zone_names[j], memmap_pages); 4813 } else 4814 printk(KERN_WARNING 4815 " %s zone: %lu pages exceeds freesize %lu\n", 4816 zone_names[j], memmap_pages, freesize); 4817 4818 /* Account for reserved pages */ 4819 if (j == 0 && freesize > dma_reserve) { 4820 freesize -= dma_reserve; 4821 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 4822 zone_names[0], dma_reserve); 4823 } 4824 4825 if (!is_highmem_idx(j)) 4826 nr_kernel_pages += freesize; 4827 /* Charge for highmem memmap if there are enough kernel pages */ 4828 else if (nr_kernel_pages > memmap_pages * 2) 4829 nr_kernel_pages -= memmap_pages; 4830 nr_all_pages += freesize; 4831 4832 zone->spanned_pages = size; 4833 zone->present_pages = realsize; 4834 /* 4835 * Set an approximate value for lowmem here, it will be adjusted 4836 * when the bootmem allocator frees pages into the buddy system. 4837 * And all highmem pages will be managed by the buddy system. 4838 */ 4839 zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; 4840 #ifdef CONFIG_NUMA 4841 zone->node = nid; 4842 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) 4843 / 100; 4844 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; 4845 #endif 4846 zone->name = zone_names[j]; 4847 spin_lock_init(&zone->lock); 4848 spin_lock_init(&zone->lru_lock); 4849 zone_seqlock_init(zone); 4850 zone->zone_pgdat = pgdat; 4851 zone_pcp_init(zone); 4852 4853 /* For bootup, initialized properly in watermark setup */ 4854 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); 4855 4856 lruvec_init(&zone->lruvec); 4857 if (!size) 4858 continue; 4859 4860 set_pageblock_order(); 4861 setup_usemap(pgdat, zone, zone_start_pfn, size); 4862 ret = init_currently_empty_zone(zone, zone_start_pfn, 4863 size, MEMMAP_EARLY); 4864 BUG_ON(ret); 4865 memmap_init(size, nid, j, zone_start_pfn); 4866 zone_start_pfn += size; 4867 } 4868 } 4869 4870 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 4871 { 4872 /* Skip empty nodes */ 4873 if (!pgdat->node_spanned_pages) 4874 return; 4875 4876 #ifdef CONFIG_FLAT_NODE_MEM_MAP 4877 /* ia64 gets its own node_mem_map, before this, without bootmem */ 4878 if (!pgdat->node_mem_map) { 4879 unsigned long size, start, end; 4880 struct page *map; 4881 4882 /* 4883 * The zone's endpoints aren't required to be MAX_ORDER 4884 * aligned but the node_mem_map endpoints must be in order 4885 * for the buddy allocator to function correctly. 4886 */ 4887 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 4888 end = pgdat_end_pfn(pgdat); 4889 end = ALIGN(end, MAX_ORDER_NR_PAGES); 4890 size = (end - start) * sizeof(struct page); 4891 map = alloc_remap(pgdat->node_id, size); 4892 if (!map) 4893 map = memblock_virt_alloc_node_nopanic(size, 4894 pgdat->node_id); 4895 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 4896 } 4897 #ifndef CONFIG_NEED_MULTIPLE_NODES 4898 /* 4899 * With no DISCONTIG, the global mem_map is just set as node 0's 4900 */ 4901 if (pgdat == NODE_DATA(0)) { 4902 mem_map = NODE_DATA(0)->node_mem_map; 4903 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4904 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 4905 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 4906 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 4907 } 4908 #endif 4909 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 4910 } 4911 4912 void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 4913 unsigned long node_start_pfn, unsigned long *zholes_size) 4914 { 4915 pg_data_t *pgdat = NODE_DATA(nid); 4916 unsigned long start_pfn = 0; 4917 unsigned long end_pfn = 0; 4918 4919 /* pg_data_t should be reset to zero when it's allocated */ 4920 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); 4921 4922 pgdat->node_id = nid; 4923 pgdat->node_start_pfn = node_start_pfn; 4924 if (node_state(nid, N_MEMORY)) 4925 init_zone_allows_reclaim(nid); 4926 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4927 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 4928 #endif 4929 calculate_node_totalpages(pgdat, start_pfn, end_pfn, 4930 zones_size, zholes_size); 4931 4932 alloc_node_mem_map(pgdat); 4933 #ifdef CONFIG_FLAT_NODE_MEM_MAP 4934 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 4935 nid, (unsigned long)pgdat, 4936 (unsigned long)pgdat->node_mem_map); 4937 #endif 4938 4939 free_area_init_core(pgdat, start_pfn, end_pfn, 4940 zones_size, zholes_size); 4941 } 4942 4943 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4944 4945 #if MAX_NUMNODES > 1 4946 /* 4947 * Figure out the number of 
possible node ids. 4948 */ 4949 void __init setup_nr_node_ids(void) 4950 { 4951 unsigned int node; 4952 unsigned int highest = 0; 4953 4954 for_each_node_mask(node, node_possible_map) 4955 highest = node; 4956 nr_node_ids = highest + 1; 4957 } 4958 #endif 4959 4960 /** 4961 * node_map_pfn_alignment - determine the maximum internode alignment 4962 * 4963 * This function should be called after node map is populated and sorted. 4964 * It calculates the maximum power of two alignment which can distinguish 4965 * all the nodes. 4966 * 4967 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 4968 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 4969 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 4970 * shifted, 1GiB is enough and this function will indicate so. 4971 * 4972 * This is used to test whether pfn -> nid mapping of the chosen memory 4973 * model has fine enough granularity to avoid incorrect mapping for the 4974 * populated node map. 4975 * 4976 * Returns the determined alignment in pfn's. 0 if there is no alignment 4977 * requirement (single node). 4978 */ 4979 unsigned long __init node_map_pfn_alignment(void) 4980 { 4981 unsigned long accl_mask = 0, last_end = 0; 4982 unsigned long start, end, mask; 4983 int last_nid = -1; 4984 int i, nid; 4985 4986 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 4987 if (!start || last_nid < 0 || last_nid == nid) { 4988 last_nid = nid; 4989 last_end = end; 4990 continue; 4991 } 4992 4993 /* 4994 * Start with a mask granular enough to pin-point to the 4995 * start pfn and tick off bits one-by-one until it becomes 4996 * too coarse to separate the current node from the last. 4997 */ 4998 mask = ~((1 << __ffs(start)) - 1); 4999 while (mask && last_end <= (start & (mask << 1))) 5000 mask <<= 1; 5001 5002 /* accumulate all internode masks */ 5003 accl_mask |= mask; 5004 } 5005 5006 /* convert mask to number of pages */ 5007 return ~accl_mask + 1; 5008 } 5009 5010 /* Find the lowest pfn for a node */ 5011 static unsigned long __init find_min_pfn_for_node(int nid) 5012 { 5013 unsigned long min_pfn = ULONG_MAX; 5014 unsigned long start_pfn; 5015 int i; 5016 5017 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) 5018 min_pfn = min(min_pfn, start_pfn); 5019 5020 if (min_pfn == ULONG_MAX) { 5021 printk(KERN_WARNING 5022 "Could not find start_pfn for node %d\n", nid); 5023 return 0; 5024 } 5025 5026 return min_pfn; 5027 } 5028 5029 /** 5030 * find_min_pfn_with_active_regions - Find the minimum PFN registered 5031 * 5032 * It returns the minimum PFN based on information provided via 5033 * add_active_range(). 5034 */ 5035 unsigned long __init find_min_pfn_with_active_regions(void) 5036 { 5037 return find_min_pfn_for_node(MAX_NUMNODES); 5038 } 5039 5040 /* 5041 * early_calculate_totalpages() 5042 * Sum pages in active regions for movable zone. 5043 * Populate N_MEMORY for calculating usable_nodes. 5044 */ 5045 static unsigned long __init early_calculate_totalpages(void) 5046 { 5047 unsigned long totalpages = 0; 5048 unsigned long start_pfn, end_pfn; 5049 int i, nid; 5050 5051 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 5052 unsigned long pages = end_pfn - start_pfn; 5053 5054 totalpages += pages; 5055 if (pages) 5056 node_set_state(nid, N_MEMORY); 5057 } 5058 return totalpages; 5059 } 5060 5061 /* 5062 * Find the PFN the Movable zone begins in each node. 
Kernel memory 5063 * is spread evenly between nodes as long as the nodes have enough 5064 * memory. When they don't, some nodes will have more kernelcore than 5065 * others 5066 */ 5067 static void __init find_zone_movable_pfns_for_nodes(void) 5068 { 5069 int i, nid; 5070 unsigned long usable_startpfn; 5071 unsigned long kernelcore_node, kernelcore_remaining; 5072 /* save the state before borrow the nodemask */ 5073 nodemask_t saved_node_state = node_states[N_MEMORY]; 5074 unsigned long totalpages = early_calculate_totalpages(); 5075 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 5076 struct memblock_region *r; 5077 5078 /* Need to find movable_zone earlier when movable_node is specified. */ 5079 find_usable_zone_for_movable(); 5080 5081 /* 5082 * If movable_node is specified, ignore kernelcore and movablecore 5083 * options. 5084 */ 5085 if (movable_node_is_enabled()) { 5086 for_each_memblock(memory, r) { 5087 if (!memblock_is_hotpluggable(r)) 5088 continue; 5089 5090 nid = r->nid; 5091 5092 usable_startpfn = PFN_DOWN(r->base); 5093 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 5094 min(usable_startpfn, zone_movable_pfn[nid]) : 5095 usable_startpfn; 5096 } 5097 5098 goto out2; 5099 } 5100 5101 /* 5102 * If movablecore=nn[KMG] was specified, calculate what size of 5103 * kernelcore that corresponds so that memory usable for 5104 * any allocation type is evenly spread. If both kernelcore 5105 * and movablecore are specified, then the value of kernelcore 5106 * will be used for required_kernelcore if it's greater than 5107 * what movablecore would have allowed. 5108 */ 5109 if (required_movablecore) { 5110 unsigned long corepages; 5111 5112 /* 5113 * Round-up so that ZONE_MOVABLE is at least as large as what 5114 * was requested by the user 5115 */ 5116 required_movablecore = 5117 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 5118 corepages = totalpages - required_movablecore; 5119 5120 required_kernelcore = max(required_kernelcore, corepages); 5121 } 5122 5123 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 5124 if (!required_kernelcore) 5125 goto out; 5126 5127 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 5128 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 5129 5130 restart: 5131 /* Spread kernelcore memory as evenly as possible throughout nodes */ 5132 kernelcore_node = required_kernelcore / usable_nodes; 5133 for_each_node_state(nid, N_MEMORY) { 5134 unsigned long start_pfn, end_pfn; 5135 5136 /* 5137 * Recalculate kernelcore_node if the division per node 5138 * now exceeds what is necessary to satisfy the requested 5139 * amount of memory for the kernel 5140 */ 5141 if (required_kernelcore < kernelcore_node) 5142 kernelcore_node = required_kernelcore / usable_nodes; 5143 5144 /* 5145 * As the map is walked, we track how much memory is usable 5146 * by the kernel using kernelcore_remaining. 
When it is 5147 * 0, the rest of the node is usable by ZONE_MOVABLE 5148 */ 5149 kernelcore_remaining = kernelcore_node; 5150 5151 /* Go through each range of PFNs within this node */ 5152 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 5153 unsigned long size_pages; 5154 5155 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 5156 if (start_pfn >= end_pfn) 5157 continue; 5158 5159 /* Account for what is only usable for kernelcore */ 5160 if (start_pfn < usable_startpfn) { 5161 unsigned long kernel_pages; 5162 kernel_pages = min(end_pfn, usable_startpfn) 5163 - start_pfn; 5164 5165 kernelcore_remaining -= min(kernel_pages, 5166 kernelcore_remaining); 5167 required_kernelcore -= min(kernel_pages, 5168 required_kernelcore); 5169 5170 /* Continue if range is now fully accounted */ 5171 if (end_pfn <= usable_startpfn) { 5172 5173 /* 5174 * Push zone_movable_pfn to the end so 5175 * that if we have to rebalance 5176 * kernelcore across nodes, we will 5177 * not double account here 5178 */ 5179 zone_movable_pfn[nid] = end_pfn; 5180 continue; 5181 } 5182 start_pfn = usable_startpfn; 5183 } 5184 5185 /* 5186 * The usable PFN range for ZONE_MOVABLE is from 5187 * start_pfn->end_pfn. Calculate size_pages as the 5188 * number of pages used as kernelcore 5189 */ 5190 size_pages = end_pfn - start_pfn; 5191 if (size_pages > kernelcore_remaining) 5192 size_pages = kernelcore_remaining; 5193 zone_movable_pfn[nid] = start_pfn + size_pages; 5194 5195 /* 5196 * Some kernelcore has been met, update counts and 5197 * break if the kernelcore for this node has been 5198 * satisfied 5199 */ 5200 required_kernelcore -= min(required_kernelcore, 5201 size_pages); 5202 kernelcore_remaining -= size_pages; 5203 if (!kernelcore_remaining) 5204 break; 5205 } 5206 } 5207 5208 /* 5209 * If there is still required_kernelcore, we do another pass with one 5210 * less node in the count. This will push zone_movable_pfn[nid] further 5211 * along on the nodes that still have memory until kernelcore is 5212 * satisfied 5213 */ 5214 usable_nodes--; 5215 if (usable_nodes && required_kernelcore > usable_nodes) 5216 goto restart; 5217 5218 out2: 5219 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 5220 for (nid = 0; nid < MAX_NUMNODES; nid++) 5221 zone_movable_pfn[nid] = 5222 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 5223 5224 out: 5225 /* restore the node_state */ 5226 node_states[N_MEMORY] = saved_node_state; 5227 } 5228 5229 /* Any regular or high memory on that node ? */ 5230 static void check_for_memory(pg_data_t *pgdat, int nid) 5231 { 5232 enum zone_type zone_type; 5233 5234 if (N_MEMORY == N_NORMAL_MEMORY) 5235 return; 5236 5237 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 5238 struct zone *zone = &pgdat->node_zones[zone_type]; 5239 if (populated_zone(zone)) { 5240 node_set_state(nid, N_HIGH_MEMORY); 5241 if (N_NORMAL_MEMORY != N_HIGH_MEMORY && 5242 zone_type <= ZONE_NORMAL) 5243 node_set_state(nid, N_NORMAL_MEMORY); 5244 break; 5245 } 5246 } 5247 } 5248 5249 /** 5250 * free_area_init_nodes - Initialise all pg_data_t and zone data 5251 * @max_zone_pfn: an array of max PFNs for each zone 5252 * 5253 * This will call free_area_init_node() for each active node in the system. 5254 * Using the page ranges provided by add_active_range(), the size of each 5255 * zone in each node and their holes is calculated. If the maximum PFN 5256 * between two adjacent zones match, it is assumed that the zone is empty. 
5257 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 5258 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 5259 * starts where the previous one ended. For example, ZONE_DMA32 starts 5260 * at arch_max_dma_pfn. 5261 */ 5262 void __init free_area_init_nodes(unsigned long *max_zone_pfn) 5263 { 5264 unsigned long start_pfn, end_pfn; 5265 int i, nid; 5266 5267 /* Record where the zone boundaries are */ 5268 memset(arch_zone_lowest_possible_pfn, 0, 5269 sizeof(arch_zone_lowest_possible_pfn)); 5270 memset(arch_zone_highest_possible_pfn, 0, 5271 sizeof(arch_zone_highest_possible_pfn)); 5272 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 5273 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 5274 for (i = 1; i < MAX_NR_ZONES; i++) { 5275 if (i == ZONE_MOVABLE) 5276 continue; 5277 arch_zone_lowest_possible_pfn[i] = 5278 arch_zone_highest_possible_pfn[i-1]; 5279 arch_zone_highest_possible_pfn[i] = 5280 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 5281 } 5282 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 5283 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 5284 5285 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 5286 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 5287 find_zone_movable_pfns_for_nodes(); 5288 5289 /* Print out the zone ranges */ 5290 printk("Zone ranges:\n"); 5291 for (i = 0; i < MAX_NR_ZONES; i++) { 5292 if (i == ZONE_MOVABLE) 5293 continue; 5294 printk(KERN_CONT " %-8s ", zone_names[i]); 5295 if (arch_zone_lowest_possible_pfn[i] == 5296 arch_zone_highest_possible_pfn[i]) 5297 printk(KERN_CONT "empty\n"); 5298 else 5299 printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n", 5300 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT, 5301 (arch_zone_highest_possible_pfn[i] 5302 << PAGE_SHIFT) - 1); 5303 } 5304 5305 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 5306 printk("Movable zone start for each node\n"); 5307 for (i = 0; i < MAX_NUMNODES; i++) { 5308 if (zone_movable_pfn[i]) 5309 printk(" Node %d: %#010lx\n", i, 5310 zone_movable_pfn[i] << PAGE_SHIFT); 5311 } 5312 5313 /* Print out the early node map */ 5314 printk("Early memory node ranges\n"); 5315 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) 5316 printk(" node %3d: [mem %#010lx-%#010lx]\n", nid, 5317 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); 5318 5319 /* Initialise every node */ 5320 mminit_verify_pageflags_layout(); 5321 setup_nr_node_ids(); 5322 for_each_online_node(nid) { 5323 pg_data_t *pgdat = NODE_DATA(nid); 5324 free_area_init_node(nid, NULL, 5325 find_min_pfn_for_node(nid), NULL); 5326 5327 /* Any memory on that node */ 5328 if (pgdat->node_present_pages) 5329 node_set_state(nid, N_MEMORY); 5330 check_for_memory(pgdat, nid); 5331 } 5332 } 5333 5334 static int __init cmdline_parse_core(char *p, unsigned long *core) 5335 { 5336 unsigned long long coremem; 5337 if (!p) 5338 return -EINVAL; 5339 5340 coremem = memparse(p, &p); 5341 *core = coremem >> PAGE_SHIFT; 5342 5343 /* Paranoid check that UL is enough for the coremem value */ 5344 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 5345 5346 return 0; 5347 } 5348 5349 /* 5350 * kernelcore=size sets the amount of memory for use for allocations that 5351 * cannot be reclaimed or migrated. 
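 *
 * Usage example (illustration only): booting with "kernelcore=512M" on a
 * 4GiB machine keeps roughly 512MiB outside ZONE_MOVABLE for allocations
 * that cannot be migrated, and leaves the rest eligible for ZONE_MOVABLE,
 * subject to the per-node spreading done in
 * find_zone_movable_pfns_for_nodes().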
5352 */ 5353 static int __init cmdline_parse_kernelcore(char *p) 5354 { 5355 return cmdline_parse_core(p, &required_kernelcore); 5356 } 5357 5358 /* 5359 * movablecore=size sets the amount of memory for use for allocations that 5360 * can be reclaimed or migrated. 5361 */ 5362 static int __init cmdline_parse_movablecore(char *p) 5363 { 5364 return cmdline_parse_core(p, &required_movablecore); 5365 } 5366 5367 early_param("kernelcore", cmdline_parse_kernelcore); 5368 early_param("movablecore", cmdline_parse_movablecore); 5369 5370 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5371 5372 void adjust_managed_page_count(struct page *page, long count) 5373 { 5374 spin_lock(&managed_page_count_lock); 5375 page_zone(page)->managed_pages += count; 5376 totalram_pages += count; 5377 #ifdef CONFIG_HIGHMEM 5378 if (PageHighMem(page)) 5379 totalhigh_pages += count; 5380 #endif 5381 spin_unlock(&managed_page_count_lock); 5382 } 5383 EXPORT_SYMBOL(adjust_managed_page_count); 5384 5385 unsigned long free_reserved_area(void *start, void *end, int poison, char *s) 5386 { 5387 void *pos; 5388 unsigned long pages = 0; 5389 5390 start = (void *)PAGE_ALIGN((unsigned long)start); 5391 end = (void *)((unsigned long)end & PAGE_MASK); 5392 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5393 if ((unsigned int)poison <= 0xFF) 5394 memset(pos, poison, PAGE_SIZE); 5395 free_reserved_page(virt_to_page(pos)); 5396 } 5397 5398 if (pages && s) 5399 pr_info("Freeing %s memory: %ldK (%p - %p)\n", 5400 s, pages << (PAGE_SHIFT - 10), start, end); 5401 5402 return pages; 5403 } 5404 EXPORT_SYMBOL(free_reserved_area); 5405 5406 #ifdef CONFIG_HIGHMEM 5407 void free_highmem_page(struct page *page) 5408 { 5409 __free_reserved_page(page); 5410 totalram_pages++; 5411 page_zone(page)->managed_pages++; 5412 totalhigh_pages++; 5413 } 5414 #endif 5415 5416 5417 void __init mem_init_print_info(const char *str) 5418 { 5419 unsigned long physpages, codesize, datasize, rosize, bss_size; 5420 unsigned long init_code_size, init_data_size; 5421 5422 physpages = get_num_physpages(); 5423 codesize = _etext - _stext; 5424 datasize = _edata - _sdata; 5425 rosize = __end_rodata - __start_rodata; 5426 bss_size = __bss_stop - __bss_start; 5427 init_data_size = __init_end - __init_begin; 5428 init_code_size = _einittext - _sinittext; 5429 5430 /* 5431 * Detect special cases and adjust section sizes accordingly: 5432 * 1) .init.* may be embedded into .data sections 5433 * 2) .init.text.* may be out of [__init_begin, __init_end], 5434 * please refer to arch/tile/kernel/vmlinux.lds.S. 5435 * 3) .rodata.* may be embedded into .text or .data sections. 
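 *
 * For case 3 above, for example, if .rodata sits inside [_stext, _etext)
 * then adj_init_size() below subtracts rosize from codesize so the
 * read-only data is not reported twice.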
5436 */ 5437 #define adj_init_size(start, end, size, pos, adj) \ 5438 do { \ 5439 if (start <= pos && pos < end && size > adj) \ 5440 size -= adj; \ 5441 } while (0) 5442 5443 adj_init_size(__init_begin, __init_end, init_data_size, 5444 _sinittext, init_code_size); 5445 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 5446 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 5447 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 5448 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 5449 5450 #undef adj_init_size 5451 5452 printk("Memory: %luK/%luK available " 5453 "(%luK kernel code, %luK rwdata, %luK rodata, " 5454 "%luK init, %luK bss, %luK reserved" 5455 #ifdef CONFIG_HIGHMEM 5456 ", %luK highmem" 5457 #endif 5458 "%s%s)\n", 5459 nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10), 5460 codesize >> 10, datasize >> 10, rosize >> 10, 5461 (init_data_size + init_code_size) >> 10, bss_size >> 10, 5462 (physpages - totalram_pages) << (PAGE_SHIFT-10), 5463 #ifdef CONFIG_HIGHMEM 5464 totalhigh_pages << (PAGE_SHIFT-10), 5465 #endif 5466 str ? ", " : "", str ? str : ""); 5467 } 5468 5469 /** 5470 * set_dma_reserve - set the specified number of pages reserved in the first zone 5471 * @new_dma_reserve: The number of pages to mark reserved 5472 * 5473 * The per-cpu batchsize and zone watermarks are determined by present_pages. 5474 * In the DMA zone, a significant percentage may be consumed by kernel image 5475 * and other unfreeable allocations which can skew the watermarks badly. This 5476 * function may optionally be used to account for unfreeable pages in the 5477 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 5478 * smaller per-cpu batchsize. 5479 */ 5480 void __init set_dma_reserve(unsigned long new_dma_reserve) 5481 { 5482 dma_reserve = new_dma_reserve; 5483 } 5484 5485 void __init free_area_init(unsigned long *zones_size) 5486 { 5487 free_area_init_node(0, zones_size, 5488 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 5489 } 5490 5491 static int page_alloc_cpu_notify(struct notifier_block *self, 5492 unsigned long action, void *hcpu) 5493 { 5494 int cpu = (unsigned long)hcpu; 5495 5496 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 5497 lru_add_drain_cpu(cpu); 5498 drain_pages(cpu); 5499 5500 /* 5501 * Spill the event counters of the dead processor 5502 * into the current processors event counters. 5503 * This artificially elevates the count of the current 5504 * processor. 5505 */ 5506 vm_events_fold_cpu(cpu); 5507 5508 /* 5509 * Zero the differential counters of the dead processor 5510 * so that the vm statistics are consistent. 5511 * 5512 * This is only okay since the processor is dead and cannot 5513 * race with what we are doing. 5514 */ 5515 cpu_vm_stats_fold(cpu); 5516 } 5517 return NOTIFY_OK; 5518 } 5519 5520 void __init page_alloc_init(void) 5521 { 5522 hotcpu_notifier(page_alloc_cpu_notify, 0); 5523 } 5524 5525 /* 5526 * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio 5527 * or min_free_kbytes changes. 
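 * It recomputes totalreserve_pages (and dirty_balance_reserve) from each
 * zone's largest lowmem_reserve[] entry plus its high watermark, capped at
 * the zone's managed_pages.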
5528 */ 5529 static void calculate_totalreserve_pages(void) 5530 { 5531 struct pglist_data *pgdat; 5532 unsigned long reserve_pages = 0; 5533 enum zone_type i, j; 5534 5535 for_each_online_pgdat(pgdat) { 5536 for (i = 0; i < MAX_NR_ZONES; i++) { 5537 struct zone *zone = pgdat->node_zones + i; 5538 unsigned long max = 0; 5539 5540 /* Find valid and maximum lowmem_reserve in the zone */ 5541 for (j = i; j < MAX_NR_ZONES; j++) { 5542 if (zone->lowmem_reserve[j] > max) 5543 max = zone->lowmem_reserve[j]; 5544 } 5545 5546 /* we treat the high watermark as reserved pages. */ 5547 max += high_wmark_pages(zone); 5548 5549 if (max > zone->managed_pages) 5550 max = zone->managed_pages; 5551 reserve_pages += max; 5552 /* 5553 * Lowmem reserves are not available to 5554 * GFP_HIGHUSER page cache allocations and 5555 * kswapd tries to balance zones to their high 5556 * watermark. As a result, neither should be 5557 * regarded as dirtyable memory, to prevent a 5558 * situation where reclaim has to clean pages 5559 * in order to balance the zones. 5560 */ 5561 zone->dirty_balance_reserve = max; 5562 } 5563 } 5564 dirty_balance_reserve = reserve_pages; 5565 totalreserve_pages = reserve_pages; 5566 } 5567 5568 /* 5569 * setup_per_zone_lowmem_reserve - called whenever 5570 * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone 5571 * has a correct pages reserved value, so an adequate number of 5572 * pages are left in the zone after a successful __alloc_pages(). 5573 */ 5574 static void setup_per_zone_lowmem_reserve(void) 5575 { 5576 struct pglist_data *pgdat; 5577 enum zone_type j, idx; 5578 5579 for_each_online_pgdat(pgdat) { 5580 for (j = 0; j < MAX_NR_ZONES; j++) { 5581 struct zone *zone = pgdat->node_zones + j; 5582 unsigned long managed_pages = zone->managed_pages; 5583 5584 zone->lowmem_reserve[j] = 0; 5585 5586 idx = j; 5587 while (idx) { 5588 struct zone *lower_zone; 5589 5590 idx--; 5591 5592 if (sysctl_lowmem_reserve_ratio[idx] < 1) 5593 sysctl_lowmem_reserve_ratio[idx] = 1; 5594 5595 lower_zone = pgdat->node_zones + idx; 5596 lower_zone->lowmem_reserve[j] = managed_pages / 5597 sysctl_lowmem_reserve_ratio[idx]; 5598 managed_pages += lower_zone->managed_pages; 5599 } 5600 } 5601 } 5602 5603 /* update totalreserve_pages */ 5604 calculate_totalreserve_pages(); 5605 } 5606 5607 static void __setup_per_zone_wmarks(void) 5608 { 5609 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5610 unsigned long lowmem_pages = 0; 5611 struct zone *zone; 5612 unsigned long flags; 5613 5614 /* Calculate total number of !ZONE_HIGHMEM pages */ 5615 for_each_zone(zone) { 5616 if (!is_highmem(zone)) 5617 lowmem_pages += zone->managed_pages; 5618 } 5619 5620 for_each_zone(zone) { 5621 u64 tmp; 5622 5623 spin_lock_irqsave(&zone->lock, flags); 5624 tmp = (u64)pages_min * zone->managed_pages; 5625 do_div(tmp, lowmem_pages); 5626 if (is_highmem(zone)) { 5627 /* 5628 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 5629 * need highmem pages, so cap pages_min to a small 5630 * value here. 5631 * 5632 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 5633 * deltas controls asynch page reclaim, and so should 5634 * not be capped for highmem. 5635 */ 5636 unsigned long min_pages; 5637 5638 min_pages = zone->managed_pages / 1024; 5639 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 5640 zone->watermark[WMARK_MIN] = min_pages; 5641 } else { 5642 /* 5643 * If it's a lowmem zone, reserve a number of pages 5644 * proportionate to the zone's size. 
5645 */ 5646 zone->watermark[WMARK_MIN] = tmp; 5647 } 5648 5649 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); 5650 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); 5651 5652 __mod_zone_page_state(zone, NR_ALLOC_BATCH, 5653 high_wmark_pages(zone) - 5654 low_wmark_pages(zone) - 5655 zone_page_state(zone, NR_ALLOC_BATCH)); 5656 5657 setup_zone_migrate_reserve(zone); 5658 spin_unlock_irqrestore(&zone->lock, flags); 5659 } 5660 5661 /* update totalreserve_pages */ 5662 calculate_totalreserve_pages(); 5663 } 5664 5665 /** 5666 * setup_per_zone_wmarks - called when min_free_kbytes changes 5667 * or when memory is hot-{added|removed} 5668 * 5669 * Ensures that the watermark[min,low,high] values for each zone are set 5670 * correctly with respect to min_free_kbytes. 5671 */ 5672 void setup_per_zone_wmarks(void) 5673 { 5674 mutex_lock(&zonelists_mutex); 5675 __setup_per_zone_wmarks(); 5676 mutex_unlock(&zonelists_mutex); 5677 } 5678 5679 /* 5680 * The inactive anon list should be small enough that the VM never has to 5681 * do too much work, but large enough that each inactive page has a chance 5682 * to be referenced again before it is swapped out. 5683 * 5684 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to 5685 * INACTIVE_ANON pages on this zone's LRU, maintained by the 5686 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of 5687 * the anonymous pages are kept on the inactive list. 5688 * 5689 * total target max 5690 * memory ratio inactive anon 5691 * ------------------------------------- 5692 * 10MB 1 5MB 5693 * 100MB 1 50MB 5694 * 1GB 3 250MB 5695 * 10GB 10 0.9GB 5696 * 100GB 31 3GB 5697 * 1TB 101 10GB 5698 * 10TB 320 32GB 5699 */ 5700 static void __meminit calculate_zone_inactive_ratio(struct zone *zone) 5701 { 5702 unsigned int gb, ratio; 5703 5704 /* Zone size in gigabytes */ 5705 gb = zone->managed_pages >> (30 - PAGE_SHIFT); 5706 if (gb) 5707 ratio = int_sqrt(10 * gb); 5708 else 5709 ratio = 1; 5710 5711 zone->inactive_ratio = ratio; 5712 } 5713 5714 static void __meminit setup_per_zone_inactive_ratio(void) 5715 { 5716 struct zone *zone; 5717 5718 for_each_zone(zone) 5719 calculate_zone_inactive_ratio(zone); 5720 } 5721 5722 /* 5723 * Initialise min_free_kbytes. 5724 * 5725 * For small machines we want it small (128k min). For large machines 5726 * we want it large (64MB max). But it is not linear, because network 5727 * bandwidth does not increase linearly with machine size. 
We use 5728 * 5729 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 5730 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 5731 * 5732 * which yields 5733 * 5734 * 16MB: 512k 5735 * 32MB: 724k 5736 * 64MB: 1024k 5737 * 128MB: 1448k 5738 * 256MB: 2048k 5739 * 512MB: 2896k 5740 * 1024MB: 4096k 5741 * 2048MB: 5792k 5742 * 4096MB: 8192k 5743 * 8192MB: 11584k 5744 * 16384MB: 16384k 5745 */ 5746 int __meminit init_per_zone_wmark_min(void) 5747 { 5748 unsigned long lowmem_kbytes; 5749 int new_min_free_kbytes; 5750 5751 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 5752 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 5753 5754 if (new_min_free_kbytes > user_min_free_kbytes) { 5755 min_free_kbytes = new_min_free_kbytes; 5756 if (min_free_kbytes < 128) 5757 min_free_kbytes = 128; 5758 if (min_free_kbytes > 65536) 5759 min_free_kbytes = 65536; 5760 } else { 5761 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 5762 new_min_free_kbytes, user_min_free_kbytes); 5763 } 5764 setup_per_zone_wmarks(); 5765 refresh_zone_stat_thresholds(); 5766 setup_per_zone_lowmem_reserve(); 5767 setup_per_zone_inactive_ratio(); 5768 return 0; 5769 } 5770 module_init(init_per_zone_wmark_min) 5771 5772 /* 5773 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 5774 * that we can call two helper functions whenever min_free_kbytes 5775 * changes. 5776 */ 5777 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 5778 void __user *buffer, size_t *length, loff_t *ppos) 5779 { 5780 int rc; 5781 5782 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5783 if (rc) 5784 return rc; 5785 5786 if (write) { 5787 user_min_free_kbytes = min_free_kbytes; 5788 setup_per_zone_wmarks(); 5789 } 5790 return 0; 5791 } 5792 5793 #ifdef CONFIG_NUMA 5794 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write, 5795 void __user *buffer, size_t *length, loff_t *ppos) 5796 { 5797 struct zone *zone; 5798 int rc; 5799 5800 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5801 if (rc) 5802 return rc; 5803 5804 for_each_zone(zone) 5805 zone->min_unmapped_pages = (zone->managed_pages * 5806 sysctl_min_unmapped_ratio) / 100; 5807 return 0; 5808 } 5809 5810 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write, 5811 void __user *buffer, size_t *length, loff_t *ppos) 5812 { 5813 struct zone *zone; 5814 int rc; 5815 5816 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5817 if (rc) 5818 return rc; 5819 5820 for_each_zone(zone) 5821 zone->min_slab_pages = (zone->managed_pages * 5822 sysctl_min_slab_ratio) / 100; 5823 return 0; 5824 } 5825 #endif 5826 5827 /* 5828 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 5829 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 5830 * whenever sysctl_lowmem_reserve_ratio changes. 5831 * 5832 * The reserve ratio obviously has absolutely no relation with the 5833 * minimum watermarks. The lowmem reserve ratio can only make sense 5834 * if in function of the boot time zone sizes. 5835 */ 5836 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write, 5837 void __user *buffer, size_t *length, loff_t *ppos) 5838 { 5839 proc_dointvec_minmax(table, write, buffer, length, ppos); 5840 setup_per_zone_lowmem_reserve(); 5841 return 0; 5842 } 5843 5844 /* 5845 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 5846 * cpu. 
It is the fraction of total pages in each zone that a hot per cpu 5847 * pagelist can have before it gets flushed back to buddy allocator. 5848 */ 5849 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, 5850 void __user *buffer, size_t *length, loff_t *ppos) 5851 { 5852 struct zone *zone; 5853 unsigned int cpu; 5854 int ret; 5855 5856 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5857 if (!write || (ret < 0)) 5858 return ret; 5859 5860 mutex_lock(&pcp_batch_high_lock); 5861 for_each_populated_zone(zone) { 5862 unsigned long high; 5863 high = zone->managed_pages / percpu_pagelist_fraction; 5864 for_each_possible_cpu(cpu) 5865 pageset_set_high(per_cpu_ptr(zone->pageset, cpu), 5866 high); 5867 } 5868 mutex_unlock(&pcp_batch_high_lock); 5869 return 0; 5870 } 5871 5872 int hashdist = HASHDIST_DEFAULT; 5873 5874 #ifdef CONFIG_NUMA 5875 static int __init set_hashdist(char *str) 5876 { 5877 if (!str) 5878 return 0; 5879 hashdist = simple_strtoul(str, &str, 0); 5880 return 1; 5881 } 5882 __setup("hashdist=", set_hashdist); 5883 #endif 5884 5885 /* 5886 * allocate a large system hash table from bootmem 5887 * - it is assumed that the hash table must contain an exact power-of-2 5888 * quantity of entries 5889 * - limit is the number of hash buckets, not the total allocation size 5890 */ 5891 void *__init alloc_large_system_hash(const char *tablename, 5892 unsigned long bucketsize, 5893 unsigned long numentries, 5894 int scale, 5895 int flags, 5896 unsigned int *_hash_shift, 5897 unsigned int *_hash_mask, 5898 unsigned long low_limit, 5899 unsigned long high_limit) 5900 { 5901 unsigned long long max = high_limit; 5902 unsigned long log2qty, size; 5903 void *table = NULL; 5904 5905 /* allow the kernel cmdline to have a say */ 5906 if (!numentries) { 5907 /* round applicable memory size up to nearest megabyte */ 5908 numentries = nr_kernel_pages; 5909 5910 /* It isn't necessary when PAGE_SIZE >= 1MB */ 5911 if (PAGE_SHIFT < 20) 5912 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 5913 5914 /* limit to 1 bucket per 2^scale bytes of low memory */ 5915 if (scale > PAGE_SHIFT) 5916 numentries >>= (scale - PAGE_SHIFT); 5917 else 5918 numentries <<= (PAGE_SHIFT - scale); 5919 5920 /* Make sure we've got at least a 0-order allocation.. 
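 (with HASH_SMALL, the caller-supplied *_hash_shift instead acts as a floor on the number of entries, so the table may stay smaller than a page)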
*/ 5921 if (unlikely(flags & HASH_SMALL)) { 5922 /* Makes no sense without HASH_EARLY */ 5923 WARN_ON(!(flags & HASH_EARLY)); 5924 if (!(numentries >> *_hash_shift)) { 5925 numentries = 1UL << *_hash_shift; 5926 BUG_ON(!numentries); 5927 } 5928 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 5929 numentries = PAGE_SIZE / bucketsize; 5930 } 5931 numentries = roundup_pow_of_two(numentries); 5932 5933 /* limit allocation size to 1/16 total memory by default */ 5934 if (max == 0) { 5935 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 5936 do_div(max, bucketsize); 5937 } 5938 max = min(max, 0x80000000ULL); 5939 5940 if (numentries < low_limit) 5941 numentries = low_limit; 5942 if (numentries > max) 5943 numentries = max; 5944 5945 log2qty = ilog2(numentries); 5946 5947 do { 5948 size = bucketsize << log2qty; 5949 if (flags & HASH_EARLY) 5950 table = memblock_virt_alloc_nopanic(size, 0); 5951 else if (hashdist) 5952 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 5953 else { 5954 /* 5955 * If bucketsize is not a power-of-two, we may free 5956 * some pages at the end of hash table which 5957 * alloc_pages_exact() automatically does 5958 */ 5959 if (get_order(size) < MAX_ORDER) { 5960 table = alloc_pages_exact(size, GFP_ATOMIC); 5961 kmemleak_alloc(table, size, 1, GFP_ATOMIC); 5962 } 5963 } 5964 } while (!table && size > PAGE_SIZE && --log2qty); 5965 5966 if (!table) 5967 panic("Failed to allocate %s hash table\n", tablename); 5968 5969 printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n", 5970 tablename, 5971 (1UL << log2qty), 5972 ilog2(size) - PAGE_SHIFT, 5973 size); 5974 5975 if (_hash_shift) 5976 *_hash_shift = log2qty; 5977 if (_hash_mask) 5978 *_hash_mask = (1 << log2qty) - 1; 5979 5980 return table; 5981 } 5982 5983 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 5984 static inline unsigned long *get_pageblock_bitmap(struct zone *zone, 5985 unsigned long pfn) 5986 { 5987 #ifdef CONFIG_SPARSEMEM 5988 return __pfn_to_section(pfn)->pageblock_flags; 5989 #else 5990 return zone->pageblock_flags; 5991 #endif /* CONFIG_SPARSEMEM */ 5992 } 5993 5994 static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn) 5995 { 5996 #ifdef CONFIG_SPARSEMEM 5997 pfn &= (PAGES_PER_SECTION-1); 5998 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 5999 #else 6000 pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages); 6001 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 6002 #endif /* CONFIG_SPARSEMEM */ 6003 } 6004 6005 /** 6006 * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages 6007 * @page: The page within the block of interest 6008 * @start_bitidx: The first bit of interest to retrieve 6009 * @end_bitidx: The last bit of interest 6010 * returns pageblock_bits flags 6011 */ 6012 unsigned long get_pageblock_flags_group(struct page *page, 6013 int start_bitidx, int end_bitidx) 6014 { 6015 struct zone *zone; 6016 unsigned long *bitmap; 6017 unsigned long pfn, bitidx; 6018 unsigned long flags = 0; 6019 unsigned long value = 1; 6020 6021 zone = page_zone(page); 6022 pfn = page_to_pfn(page); 6023 bitmap = get_pageblock_bitmap(zone, pfn); 6024 bitidx = pfn_to_bitidx(zone, pfn); 6025 6026 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 6027 if (test_bit(bitidx + start_bitidx, bitmap)) 6028 flags |= value; 6029 6030 return flags; 6031 } 6032 6033 /** 6034 * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block 
of pages 6035 * @page: The page within the block of interest 6036 * @start_bitidx: The first bit of interest 6037 * @end_bitidx: The last bit of interest 6038 * @flags: The flags to set 6039 */ 6040 void set_pageblock_flags_group(struct page *page, unsigned long flags, 6041 int start_bitidx, int end_bitidx) 6042 { 6043 struct zone *zone; 6044 unsigned long *bitmap; 6045 unsigned long pfn, bitidx; 6046 unsigned long value = 1; 6047 6048 zone = page_zone(page); 6049 pfn = page_to_pfn(page); 6050 bitmap = get_pageblock_bitmap(zone, pfn); 6051 bitidx = pfn_to_bitidx(zone, pfn); 6052 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page); 6053 6054 for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) 6055 if (flags & value) 6056 __set_bit(bitidx + start_bitidx, bitmap); 6057 else 6058 __clear_bit(bitidx + start_bitidx, bitmap); 6059 } 6060 6061 /* 6062 * This function checks whether the pageblock includes unmovable pages or not. 6063 * If @count is not zero, it is okay to include fewer than @count unmovable pages. 6064 * 6065 * The PageLRU check without isolation or lru_lock could race, so a 6066 * MIGRATE_MOVABLE block might include unmovable pages. This function 6067 * therefore cannot be expected to be exact. 6068 */ 6069 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, 6070 bool skip_hwpoisoned_pages) 6071 { 6072 unsigned long pfn, iter, found; 6073 int mt; 6074 6075 /* 6076 * To avoid noisy data, lru_add_drain_all() should be called beforehand. 6077 * A ZONE_MOVABLE zone never contains unmovable pages. 6078 */ 6079 if (zone_idx(zone) == ZONE_MOVABLE) 6080 return false; 6081 mt = get_pageblock_migratetype(page); 6082 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt)) 6083 return false; 6084 6085 pfn = page_to_pfn(page); 6086 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { 6087 unsigned long check = pfn + iter; 6088 6089 if (!pfn_valid_within(check)) 6090 continue; 6091 6092 page = pfn_to_page(check); 6093 6094 /* 6095 * Hugepages are not in LRU lists, but they're movable. 6096 * We need not scan over tail pages because we don't 6097 * handle each tail page individually in migration. 6098 */ 6099 if (PageHuge(page)) { 6100 iter = round_up(iter + 1, 1<<compound_order(page)) - 1; 6101 continue; 6102 } 6103 6104 /* 6105 * We can't use page_count without pinning the page, 6106 * because another CPU can free the compound page. 6107 * This check already skips compound tails of THP 6108 * because their page->_count is zero at all times. 6109 */ 6110 if (!atomic_read(&page->_count)) { 6111 if (PageBuddy(page)) 6112 iter += (1 << page_order(page)) - 1; 6113 continue; 6114 } 6115 6116 /* 6117 * An HWPoisoned page may not be in the buddy system, and 6118 * its page_count() is not 0. 6119 */ 6120 if (skip_hwpoisoned_pages && PageHWPoison(page)) 6121 continue; 6122 6123 if (!PageLRU(page)) 6124 found++; 6125 /* 6126 * If there are RECLAIMABLE pages, we would need to check them 6127 * as well. But memory offline itself does not call shrink_slab() 6128 * yet; this still needs to be fixed. 6129 */ 6130 /* 6131 * If the page is not RAM, page_count() should be 0; 6132 * no further checks are needed, this is a _used_ not-movable page. 6133 * 6134 * The problematic thing here is PG_reserved pages. PG_reserved 6135 * is set both on memory hole pages and on _used_ kernel 6136 * pages at boot.
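 *
 * Non-LRU pages are counted via 'found' above either way, and once more
 * than @count of them have been seen the block is reported as containing
 * unmovable pages below.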
6137 */ 6138 if (found > count) 6139 return true; 6140 } 6141 return false; 6142 } 6143 6144 bool is_pageblock_removable_nolock(struct page *page) 6145 { 6146 struct zone *zone; 6147 unsigned long pfn; 6148 6149 /* 6150 * We have to be careful here because we are iterating over memory 6151 * sections which are not zone aware so we might end up outside of 6152 * the zone but still within the section. 6153 * We have to take care about the node as well. If the node is offline 6154 * its NODE_DATA will be NULL - see page_zone. 6155 */ 6156 if (!node_online(page_to_nid(page))) 6157 return false; 6158 6159 zone = page_zone(page); 6160 pfn = page_to_pfn(page); 6161 if (!zone_spans_pfn(zone, pfn)) 6162 return false; 6163 6164 return !has_unmovable_pages(zone, page, 0, true); 6165 } 6166 6167 #ifdef CONFIG_CMA 6168 6169 static unsigned long pfn_max_align_down(unsigned long pfn) 6170 { 6171 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, 6172 pageblock_nr_pages) - 1); 6173 } 6174 6175 static unsigned long pfn_max_align_up(unsigned long pfn) 6176 { 6177 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, 6178 pageblock_nr_pages)); 6179 } 6180 6181 /* [start, end) must belong to a single zone. */ 6182 static int __alloc_contig_migrate_range(struct compact_control *cc, 6183 unsigned long start, unsigned long end) 6184 { 6185 /* This function is based on compact_zone() from compaction.c. */ 6186 unsigned long nr_reclaimed; 6187 unsigned long pfn = start; 6188 unsigned int tries = 0; 6189 int ret = 0; 6190 6191 migrate_prep(); 6192 6193 while (pfn < end || !list_empty(&cc->migratepages)) { 6194 if (fatal_signal_pending(current)) { 6195 ret = -EINTR; 6196 break; 6197 } 6198 6199 if (list_empty(&cc->migratepages)) { 6200 cc->nr_migratepages = 0; 6201 pfn = isolate_migratepages_range(cc->zone, cc, 6202 pfn, end, true); 6203 if (!pfn) { 6204 ret = -EINTR; 6205 break; 6206 } 6207 tries = 0; 6208 } else if (++tries == 5) { 6209 ret = ret < 0 ? ret : -EBUSY; 6210 break; 6211 } 6212 6213 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6214 &cc->migratepages); 6215 cc->nr_migratepages -= nr_reclaimed; 6216 6217 ret = migrate_pages(&cc->migratepages, alloc_migrate_target, 6218 0, MIGRATE_SYNC, MR_CMA); 6219 } 6220 if (ret < 0) { 6221 putback_movable_pages(&cc->migratepages); 6222 return ret; 6223 } 6224 return 0; 6225 } 6226 6227 /** 6228 * alloc_contig_range() -- tries to allocate the given range of pages 6229 * @start: start PFN to allocate 6230 * @end: one-past-the-last PFN to allocate 6231 * @migratetype: migratetype of the underlying pageblocks (either 6232 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6233 * in range must have the same migratetype and it must 6234 * be either of the two. 6235 * 6236 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES 6237 * aligned; however, it's the caller's responsibility to guarantee that 6238 * we are the only thread that changes the migrate type of the pageblocks 6239 * the pages fall in. 6240 * 6241 * The PFN range must belong to a single zone. 6242 * 6243 * Returns zero on success or a negative error code. On success all 6244 * pages whose PFN is in [start, end) are allocated for the caller and 6245 * need to be freed with free_contig_range().
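 *
 * A minimal usage sketch (illustrative only; the caller shown here and
 * its helper are hypothetical):
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 *	if (!ret) {
 *		use_the_range(pfn, nr_pages);	(hypothetical helper)
 *		free_contig_range(pfn, nr_pages);
 *	}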
6246 */ 6247 int alloc_contig_range(unsigned long start, unsigned long end, 6248 unsigned migratetype) 6249 { 6250 unsigned long outer_start, outer_end; 6251 int ret = 0, order; 6252 6253 struct compact_control cc = { 6254 .nr_migratepages = 0, 6255 .order = -1, 6256 .zone = page_zone(pfn_to_page(start)), 6257 .sync = true, 6258 .ignore_skip_hint = true, 6259 }; 6260 INIT_LIST_HEAD(&cc.migratepages); 6261 6262 /* 6263 * What we do here is mark all pageblocks in the range as 6264 * MIGRATE_ISOLATE. Because pageblocks and max order pages may 6265 * have different sizes, and due to the way the page allocator 6266 * works, we align the range to the bigger of the two so 6267 * that the page allocator won't try to merge buddies from 6268 * different pageblocks and change MIGRATE_ISOLATE to some 6269 * other migration type. 6270 * 6271 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6272 * migrate the pages from the unaligned range (i.e. the pages that 6273 * we are interested in). This puts all the pages in the 6274 * range back into the page allocator as MIGRATE_ISOLATE. 6275 * 6276 * When this is done, we take the pages in the range from the page 6277 * allocator, removing them from the buddy system. This way the 6278 * page allocator will never consider using them. 6279 * 6280 * This lets us mark the pageblocks back as 6281 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6282 * aligned range but not in the unaligned, original range are 6283 * put back to the page allocator so that the buddy can use them. 6284 */ 6285 6286 ret = start_isolate_page_range(pfn_max_align_down(start), 6287 pfn_max_align_up(end), migratetype, 6288 false); 6289 if (ret) 6290 return ret; 6291 6292 ret = __alloc_contig_migrate_range(&cc, start, end); 6293 if (ret) 6294 goto done; 6295 6296 /* 6297 * Pages from [start, end) are within MAX_ORDER_NR_PAGES 6298 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6299 * more, all pages in [start, end) are free in the page allocator. 6300 * What we are going to do is allocate all pages from 6301 * [start, end) (that is, remove them from the page allocator). 6302 * 6303 * The only problem is that pages at the beginning and at the 6304 * end of the range of interest may not be aligned with the pages 6305 * the page allocator holds, i.e. they can be part of higher order 6306 * pages. Because of this, we reserve the bigger range and 6307 * once this is done free the pages we are not interested in. 6308 * 6309 * We don't have to hold zone->lock here because the pages are 6310 * isolated and thus won't get removed from the buddy. 6311 */ 6312 6313 lru_add_drain_all(); 6314 drain_all_pages(); 6315 6316 order = 0; 6317 outer_start = start; 6318 while (!PageBuddy(pfn_to_page(outer_start))) { 6319 if (++order >= MAX_ORDER) { 6320 ret = -EBUSY; 6321 goto done; 6322 } 6323 outer_start &= ~0UL << order; 6324 } 6325 6326 /* Make sure the range is really isolated. */ 6327 if (test_pages_isolated(outer_start, end, false)) { 6328 pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n", 6329 outer_start, end); 6330 ret = -EBUSY; 6331 goto done; 6332 } 6333 6334 6335 /* Grab isolated pages from freelists.
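 Everything in [outer_start, end) that is still free is pulled out of the buddy lists here; the head and tail pages outside the requested [start, end) are handed straight back just below.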
*/ 6336 outer_end = isolate_freepages_range(&cc, outer_start, end); 6337 if (!outer_end) { 6338 ret = -EBUSY; 6339 goto done; 6340 } 6341 6342 /* Free head and tail (if any) */ 6343 if (start != outer_start) 6344 free_contig_range(outer_start, start - outer_start); 6345 if (end != outer_end) 6346 free_contig_range(end, outer_end - end); 6347 6348 done: 6349 undo_isolate_page_range(pfn_max_align_down(start), 6350 pfn_max_align_up(end), migratetype); 6351 return ret; 6352 } 6353 6354 void free_contig_range(unsigned long pfn, unsigned nr_pages) 6355 { 6356 unsigned int count = 0; 6357 6358 for (; nr_pages--; pfn++) { 6359 struct page *page = pfn_to_page(pfn); 6360 6361 count += page_count(page) != 1; 6362 __free_page(page); 6363 } 6364 WARN(count != 0, "%d pages are still in use!\n", count); 6365 } 6366 #endif 6367 6368 #ifdef CONFIG_MEMORY_HOTPLUG 6369 /* 6370 * The zone indicated has a new number of managed_pages; batch sizes and percpu 6371 * page high values need to be recalculated. 6372 */ 6373 void __meminit zone_pcp_update(struct zone *zone) 6374 { 6375 unsigned cpu; 6376 mutex_lock(&pcp_batch_high_lock); 6377 for_each_possible_cpu(cpu) 6378 pageset_set_high_and_batch(zone, 6379 per_cpu_ptr(zone->pageset, cpu)); 6380 mutex_unlock(&pcp_batch_high_lock); 6381 } 6382 #endif 6383 6384 void zone_pcp_reset(struct zone *zone) 6385 { 6386 unsigned long flags; 6387 int cpu; 6388 struct per_cpu_pageset *pset; 6389 6390 /* avoid races with drain_pages() */ 6391 local_irq_save(flags); 6392 if (zone->pageset != &boot_pageset) { 6393 for_each_online_cpu(cpu) { 6394 pset = per_cpu_ptr(zone->pageset, cpu); 6395 drain_zonestat(zone, pset); 6396 } 6397 free_percpu(zone->pageset); 6398 zone->pageset = &boot_pageset; 6399 } 6400 local_irq_restore(flags); 6401 } 6402 6403 #ifdef CONFIG_MEMORY_HOTREMOVE 6404 /* 6405 * All pages in the range must be isolated before calling this. 6406 */ 6407 void 6408 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 6409 { 6410 struct page *page; 6411 struct zone *zone; 6412 int order, i; 6413 unsigned long pfn; 6414 unsigned long flags; 6415 /* find the first valid pfn */ 6416 for (pfn = start_pfn; pfn < end_pfn; pfn++) 6417 if (pfn_valid(pfn)) 6418 break; 6419 if (pfn == end_pfn) 6420 return; 6421 zone = page_zone(pfn_to_page(pfn)); 6422 spin_lock_irqsave(&zone->lock, flags); 6423 pfn = start_pfn; 6424 while (pfn < end_pfn) { 6425 if (!pfn_valid(pfn)) { 6426 pfn++; 6427 continue; 6428 } 6429 page = pfn_to_page(pfn); 6430 /* 6431 * An HWPoisoned page may not be in the buddy system, and 6432 * its page_count() is not 0.
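 * Such a page is skipped and simply marked PG_reserved below instead of
 * being taken off a free list.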
6433 */ 6434 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6435 pfn++; 6436 SetPageReserved(page); 6437 continue; 6438 } 6439 6440 BUG_ON(page_count(page)); 6441 BUG_ON(!PageBuddy(page)); 6442 order = page_order(page); 6443 #ifdef CONFIG_DEBUG_VM 6444 printk(KERN_INFO "remove from free list %lx %d %lx\n", 6445 pfn, 1 << order, end_pfn); 6446 #endif 6447 list_del(&page->lru); 6448 rmv_page_order(page); 6449 zone->free_area[order].nr_free--; 6450 for (i = 0; i < (1 << order); i++) 6451 SetPageReserved((page+i)); 6452 pfn += (1 << order); 6453 } 6454 spin_unlock_irqrestore(&zone->lock, flags); 6455 } 6456 #endif 6457 6458 #ifdef CONFIG_MEMORY_FAILURE 6459 bool is_free_buddy_page(struct page *page) 6460 { 6461 struct zone *zone = page_zone(page); 6462 unsigned long pfn = page_to_pfn(page); 6463 unsigned long flags; 6464 int order; 6465 6466 spin_lock_irqsave(&zone->lock, flags); 6467 for (order = 0; order < MAX_ORDER; order++) { 6468 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6469 6470 if (PageBuddy(page_head) && page_order(page_head) >= order) 6471 break; 6472 } 6473 spin_unlock_irqrestore(&zone->lock, flags); 6474 6475 return order < MAX_ORDER; 6476 } 6477 #endif 6478 6479 static const struct trace_print_flags pageflag_names[] = { 6480 {1UL << PG_locked, "locked" }, 6481 {1UL << PG_error, "error" }, 6482 {1UL << PG_referenced, "referenced" }, 6483 {1UL << PG_uptodate, "uptodate" }, 6484 {1UL << PG_dirty, "dirty" }, 6485 {1UL << PG_lru, "lru" }, 6486 {1UL << PG_active, "active" }, 6487 {1UL << PG_slab, "slab" }, 6488 {1UL << PG_owner_priv_1, "owner_priv_1" }, 6489 {1UL << PG_arch_1, "arch_1" }, 6490 {1UL << PG_reserved, "reserved" }, 6491 {1UL << PG_private, "private" }, 6492 {1UL << PG_private_2, "private_2" }, 6493 {1UL << PG_writeback, "writeback" }, 6494 #ifdef CONFIG_PAGEFLAGS_EXTENDED 6495 {1UL << PG_head, "head" }, 6496 {1UL << PG_tail, "tail" }, 6497 #else 6498 {1UL << PG_compound, "compound" }, 6499 #endif 6500 {1UL << PG_swapcache, "swapcache" }, 6501 {1UL << PG_mappedtodisk, "mappedtodisk" }, 6502 {1UL << PG_reclaim, "reclaim" }, 6503 {1UL << PG_swapbacked, "swapbacked" }, 6504 {1UL << PG_unevictable, "unevictable" }, 6505 #ifdef CONFIG_MMU 6506 {1UL << PG_mlocked, "mlocked" }, 6507 #endif 6508 #ifdef CONFIG_ARCH_USES_PG_UNCACHED 6509 {1UL << PG_uncached, "uncached" }, 6510 #endif 6511 #ifdef CONFIG_MEMORY_FAILURE 6512 {1UL << PG_hwpoison, "hwpoison" }, 6513 #endif 6514 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6515 {1UL << PG_compound_lock, "compound_lock" }, 6516 #endif 6517 }; 6518 6519 static void dump_page_flags(unsigned long flags) 6520 { 6521 const char *delim = ""; 6522 unsigned long mask; 6523 int i; 6524 6525 BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS); 6526 6527 printk(KERN_ALERT "page flags: %#lx(", flags); 6528 6529 /* remove zone id */ 6530 flags &= (1UL << NR_PAGEFLAGS) - 1; 6531 6532 for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) { 6533 6534 mask = pageflag_names[i].mask; 6535 if ((flags & mask) != mask) 6536 continue; 6537 6538 flags &= ~mask; 6539 printk("%s%s", delim, pageflag_names[i].name); 6540 delim = "|"; 6541 } 6542 6543 /* check for left over flags */ 6544 if (flags) 6545 printk("%s%#lx", delim, flags); 6546 6547 printk(")\n"); 6548 } 6549 6550 void dump_page_badflags(struct page *page, const char *reason, 6551 unsigned long badflags) 6552 { 6553 printk(KERN_ALERT 6554 "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", 6555 page, atomic_read(&page->_count), page_mapcount(page), 6556 page->mapping, 
page->index); 6557 dump_page_flags(page->flags); 6558 if (reason) 6559 pr_alert("page dumped because: %s\n", reason); 6560 if (page->flags & badflags) { 6561 pr_alert("bad because of flags:\n"); 6562 dump_page_flags(page->flags & badflags); 6563 } 6564 mem_cgroup_print_bad_page(page); 6565 } 6566 6567 void dump_page(struct page *page, const char *reason) 6568 { 6569 dump_page_badflags(page, reason, 0); 6570 } 6571 EXPORT_SYMBOL(dump_page); 6572
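
/*
 * Illustrative note: a hypothetical caller that trips over a suspicious
 * page would typically report it with something like
 *
 *	if (unlikely(page_mapcount(page) < 0))
 *		dump_page(page, "negative mapcount");
 *
 * while dump_page_badflags() additionally highlights the flag bits the
 * caller considers bad.
 */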