/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *	(lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page_ext.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
		return true;

	return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
	if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	unsigned long max_initialise;

	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	/*
	 * Initialise at least 2G of a node but also take into account the
	 * two large system hashes that can take up 1GB for 0.25TB/node.
	 */
	max_initialise = max(2UL << (30 - PAGE_SHIFT),
		(pgdat->node_spanned_pages >> 8));

	(*nr_initialised)++;
	if ((*nr_initialised > max_initialise) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
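
/*
 * Worked example of the bit indexing above, assuming a configuration with
 * pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4 (illustrative values):
 * a pfn whose offset within its section (or zone) is 0x1200 belongs to
 * pageblock 0x1200 >> 9 == 9, so pfn_to_bitidx() returns 9 * 4 == 36.
 * With 64-bit longs, that pageblock's flags then live in bitmap word
 * 36 / 64 == 0, starting at bit offset 36 within that word.
 */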

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
						bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits is a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops = { NULL, };
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	unsigned int max_order;

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	page_idx = pfn & ((1 << MAX_ORDER) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	if (max_order < MAX_ORDER) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_idx = __find_buddy_index(page_idx, order);
			buddy = page + (buddy_idx - page_idx);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}
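
/*
 * Worked example of the merge loop above, assuming __find_buddy_index()
 * is the usual page_idx ^ (1 << order) computation: freeing an order-0
 * page at page_idx 12 yields buddy_idx 12 ^ 1 == 13. If that buddy is
 * free, combined_idx == 13 & 12 == 12, the pair becomes an order-1 block
 * at index 12, and the next candidate buddy is 12 ^ 2 == 14. Merging
 * continues this way until a buddy is not free (or fails page_is_buddy())
 * or max_order - 1 is reached, at which point the block is placed on the
 * matching free list in done_merging.
 */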

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static void free_pages_check_bad(struct page *page)
{
	const char *bad_reason;
	unsigned long bad_flags;

	bad_reason = NULL;
	bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	free_pages_check_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping is compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount", 0);
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * page_deferred_list().next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page", 0);
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_pages_check(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageAnonHead(page))
		page->mapping = NULL;
	if (check_free)
		bad += free_pages_check(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_poison_pages(page, 1 << order, 0);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
	return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_pages_check(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone, and of the same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	unsigned long nr_scanned;
	bool isolated_pageblocks;

	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (count) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = count;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_last_entry(list, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);

			mt = get_pcppage_migratetype(page);
			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			if (bulkfree_pcp_prepare(page))
				continue;

			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--count && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
					int nid)
{
	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_pfn(pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order, true))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

static void __init __free_pages_boot_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = 0;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
					struct mminit_pfnnid_cache *state)
{
	int nid;

	nid = __early_pfn_to_nid(pfn, state);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}

/* Only safe to use early in boot when initialisation is single-threaded */
static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
}

#else

static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return true;
}
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
					struct mminit_pfnnid_cache *state)
{
	return true;
}
#endif


void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	return __free_pages_boot_core(page, order);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			 block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(struct page *page,
					unsigned long pfn, int nr_pages)
{
	int i;

	if (!page)
		return;

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == MAX_ORDER_NR_PAGES &&
	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_boot_core(page, MAX_ORDER-1);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++)
		__free_pages_boot_core(page, 0);
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}
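
/*
 * The two helpers above implement a simple join: page_alloc_init_late()
 * sets pgdat_init_n_undone to the number of N_MEMORY nodes before spawning
 * one "pgdatinit" kthread per node, each thread calls
 * pgdat_init_report_one_done() exactly once when it finishes, and the last
 * thread to finish completes pgdat_init_all_done_comp, releasing the
 * waiter in page_alloc_init_late().
 */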

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	int nid = pgdat->node_id;
	struct mminit_pfnnid_cache nid_init_state = { };
	unsigned long start = jiffies;
	unsigned long nr_pages = 0;
	unsigned long walk_start, walk_end;
	int i, zid;
	struct zone *zone;
	unsigned long first_init_pfn = pgdat->first_deferred_pfn;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (first_init_pfn == ULONG_MAX) {
		pgdat_init_report_one_done();
		return 0;
	}

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
		unsigned long pfn, end_pfn;
		struct page *page = NULL;
		struct page *free_base_page = NULL;
		unsigned long free_base_pfn = 0;
		int nr_to_free = 0;

		end_pfn = min(walk_end, zone_end_pfn(zone));
		pfn = first_init_pfn;
		if (pfn < walk_start)
			pfn = walk_start;
		if (pfn < zone->zone_start_pfn)
			pfn = zone->zone_start_pfn;

		for (; pfn < end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				goto free_range;

			/*
			 * Ensure pfn_valid is checked every
			 * MAX_ORDER_NR_PAGES for memory holes
			 */
			if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
				if (!pfn_valid(pfn)) {
					page = NULL;
					goto free_range;
				}
			}

			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
				page = NULL;
				goto free_range;
			}

			/* Minimise pfn page lookups and scheduler checks */
			if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
				page++;
			} else {
				nr_pages += nr_to_free;
				deferred_free_range(free_base_page,
						free_base_pfn, nr_to_free);
				free_base_page = NULL;
				free_base_pfn = nr_to_free = 0;

				page = pfn_to_page(pfn);
				cond_resched();
			}

			if (page->flags) {
				VM_BUG_ON(page_zone(page) != zone);
				goto free_range;
			}

			__init_single_page(page, pfn, zid, nid);
			if (!free_base_page) {
				free_base_page = page;
				free_base_pfn = pfn;
				nr_to_free = 0;
			}
			nr_to_free++;

			/* Where possible, batch up pages for a single free */
			continue;
free_range:
			/* Free the current block of pages to allocator */
			nr_pages += nr_to_free;
			deferred_free_range(free_base_page, free_base_pfn,
						nr_to_free);
			free_base_page = NULL;
			free_base_pfn = nr_to_free = 0;
		}

		first_init_pfn = max(end_pfn, first_init_pfn);
	}

	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
					jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void __init page_alloc_init_late(void)
{
	struct zone *zone;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	int nid;

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
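
/*
 * Illustrative numbers for the split above: on a hypothetical configuration
 * where pageblock_order is 13 but MAX_ORDER is 11 (MAX_ORDER_NR_PAGES ==
 * 1024), the loop frees the 8192-page pageblock as 8 naturally aligned
 * order-(MAX_ORDER - 1) chunks. In the common case where pageblock_order
 * is below MAX_ORDER, the whole pageblock is freed as a single
 * order-pageblock_order allocation instead.
 */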
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
			debug_guardpage_enabled() &&
			high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard page(s); this allows merging back
			 * into the allocator when the buddy is freed. The
			 * corresponding page table entries are not touched,
			 * so the pages stay not present in the virtual
			 * address space.
			 */
			set_page_guard(zone, &page[size], high, migratetype);
			continue;
		}
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

static void check_new_page_bad(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & __PG_HWPOISON)) {
		bad_reason = "HWPoisoned (hardware-corrupted)";
		bad_flags = __PG_HWPOISON;
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static inline bool free_pages_prezeroed(bool poisoned)
{
	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
		page_poisoning_enabled() && poisoned;
}

#ifdef CONFIG_DEBUG_VM
static bool check_pcp_refill(struct page *page)
{
	return false;
}

static bool check_new_pcp(struct page *page)
{
	return check_new_page(page);
}
#else
static bool check_pcp_refill(struct page *page)
{
	return check_new_page(page);
}
static bool check_new_pcp(struct page *page)
{
	return false;
}
#endif /* CONFIG_DEBUG_VM */

static bool check_new_pages(struct page *page, unsigned int order)
{
	int i;
	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return true;
	}

	return false;
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	int i;
	bool poisoned = true;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (poisoned)
			poisoned &= page_is_poisoned(p);
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);
	kasan_alloc_pages(page, order);

	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	set_page_owner(page, order, gfp_flags);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = list_first_entry_or_null(&area->free_list[migratetype],
							struct page, lru);
		if (!page)
			continue;
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		set_pcppage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
#ifdef CONFIG_CMA
	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
#endif
};

#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif
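
/*
 * Fallback walk example: when a MIGRATE_UNMOVABLE request finds its own
 * free lists empty, the fallback code tries MIGRATE_RECLAIMABLE first and
 * then MIGRATE_MOVABLE, stopping at the MIGRATE_TYPES sentinel. Note that
 * MIGRATE_CMA never appears in fallbacks[]; CMA pageblocks are only taken
 * through the separate __rmqueue_cma_fallback() path above.
 */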

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned int order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * Leaving this order check here is intentional, although there is
	 * a more relaxed order check below. The reason is that we can
	 * actually steal the whole pageblock if this condition is met,
	 * but the check below doesn't guarantee it and is just a heuristic,
	 * so it could be changed anytime.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}
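
/*
 * For example, assuming pageblock_order == 9 (512-page pageblocks):
 * MIGRATE_UNMOVABLE and MIGRATE_RECLAIMABLE requests may steal from a
 * fallback pageblock regardless of the page order found, while a
 * MIGRATE_MOVABLE request only steals when the fallback page has order
 * pageblock_order / 2 == 4 or higher (or when grouping by mobility is
 * disabled). An order >= 9 page always covers the whole pageblock.
 */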
1937 */ 1938 static void steal_suitable_fallback(struct zone *zone, struct page *page, 1939 int start_type) 1940 { 1941 unsigned int current_order = page_order(page); 1942 int pages; 1943 1944 /* Take ownership for orders >= pageblock_order */ 1945 if (current_order >= pageblock_order) { 1946 change_pageblock_range(page, current_order, start_type); 1947 return; 1948 } 1949 1950 pages = move_freepages_block(zone, page, start_type); 1951 1952 /* Claim the whole block if over half of it is free */ 1953 if (pages >= (1 << (pageblock_order-1)) || 1954 page_group_by_mobility_disabled) 1955 set_pageblock_migratetype(page, start_type); 1956 } 1957 1958 /* 1959 * Check whether there is a suitable fallback freepage with requested order. 1960 * If only_stealable is true, this function returns fallback_mt only if 1961 * we can steal other freepages all together. This would help to reduce 1962 * fragmentation due to mixed migratetype pages in one pageblock. 1963 */ 1964 int find_suitable_fallback(struct free_area *area, unsigned int order, 1965 int migratetype, bool only_stealable, bool *can_steal) 1966 { 1967 int i; 1968 int fallback_mt; 1969 1970 if (area->nr_free == 0) 1971 return -1; 1972 1973 *can_steal = false; 1974 for (i = 0;; i++) { 1975 fallback_mt = fallbacks[migratetype][i]; 1976 if (fallback_mt == MIGRATE_TYPES) 1977 break; 1978 1979 if (list_empty(&area->free_list[fallback_mt])) 1980 continue; 1981 1982 if (can_steal_fallback(order, migratetype)) 1983 *can_steal = true; 1984 1985 if (!only_stealable) 1986 return fallback_mt; 1987 1988 if (*can_steal) 1989 return fallback_mt; 1990 } 1991 1992 return -1; 1993 } 1994 1995 /* 1996 * Reserve a pageblock for exclusive use of high-order atomic allocations if 1997 * there are no empty page blocks that contain a page with a suitable order 1998 */ 1999 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2000 unsigned int alloc_order) 2001 { 2002 int mt; 2003 unsigned long max_managed, flags; 2004 2005 /* 2006 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2007 * Check is race-prone but harmless. 2008 */ 2009 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages; 2010 if (zone->nr_reserved_highatomic >= max_managed) 2011 return; 2012 2013 spin_lock_irqsave(&zone->lock, flags); 2014 2015 /* Recheck the nr_reserved_highatomic limit under the lock */ 2016 if (zone->nr_reserved_highatomic >= max_managed) 2017 goto out_unlock; 2018 2019 /* Yoink! */ 2020 mt = get_pageblock_migratetype(page); 2021 if (mt != MIGRATE_HIGHATOMIC && 2022 !is_migrate_isolate(mt) && !is_migrate_cma(mt)) { 2023 zone->nr_reserved_highatomic += pageblock_nr_pages; 2024 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2025 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC); 2026 } 2027 2028 out_unlock: 2029 spin_unlock_irqrestore(&zone->lock, flags); 2030 } 2031 2032 /* 2033 * Used when an allocation is about to fail under memory pressure. This 2034 * potentially hurts the reliability of high-order allocations when under 2035 * intense memory pressure but failed atomic allocations should be easier 2036 * to recover from than an OOM. 
2037 */ 2038 static void unreserve_highatomic_pageblock(const struct alloc_context *ac) 2039 { 2040 struct zonelist *zonelist = ac->zonelist; 2041 unsigned long flags; 2042 struct zoneref *z; 2043 struct zone *zone; 2044 struct page *page; 2045 int order; 2046 2047 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, 2048 ac->nodemask) { 2049 /* Preserve at least one pageblock */ 2050 if (zone->nr_reserved_highatomic <= pageblock_nr_pages) 2051 continue; 2052 2053 spin_lock_irqsave(&zone->lock, flags); 2054 for (order = 0; order < MAX_ORDER; order++) { 2055 struct free_area *area = &(zone->free_area[order]); 2056 2057 page = list_first_entry_or_null( 2058 &area->free_list[MIGRATE_HIGHATOMIC], 2059 struct page, lru); 2060 if (!page) 2061 continue; 2062 2063 /* 2064 * It should never happen but changes to locking could 2065 * inadvertently allow a per-cpu drain to add pages 2066 * to MIGRATE_HIGHATOMIC while unreserving so be safe 2067 * and watch for underflows. 2068 */ 2069 zone->nr_reserved_highatomic -= min(pageblock_nr_pages, 2070 zone->nr_reserved_highatomic); 2071 2072 /* 2073 * Convert to ac->migratetype and avoid the normal 2074 * pageblock stealing heuristics. Minimally, the caller 2075 * is doing the work and needs the pages. More 2076 * importantly, if the block was always converted to 2077 * MIGRATE_UNMOVABLE or another type then the number 2078 * of pageblocks that cannot be completely freed 2079 * may increase. 2080 */ 2081 set_pageblock_migratetype(page, ac->migratetype); 2082 move_freepages_block(zone, page, ac->migratetype); 2083 spin_unlock_irqrestore(&zone->lock, flags); 2084 return; 2085 } 2086 spin_unlock_irqrestore(&zone->lock, flags); 2087 } 2088 } 2089 2090 /* Remove an element from the buddy allocator from the fallback list */ 2091 static inline struct page * 2092 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) 2093 { 2094 struct free_area *area; 2095 unsigned int current_order; 2096 struct page *page; 2097 int fallback_mt; 2098 bool can_steal; 2099 2100 /* Find the largest possible block of pages in the other list */ 2101 for (current_order = MAX_ORDER-1; 2102 current_order >= order && current_order <= MAX_ORDER-1; 2103 --current_order) { 2104 area = &(zone->free_area[current_order]); 2105 fallback_mt = find_suitable_fallback(area, current_order, 2106 start_migratetype, false, &can_steal); 2107 if (fallback_mt == -1) 2108 continue; 2109 2110 page = list_first_entry(&area->free_list[fallback_mt], 2111 struct page, lru); 2112 if (can_steal) 2113 steal_suitable_fallback(zone, page, start_migratetype); 2114 2115 /* Remove the page from the freelists */ 2116 area->nr_free--; 2117 list_del(&page->lru); 2118 rmv_page_order(page); 2119 2120 expand(zone, page, order, current_order, area, 2121 start_migratetype); 2122 /* 2123 * The pcppage_migratetype may differ from pageblock's 2124 * migratetype depending on the decisions in 2125 * find_suitable_fallback(). This is OK as long as it does not 2126 * differ for MIGRATE_CMA pageblocks. Those can be used as 2127 * fallback only via special __rmqueue_cma_fallback() function 2128 */ 2129 set_pcppage_migratetype(page, start_migratetype); 2130 2131 trace_mm_page_alloc_extfrag(page, order, current_order, 2132 start_migratetype, fallback_mt); 2133 2134 return page; 2135 } 2136 2137 return NULL; 2138 } 2139 2140 /* 2141 * Do the hard work of removing an element from the buddy allocator. 2142 * Call me with the zone->lock already held. 
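 *
 * Illustrative call pattern (editorial sketch, not from the original
 * source); rmqueue_bulk() below takes the lock in the same way:
 *
 *	spin_lock(&zone->lock);
 *	page = __rmqueue(zone, order, migratetype);
 *	spin_unlock(&zone->lock);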
2143 */ 2144 static struct page *__rmqueue(struct zone *zone, unsigned int order, 2145 int migratetype) 2146 { 2147 struct page *page; 2148 2149 page = __rmqueue_smallest(zone, order, migratetype); 2150 if (unlikely(!page)) { 2151 if (migratetype == MIGRATE_MOVABLE) 2152 page = __rmqueue_cma_fallback(zone, order); 2153 2154 if (!page) 2155 page = __rmqueue_fallback(zone, order, migratetype); 2156 } 2157 2158 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2159 return page; 2160 } 2161 2162 /* 2163 * Obtain a specified number of elements from the buddy allocator, all under 2164 * a single hold of the lock, for efficiency. Add them to the supplied list. 2165 * Returns the number of new pages which were placed at *list. 2166 */ 2167 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2168 unsigned long count, struct list_head *list, 2169 int migratetype, bool cold) 2170 { 2171 int i; 2172 2173 spin_lock(&zone->lock); 2174 for (i = 0; i < count; ++i) { 2175 struct page *page = __rmqueue(zone, order, migratetype); 2176 if (unlikely(page == NULL)) 2177 break; 2178 2179 if (unlikely(check_pcp_refill(page))) 2180 continue; 2181 2182 /* 2183 * Split buddy pages returned by expand() are received here 2184 * in physical page order. The page is added to the caller's 2185 * list and the list head then moves forward. From the caller's 2186 * perspective, the linked list is ordered by page number in 2187 * some conditions. This is useful for IO devices that can 2188 * merge IO requests if the physical pages are ordered 2189 * properly. 2190 */ 2191 if (likely(!cold)) 2192 list_add(&page->lru, list); 2193 else 2194 list_add_tail(&page->lru, list); 2195 list = &page->lru; 2196 if (is_migrate_cma(get_pcppage_migratetype(page))) 2197 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2198 -(1 << order)); 2199 } 2200 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2201 spin_unlock(&zone->lock); 2202 return i; 2203 } 2204 2205 #ifdef CONFIG_NUMA 2206 /* 2207 * Called from the vmstat counter updater to drain pagesets of this 2208 * currently executing processor on remote nodes after they have 2209 * expired. 2210 * 2211 * Note that this function must be called with the thread pinned to 2212 * a single processor. 2213 */ 2214 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2215 { 2216 unsigned long flags; 2217 int to_drain, batch; 2218 2219 local_irq_save(flags); 2220 batch = READ_ONCE(pcp->batch); 2221 to_drain = min(pcp->count, batch); 2222 if (to_drain > 0) { 2223 free_pcppages_bulk(zone, to_drain, pcp); 2224 pcp->count -= to_drain; 2225 } 2226 local_irq_restore(flags); 2227 } 2228 #endif 2229 2230 /* 2231 * Drain pcplists of the indicated processor and zone. 2232 * 2233 * The processor must either be the current processor and the 2234 * thread pinned to the current processor or a processor that 2235 * is not online. 2236 */ 2237 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2238 { 2239 unsigned long flags; 2240 struct per_cpu_pageset *pset; 2241 struct per_cpu_pages *pcp; 2242 2243 local_irq_save(flags); 2244 pset = per_cpu_ptr(zone->pageset, cpu); 2245 2246 pcp = &pset->pcp; 2247 if (pcp->count) { 2248 free_pcppages_bulk(zone, pcp->count, pcp); 2249 pcp->count = 0; 2250 } 2251 local_irq_restore(flags); 2252 } 2253 2254 /* 2255 * Drain pcplists of all zones on the indicated processor.
2256 * 2257 * The processor must either be the current processor and the 2258 * thread pinned to the current processor or a processor that 2259 * is not online. 2260 */ 2261 static void drain_pages(unsigned int cpu) 2262 { 2263 struct zone *zone; 2264 2265 for_each_populated_zone(zone) { 2266 drain_pages_zone(cpu, zone); 2267 } 2268 } 2269 2270 /* 2271 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2272 * 2273 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 2274 * the single zone's pages. 2275 */ 2276 void drain_local_pages(struct zone *zone) 2277 { 2278 int cpu = smp_processor_id(); 2279 2280 if (zone) 2281 drain_pages_zone(cpu, zone); 2282 else 2283 drain_pages(cpu); 2284 } 2285 2286 /* 2287 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2288 * 2289 * When zone parameter is non-NULL, spill just the single zone's pages. 2290 * 2291 * Note that this code is protected against sending an IPI to an offline 2292 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs: 2293 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but 2294 * nothing keeps CPUs from showing up after we populated the cpumask and 2295 * before the call to on_each_cpu_mask(). 2296 */ 2297 void drain_all_pages(struct zone *zone) 2298 { 2299 int cpu; 2300 2301 /* 2302 * Allocate in the BSS so we won't require allocation in 2303 * the direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2304 */ 2305 static cpumask_t cpus_with_pcps; 2306 2307 /* 2308 * We don't care about racing with CPU hotplug events, 2309 * as the offline notification will cause the notified 2310 * CPU to drain its pcps, and on_each_cpu_mask() 2311 * disables preemption as part of its processing 2312 */ 2313 for_each_online_cpu(cpu) { 2314 struct per_cpu_pageset *pcp; 2315 struct zone *z; 2316 bool has_pcps = false; 2317 2318 if (zone) { 2319 pcp = per_cpu_ptr(zone->pageset, cpu); 2320 if (pcp->pcp.count) 2321 has_pcps = true; 2322 } else { 2323 for_each_populated_zone(z) { 2324 pcp = per_cpu_ptr(z->pageset, cpu); 2325 if (pcp->pcp.count) { 2326 has_pcps = true; 2327 break; 2328 } 2329 } 2330 } 2331 2332 if (has_pcps) 2333 cpumask_set_cpu(cpu, &cpus_with_pcps); 2334 else 2335 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2336 } 2337 on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, 2338 zone, 1); 2339 } 2340 2341 #ifdef CONFIG_HIBERNATION 2342 2343 void mark_free_pages(struct zone *zone) 2344 { 2345 unsigned long pfn, max_zone_pfn; 2346 unsigned long flags; 2347 unsigned int order, t; 2348 struct page *page; 2349 2350 if (zone_is_empty(zone)) 2351 return; 2352 2353 spin_lock_irqsave(&zone->lock, flags); 2354 2355 max_zone_pfn = zone_end_pfn(zone); 2356 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 2357 if (pfn_valid(pfn)) { 2358 page = pfn_to_page(pfn); 2359 2360 if (page_zone(page) != zone) 2361 continue; 2362 2363 if (!swsusp_page_is_forbidden(page)) 2364 swsusp_unset_page_free(page); 2365 } 2366 2367 for_each_migratetype_order(order, t) { 2368 list_for_each_entry(page, 2369 &zone->free_area[order].free_list[t], lru) { 2370 unsigned long i; 2371 2372 pfn = page_to_pfn(page); 2373 for (i = 0; i < (1UL << order); i++) 2374 swsusp_set_page_free(pfn_to_page(pfn + i)); 2375 } 2376 } 2377 spin_unlock_irqrestore(&zone->lock, flags); 2378 } 2379 #endif /* CONFIG_HIBERNATION */ 2380 2381 /* 2382 * Free a 0-order page 2383 * cold == true ?
free a cold page : free a hot page 2384 */ 2385 void free_hot_cold_page(struct page *page, bool cold) 2386 { 2387 struct zone *zone = page_zone(page); 2388 struct per_cpu_pages *pcp; 2389 unsigned long flags; 2390 unsigned long pfn = page_to_pfn(page); 2391 int migratetype; 2392 2393 if (!free_pcp_prepare(page)) 2394 return; 2395 2396 migratetype = get_pfnblock_migratetype(page, pfn); 2397 set_pcppage_migratetype(page, migratetype); 2398 local_irq_save(flags); 2399 __count_vm_event(PGFREE); 2400 2401 /* 2402 * We only track unmovable, reclaimable and movable on pcp lists. 2403 * Free ISOLATE pages back to the allocator because they are being 2404 * offlined but treat RESERVE as movable pages so we can get those 2405 * areas back if necessary. Otherwise, we may have to free 2406 * excessively into the page allocator 2407 */ 2408 if (migratetype >= MIGRATE_PCPTYPES) { 2409 if (unlikely(is_migrate_isolate(migratetype))) { 2410 free_one_page(zone, page, pfn, 0, migratetype); 2411 goto out; 2412 } 2413 migratetype = MIGRATE_MOVABLE; 2414 } 2415 2416 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2417 if (!cold) 2418 list_add(&page->lru, &pcp->lists[migratetype]); 2419 else 2420 list_add_tail(&page->lru, &pcp->lists[migratetype]); 2421 pcp->count++; 2422 if (pcp->count >= pcp->high) { 2423 unsigned long batch = READ_ONCE(pcp->batch); 2424 free_pcppages_bulk(zone, batch, pcp); 2425 pcp->count -= batch; 2426 } 2427 2428 out: 2429 local_irq_restore(flags); 2430 } 2431 2432 /* 2433 * Free a list of 0-order pages 2434 */ 2435 void free_hot_cold_page_list(struct list_head *list, bool cold) 2436 { 2437 struct page *page, *next; 2438 2439 list_for_each_entry_safe(page, next, list, lru) { 2440 trace_mm_page_free_batched(page, cold); 2441 free_hot_cold_page(page, cold); 2442 } 2443 } 2444 2445 /* 2446 * split_page takes a non-compound higher-order page, and splits it into 2447 * n (1<<order) sub-pages: page[0..n] 2448 * Each sub-page must be freed individually. 2449 * 2450 * Note: this is probably too low level an operation for use in drivers. 2451 * Please consult with lkml before using this in your driver. 2452 */ 2453 void split_page(struct page *page, unsigned int order) 2454 { 2455 int i; 2456 gfp_t gfp_mask; 2457 2458 VM_BUG_ON_PAGE(PageCompound(page), page); 2459 VM_BUG_ON_PAGE(!page_count(page), page); 2460 2461 #ifdef CONFIG_KMEMCHECK 2462 /* 2463 * Split shadow pages too, because free(page[0]) would 2464 * otherwise free the whole shadow. 
2465 */ 2466 if (kmemcheck_page_is_tracked(page)) 2467 split_page(virt_to_page(page[0].shadow), order); 2468 #endif 2469 2470 gfp_mask = get_page_owner_gfp(page); 2471 set_page_owner(page, 0, gfp_mask); 2472 for (i = 1; i < (1 << order); i++) { 2473 set_page_refcounted(page + i); 2474 set_page_owner(page + i, 0, gfp_mask); 2475 } 2476 } 2477 EXPORT_SYMBOL_GPL(split_page); 2478 2479 int __isolate_free_page(struct page *page, unsigned int order) 2480 { 2481 unsigned long watermark; 2482 struct zone *zone; 2483 int mt; 2484 2485 BUG_ON(!PageBuddy(page)); 2486 2487 zone = page_zone(page); 2488 mt = get_pageblock_migratetype(page); 2489 2490 if (!is_migrate_isolate(mt)) { 2491 /* Obey watermarks as if the page was being allocated */ 2492 watermark = low_wmark_pages(zone) + (1 << order); 2493 if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) 2494 return 0; 2495 2496 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2497 } 2498 2499 /* Remove page from free list */ 2500 list_del(&page->lru); 2501 zone->free_area[order].nr_free--; 2502 rmv_page_order(page); 2503 2504 set_page_owner(page, order, __GFP_MOVABLE); 2505 2506 /* Set the pageblock if the isolated page is at least a pageblock */ 2507 if (order >= pageblock_order - 1) { 2508 struct page *endpage = page + (1 << order) - 1; 2509 for (; page < endpage; page += pageblock_nr_pages) { 2510 int mt = get_pageblock_migratetype(page); 2511 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)) 2512 set_pageblock_migratetype(page, 2513 MIGRATE_MOVABLE); 2514 } 2515 } 2516 2517 2518 return 1UL << order; 2519 } 2520 2521 /* 2522 * Similar to split_page except the page is already free. As this is only 2523 * being used for migration, the migratetype of the block also changes. 2524 * As this is called with interrupts disabled, the caller is responsible 2525 * for calling arch_alloc_page() and kernel_map_pages() after interrupts 2526 * are enabled. 2527 * 2528 * Note: this is probably too low level an operation for use in drivers. 2529 * Please consult with lkml before using this in your driver. 2530 */ 2531 int split_free_page(struct page *page) 2532 { 2533 unsigned int order; 2534 int nr_pages; 2535 2536 order = page_order(page); 2537 2538 nr_pages = __isolate_free_page(page, order); 2539 if (!nr_pages) 2540 return 0; 2541 2542 /* Split into individual pages */ 2543 set_page_refcounted(page); 2544 split_page(page, order); 2545 return nr_pages; 2546 } 2547 2548 /* 2549 * Update NUMA hit/miss statistics 2550 * 2551 * Must be called with interrupts disabled. 2552 * 2553 * When __GFP_OTHER_NODE is set assume the node of the preferred 2554 * zone is the local node. This is useful for daemons that allocate 2555 * memory on behalf of other processes. 2556 */ 2557 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2558 gfp_t flags) 2559 { 2560 #ifdef CONFIG_NUMA 2561 int local_nid = numa_node_id(); 2562 enum zone_stat_item local_stat = NUMA_LOCAL; 2563 2564 if (unlikely(flags & __GFP_OTHER_NODE)) { 2565 local_stat = NUMA_OTHER; 2566 local_nid = preferred_zone->node; 2567 } 2568 2569 if (z->node == local_nid) { 2570 __inc_zone_state(z, NUMA_HIT); 2571 __inc_zone_state(z, local_stat); 2572 } else { 2573 __inc_zone_state(z, NUMA_MISS); 2574 __inc_zone_state(preferred_zone, NUMA_FOREIGN); 2575 } 2576 #endif 2577 } 2578 2579 /* 2580 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
2581 */ 2582 static inline 2583 struct page *buffered_rmqueue(struct zone *preferred_zone, 2584 struct zone *zone, unsigned int order, 2585 gfp_t gfp_flags, unsigned int alloc_flags, 2586 int migratetype) 2587 { 2588 unsigned long flags; 2589 struct page *page; 2590 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2591 2592 if (likely(order == 0)) { 2593 struct per_cpu_pages *pcp; 2594 struct list_head *list; 2595 2596 local_irq_save(flags); 2597 do { 2598 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2599 list = &pcp->lists[migratetype]; 2600 if (list_empty(list)) { 2601 pcp->count += rmqueue_bulk(zone, 0, 2602 pcp->batch, list, 2603 migratetype, cold); 2604 if (unlikely(list_empty(list))) 2605 goto failed; 2606 } 2607 2608 if (cold) 2609 page = list_last_entry(list, struct page, lru); 2610 else 2611 page = list_first_entry(list, struct page, lru); 2612 } while (page && check_new_pcp(page)); 2613 2614 __dec_zone_state(zone, NR_ALLOC_BATCH); 2615 list_del(&page->lru); 2616 pcp->count--; 2617 } else { 2618 /* 2619 * We most definitely don't want callers attempting to 2620 * allocate greater than order-1 page units with __GFP_NOFAIL. 2621 */ 2622 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 2623 spin_lock_irqsave(&zone->lock, flags); 2624 2625 do { 2626 page = NULL; 2627 if (alloc_flags & ALLOC_HARDER) { 2628 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2629 if (page) 2630 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2631 } 2632 if (!page) 2633 page = __rmqueue(zone, order, migratetype); 2634 } while (page && check_new_pages(page, order)); 2635 spin_unlock(&zone->lock); 2636 if (!page) 2637 goto failed; 2638 __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); 2639 __mod_zone_freepage_state(zone, -(1 << order), 2640 get_pcppage_migratetype(page)); 2641 } 2642 2643 if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && 2644 !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) 2645 set_bit(ZONE_FAIR_DEPLETED, &zone->flags); 2646 2647 __count_zone_vm_events(PGALLOC, zone, 1 << order); 2648 zone_statistics(preferred_zone, zone, gfp_flags); 2649 local_irq_restore(flags); 2650 2651 VM_BUG_ON_PAGE(bad_range(zone, page), page); 2652 return page; 2653 2654 failed: 2655 local_irq_restore(flags); 2656 return NULL; 2657 } 2658 2659 #ifdef CONFIG_FAIL_PAGE_ALLOC 2660 2661 static struct { 2662 struct fault_attr attr; 2663 2664 bool ignore_gfp_highmem; 2665 bool ignore_gfp_reclaim; 2666 u32 min_order; 2667 } fail_page_alloc = { 2668 .attr = FAULT_ATTR_INITIALIZER, 2669 .ignore_gfp_reclaim = true, 2670 .ignore_gfp_highmem = true, 2671 .min_order = 1, 2672 }; 2673 2674 static int __init setup_fail_page_alloc(char *str) 2675 { 2676 return setup_fault_attr(&fail_page_alloc.attr, str); 2677 } 2678 __setup("fail_page_alloc=", setup_fail_page_alloc); 2679 2680 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2681 { 2682 if (order < fail_page_alloc.min_order) 2683 return false; 2684 if (gfp_mask & __GFP_NOFAIL) 2685 return false; 2686 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 2687 return false; 2688 if (fail_page_alloc.ignore_gfp_reclaim && 2689 (gfp_mask & __GFP_DIRECT_RECLAIM)) 2690 return false; 2691 2692 return should_fail(&fail_page_alloc.attr, 1 << order); 2693 } 2694 2695 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 2696 2697 static int __init fail_page_alloc_debugfs(void) 2698 { 2699 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 2700 struct dentry *dir; 2701 2702 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 2703 
&fail_page_alloc.attr); 2704 if (IS_ERR(dir)) 2705 return PTR_ERR(dir); 2706 2707 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, 2708 &fail_page_alloc.ignore_gfp_reclaim)) 2709 goto fail; 2710 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir, 2711 &fail_page_alloc.ignore_gfp_highmem)) 2712 goto fail; 2713 if (!debugfs_create_u32("min-order", mode, dir, 2714 &fail_page_alloc.min_order)) 2715 goto fail; 2716 2717 return 0; 2718 fail: 2719 debugfs_remove_recursive(dir); 2720 2721 return -ENOMEM; 2722 } 2723 2724 late_initcall(fail_page_alloc_debugfs); 2725 2726 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 2727 2728 #else /* CONFIG_FAIL_PAGE_ALLOC */ 2729 2730 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2731 { 2732 return false; 2733 } 2734 2735 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 2736 2737 /* 2738 * Return true if free base pages are above 'mark'. For high-order checks it 2739 * will return true if the order-0 watermark is reached and there is at least 2740 * one free page of a suitable size. Checking now avoids taking the zone lock 2741 * to check in the allocation paths if no pages are free. 2742 */ 2743 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2744 int classzone_idx, unsigned int alloc_flags, 2745 long free_pages) 2746 { 2747 long min = mark; 2748 int o; 2749 const bool alloc_harder = (alloc_flags & ALLOC_HARDER); 2750 2751 /* free_pages may go negative - that's OK */ 2752 free_pages -= (1 << order) - 1; 2753 2754 if (alloc_flags & ALLOC_HIGH) 2755 min -= min / 2; 2756 2757 /* 2758 * If the caller does not have rights to ALLOC_HARDER then subtract 2759 * the high-atomic reserves. This will over-estimate the size of the 2760 * atomic reserve but it avoids a search. 2761 */ 2762 if (likely(!alloc_harder)) 2763 free_pages -= z->nr_reserved_highatomic; 2764 else 2765 min -= min / 4; 2766 2767 #ifdef CONFIG_CMA 2768 /* If allocation can't use CMA areas don't use free CMA pages */ 2769 if (!(alloc_flags & ALLOC_CMA)) 2770 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); 2771 #endif 2772 2773 /* 2774 * Check watermarks for an order-0 allocation request. If these 2775 * are not met, then a high-order request also cannot go ahead 2776 * even if a suitable page happened to be free.
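 *
 * Editorial worked example (illustrative figures only, not from the
 * original source): with mark == 1024 pages, ALLOC_HIGH above halves
 * the target to 512 and ALLOC_HARDER trims another quarter, leaving
 * 384; the order-0 check below then fails unless free_pages exceeds
 * 384 plus the zone's lowmem_reserve[classzone_idx].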
2777 */ 2778 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 2779 return false; 2780 2781 /* If this is an order-0 request then the watermark is fine */ 2782 if (!order) 2783 return true; 2784 2785 /* For a high-order request, check at least one suitable page is free */ 2786 for (o = order; o < MAX_ORDER; o++) { 2787 struct free_area *area = &z->free_area[o]; 2788 int mt; 2789 2790 if (!area->nr_free) 2791 continue; 2792 2793 if (alloc_harder) 2794 return true; 2795 2796 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2797 if (!list_empty(&area->free_list[mt])) 2798 return true; 2799 } 2800 2801 #ifdef CONFIG_CMA 2802 if ((alloc_flags & ALLOC_CMA) && 2803 !list_empty(&area->free_list[MIGRATE_CMA])) { 2804 return true; 2805 } 2806 #endif 2807 } 2808 return false; 2809 } 2810 2811 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2812 int classzone_idx, unsigned int alloc_flags) 2813 { 2814 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2815 zone_page_state(z, NR_FREE_PAGES)); 2816 } 2817 2818 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 2819 unsigned long mark, int classzone_idx, unsigned int alloc_flags) 2820 { 2821 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2822 long cma_pages = 0; 2823 2824 #ifdef CONFIG_CMA 2825 /* If allocation can't use CMA areas don't use free CMA pages */ 2826 if (!(alloc_flags & ALLOC_CMA)) 2827 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES); 2828 #endif 2829 2830 /* 2831 * Fast check for order-0 only. If this fails then the reserves 2832 * need to be calculated. There is a corner case where the check 2833 * passes but only the high-order atomic reserves are free. If 2834 * the caller is !atomic then it'll uselessly search the free 2835 * list. That corner case is then slower but it is harmless.
2836 */ 2837 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx]) 2838 return true; 2839 2840 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2841 free_pages); 2842 } 2843 2844 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2845 unsigned long mark, int classzone_idx) 2846 { 2847 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2848 2849 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2850 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2851 2852 return __zone_watermark_ok(z, order, mark, classzone_idx, 0, 2853 free_pages); 2854 } 2855 2856 #ifdef CONFIG_NUMA 2857 static bool zone_local(struct zone *local_zone, struct zone *zone) 2858 { 2859 return local_zone->node == zone->node; 2860 } 2861 2862 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2863 { 2864 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < 2865 RECLAIM_DISTANCE; 2866 } 2867 #else /* CONFIG_NUMA */ 2868 static bool zone_local(struct zone *local_zone, struct zone *zone) 2869 { 2870 return true; 2871 } 2872 2873 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2874 { 2875 return true; 2876 } 2877 #endif /* CONFIG_NUMA */ 2878 2879 static void reset_alloc_batches(struct zone *preferred_zone) 2880 { 2881 struct zone *zone = preferred_zone->zone_pgdat->node_zones; 2882 2883 do { 2884 mod_zone_page_state(zone, NR_ALLOC_BATCH, 2885 high_wmark_pages(zone) - low_wmark_pages(zone) - 2886 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); 2887 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); 2888 } while (zone++ != preferred_zone); 2889 } 2890 2891 /* 2892 * get_page_from_freelist goes through the zonelist trying to allocate 2893 * a page. 2894 */ 2895 static struct page * 2896 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 2897 const struct alloc_context *ac) 2898 { 2899 struct zoneref *z = ac->preferred_zoneref; 2900 struct zone *zone; 2901 bool fair_skipped = false; 2902 bool apply_fair = (alloc_flags & ALLOC_FAIR); 2903 2904 zonelist_scan: 2905 /* 2906 * Scan zonelist, looking for a zone with enough free. 2907 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 2908 */ 2909 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 2910 ac->nodemask) { 2911 struct page *page; 2912 unsigned long mark; 2913 2914 if (cpusets_enabled() && 2915 (alloc_flags & ALLOC_CPUSET) && 2916 !__cpuset_zone_allowed(zone, gfp_mask)) 2917 continue; 2918 /* 2919 * Distribute pages in proportion to the individual 2920 * zone size to ensure fair page aging. The zone a 2921 * page was allocated in should have no effect on the 2922 * time the page has in memory before being reclaimed. 2923 */ 2924 if (apply_fair) { 2925 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) { 2926 fair_skipped = true; 2927 continue; 2928 } 2929 if (!zone_local(ac->preferred_zoneref->zone, zone)) { 2930 if (fair_skipped) 2931 goto reset_fair; 2932 apply_fair = false; 2933 } 2934 } 2935 /* 2936 * When allocating a page cache page for writing, we 2937 * want to get it from a zone that is within its dirty 2938 * limit, such that no single zone holds more than its 2939 * proportional share of globally allowed dirty pages. 2940 * The dirty limits take into account the zone's 2941 * lowmem reserves and high watermark so that kswapd 2942 * should be able to balance it without having to 2943 * write pages from its LRU list. 
2944 * 2945 * This may look like it could increase pressure on 2946 * lower zones by failing allocations in higher zones 2947 * before they are full. But the pages that do spill 2948 * over are limited as the lower zones are protected 2949 * by this very same mechanism. It should not become 2950 * a practical burden to them. 2951 * 2952 * XXX: For now, allow allocations to potentially 2953 * exceed the per-zone dirty limit in the slowpath 2954 * (spread_dirty_pages unset) before going into reclaim, 2955 * which is important when on a NUMA setup the allowed 2956 * zones are together not big enough to reach the 2957 * global limit. The proper fix for these situations 2958 * will require awareness of zones in the 2959 * dirty-throttling and the flusher threads. 2960 */ 2961 if (ac->spread_dirty_pages && !zone_dirty_ok(zone)) 2962 continue; 2963 2964 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 2965 if (!zone_watermark_fast(zone, order, mark, 2966 ac_classzone_idx(ac), alloc_flags)) { 2967 int ret; 2968 2969 /* Checked here to keep the fast path fast */ 2970 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 2971 if (alloc_flags & ALLOC_NO_WATERMARKS) 2972 goto try_this_zone; 2973 2974 if (zone_reclaim_mode == 0 || 2975 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 2976 continue; 2977 2978 ret = zone_reclaim(zone, gfp_mask, order); 2979 switch (ret) { 2980 case ZONE_RECLAIM_NOSCAN: 2981 /* did not scan */ 2982 continue; 2983 case ZONE_RECLAIM_FULL: 2984 /* scanned but unreclaimable */ 2985 continue; 2986 default: 2987 /* did we reclaim enough */ 2988 if (zone_watermark_ok(zone, order, mark, 2989 ac_classzone_idx(ac), alloc_flags)) 2990 goto try_this_zone; 2991 2992 continue; 2993 } 2994 } 2995 2996 try_this_zone: 2997 page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order, 2998 gfp_mask, alloc_flags, ac->migratetype); 2999 if (page) { 3000 prep_new_page(page, order, gfp_mask, alloc_flags); 3001 3002 /* 3003 * If this is a high-order atomic allocation then check 3004 * if the pageblock should be reserved for the future 3005 */ 3006 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 3007 reserve_highatomic_pageblock(page, zone, order); 3008 3009 return page; 3010 } 3011 } 3012 3013 /* 3014 * The first pass makes sure allocations are spread fairly within the 3015 * local node. However, the local node might have free pages left 3016 * after the fairness batches are exhausted, and remote zones haven't 3017 * even been considered yet. Try once more without fairness, and 3018 * include remote zones now, before entering the slowpath and waking 3019 * kswapd: prefer spilling to a remote zone over swapping locally. 3020 */ 3021 if (fair_skipped) { 3022 reset_fair: 3023 apply_fair = false; 3024 fair_skipped = false; 3025 reset_alloc_batches(ac->preferred_zoneref->zone); 3026 goto zonelist_scan; 3027 } 3028 3029 return NULL; 3030 } 3031 3032 /* 3033 * Large machines with many possible nodes should not always dump per-node 3034 * meminfo in irq context. 3035 */ 3036 static inline bool should_suppress_show_mem(void) 3037 { 3038 bool ret = false; 3039 3040 #if NODES_SHIFT > 8 3041 ret = in_interrupt(); 3042 #endif 3043 return ret; 3044 } 3045 3046 static DEFINE_RATELIMIT_STATE(nopage_rs, 3047 DEFAULT_RATELIMIT_INTERVAL, 3048 DEFAULT_RATELIMIT_BURST); 3049 3050 void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...) 
3051 { 3052 unsigned int filter = SHOW_MEM_FILTER_NODES; 3053 3054 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || 3055 debug_guardpage_minorder() > 0) 3056 return; 3057 3058 /* 3059 * This documents exceptions given to allocations in certain 3060 * contexts that are allowed to allocate outside current's set 3061 * of allowed nodes. 3062 */ 3063 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3064 if (test_thread_flag(TIF_MEMDIE) || 3065 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3066 filter &= ~SHOW_MEM_FILTER_NODES; 3067 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3068 filter &= ~SHOW_MEM_FILTER_NODES; 3069 3070 if (fmt) { 3071 struct va_format vaf; 3072 va_list args; 3073 3074 va_start(args, fmt); 3075 3076 vaf.fmt = fmt; 3077 vaf.va = &args; 3078 3079 pr_warn("%pV", &vaf); 3080 3081 va_end(args); 3082 } 3083 3084 pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n", 3085 current->comm, order, gfp_mask, &gfp_mask); 3086 dump_stack(); 3087 if (!should_suppress_show_mem()) 3088 show_mem(filter); 3089 } 3090 3091 static inline struct page * 3092 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3093 const struct alloc_context *ac, unsigned long *did_some_progress) 3094 { 3095 struct oom_control oc = { 3096 .zonelist = ac->zonelist, 3097 .nodemask = ac->nodemask, 3098 .gfp_mask = gfp_mask, 3099 .order = order, 3100 }; 3101 struct page *page; 3102 3103 *did_some_progress = 0; 3104 3105 /* 3106 * Acquire the oom lock. If that fails, somebody else is 3107 * making progress for us. 3108 */ 3109 if (!mutex_trylock(&oom_lock)) { 3110 *did_some_progress = 1; 3111 schedule_timeout_uninterruptible(1); 3112 return NULL; 3113 } 3114 3115 /* 3116 * Go through the zonelist yet one more time, keep very high watermark 3117 * here, this is only to catch a parallel oom killing, we must fail if 3118 * we're still under heavy pressure. 3119 */ 3120 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, 3121 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3122 if (page) 3123 goto out; 3124 3125 if (!(gfp_mask & __GFP_NOFAIL)) { 3126 /* Coredumps can quickly deplete all memory reserves */ 3127 if (current->flags & PF_DUMPCORE) 3128 goto out; 3129 /* The OOM killer will not help higher order allocs */ 3130 if (order > PAGE_ALLOC_COSTLY_ORDER) 3131 goto out; 3132 /* The OOM killer does not needlessly kill tasks for lowmem */ 3133 if (ac->high_zoneidx < ZONE_NORMAL) 3134 goto out; 3135 if (pm_suspended_storage()) 3136 goto out; 3137 /* 3138 * XXX: GFP_NOFS allocations should rather fail than rely on 3139 * other requests to make forward progress. 3140 * We are in an unfortunate situation where out_of_memory cannot 3141 * do much for this context but let's try it to at least get 3142 * access to memory reserves if the current task is killed (see 3143 * out_of_memory). Once filesystems are ready to handle allocation 3144 * failures more gracefully we should just bail out here.
3145 */ 3146 3147 /* The OOM killer may not free memory on a specific node */ 3148 if (gfp_mask & __GFP_THISNODE) 3149 goto out; 3150 } 3151 /* Exhausted what can be done so it's blamo time */ 3152 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { 3153 *did_some_progress = 1; 3154 3155 if (gfp_mask & __GFP_NOFAIL) { 3156 page = get_page_from_freelist(gfp_mask, order, 3157 ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac); 3158 /* 3159 * fallback to ignore cpuset restriction if our nodes 3160 * are depleted 3161 */ 3162 if (!page) 3163 page = get_page_from_freelist(gfp_mask, order, 3164 ALLOC_NO_WATERMARKS, ac); 3165 } 3166 } 3167 out: 3168 mutex_unlock(&oom_lock); 3169 return page; 3170 } 3171 3172 3173 /* 3174 * Maximum number of compaction retries with progress before the OOM 3175 * killer is considered the only way to move forward. 3176 */ 3177 #define MAX_COMPACT_RETRIES 16 3178 3179 #ifdef CONFIG_COMPACTION 3180 /* Try memory compaction for high-order allocations before reclaim */ 3181 static struct page * 3182 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3183 unsigned int alloc_flags, const struct alloc_context *ac, 3184 enum migrate_mode mode, enum compact_result *compact_result) 3185 { 3186 struct page *page; 3187 int contended_compaction; 3188 3189 if (!order) 3190 return NULL; 3191 3192 current->flags |= PF_MEMALLOC; 3193 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3194 mode, &contended_compaction); 3195 current->flags &= ~PF_MEMALLOC; 3196 3197 if (*compact_result <= COMPACT_INACTIVE) 3198 return NULL; 3199 3200 /* 3201 * At least in one zone compaction wasn't deferred or skipped, so let's 3202 * count a compaction stall 3203 */ 3204 count_vm_event(COMPACTSTALL); 3205 3206 page = get_page_from_freelist(gfp_mask, order, 3207 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 3208 3209 if (page) { 3210 struct zone *zone = page_zone(page); 3211 3212 zone->compact_blockskip_flush = false; 3213 compaction_defer_reset(zone, order, true); 3214 count_vm_event(COMPACTSUCCESS); 3215 return page; 3216 } 3217 3218 /* 3219 * It's bad if a compaction run occurs and fails. The most likely reason 3220 * is that pages exist, but not enough to satisfy watermarks. 3221 */ 3222 count_vm_event(COMPACTFAIL); 3223 3224 /* 3225 * In all zones where compaction was attempted (and not 3226 * deferred or skipped), lock contention has been detected. 3227 * For THP allocation we do not want to disrupt the others 3228 * so we fall back to base pages instead. 3229 */ 3230 if (contended_compaction == COMPACT_CONTENDED_LOCK) 3231 *compact_result = COMPACT_CONTENDED; 3232 3233 /* 3234 * If compaction was aborted due to need_resched(), we do not 3235 * want to further increase allocation latency, unless it is 3236 * khugepaged trying to collapse. 3237 */ 3238 if (contended_compaction == COMPACT_CONTENDED_SCHED 3239 && !(current->flags & PF_KTHREAD)) 3240 *compact_result = COMPACT_CONTENDED; 3241 3242 cond_resched(); 3243 3244 return NULL; 3245 } 3246 3247 static inline bool 3248 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3249 enum compact_result compact_result, enum migrate_mode *migrate_mode, 3250 int compaction_retries) 3251 { 3252 int max_retries = MAX_COMPACT_RETRIES; 3253 3254 if (!order) 3255 return false; 3256 3257 /* 3258 * compaction considers all the zones as desperately out of memory, 3259 * so it doesn't really make much sense to retry except when the 3260 * failure could be caused by a weak migration mode.
3261 */ 3262 if (compaction_failed(compact_result)) { 3263 if (*migrate_mode == MIGRATE_ASYNC) { 3264 *migrate_mode = MIGRATE_SYNC_LIGHT; 3265 return true; 3266 } 3267 return false; 3268 } 3269 3270 /* 3271 * Make sure the compaction wasn't deferred or didn't bail out early 3272 * due to lock contention before we declare that we should give up. 3273 * But do not retry if the given zonelist is not suitable for 3274 * compaction. 3275 */ 3276 if (compaction_withdrawn(compact_result)) 3277 return compaction_zonelist_suitable(ac, order, alloc_flags); 3278 3279 /* 3280 * !costly requests are much more important than __GFP_REPEAT 3281 * costly ones because they are de facto nofail and invoke the OOM 3282 * killer to move on while costly ones can fail and users are ready 3283 * to cope with that. 1/4 retries is rather arbitrary but we 3284 * would need much more detailed feedback from compaction to 3285 * make a better decision. 3286 */ 3287 if (order > PAGE_ALLOC_COSTLY_ORDER) 3288 max_retries /= 4; 3289 if (compaction_retries <= max_retries) 3290 return true; 3291 3292 return false; 3293 } 3294 #else 3295 static inline struct page * 3296 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3297 unsigned int alloc_flags, const struct alloc_context *ac, 3298 enum migrate_mode mode, enum compact_result *compact_result) 3299 { 3300 *compact_result = COMPACT_SKIPPED; 3301 return NULL; 3302 } 3303 3304 static inline bool 3305 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3306 enum compact_result compact_result, 3307 enum migrate_mode *migrate_mode, 3308 int compaction_retries) 3309 { 3310 struct zone *zone; 3311 struct zoneref *z; 3312 3313 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3314 return false; 3315 3316 /* 3317 * There are setups with compaction disabled which would prefer to loop 3318 * inside the allocator rather than hit the oom killer prematurely. 3319 * Let's give them good hope and keep retrying while the order-0 3320 * watermarks are OK.
3321 */ 3322 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 3323 ac->nodemask) { 3324 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3325 ac_classzone_idx(ac), alloc_flags)) 3326 return true; 3327 } 3328 return false; 3329 } 3330 #endif /* CONFIG_COMPACTION */ 3331 3332 /* Perform direct synchronous page reclaim */ 3333 static int 3334 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3335 const struct alloc_context *ac) 3336 { 3337 struct reclaim_state reclaim_state; 3338 int progress; 3339 3340 cond_resched(); 3341 3342 /* We now go into synchronous reclaim */ 3343 cpuset_memory_pressure_bump(); 3344 current->flags |= PF_MEMALLOC; 3345 lockdep_set_current_reclaim_state(gfp_mask); 3346 reclaim_state.reclaimed_slab = 0; 3347 current->reclaim_state = &reclaim_state; 3348 3349 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3350 ac->nodemask); 3351 3352 current->reclaim_state = NULL; 3353 lockdep_clear_current_reclaim_state(); 3354 current->flags &= ~PF_MEMALLOC; 3355 3356 cond_resched(); 3357 3358 return progress; 3359 } 3360 3361 /* The really slow allocator path where we enter direct reclaim */ 3362 static inline struct page * 3363 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3364 unsigned int alloc_flags, const struct alloc_context *ac, 3365 unsigned long *did_some_progress) 3366 { 3367 struct page *page = NULL; 3368 bool drained = false; 3369 3370 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3371 if (unlikely(!(*did_some_progress))) 3372 return NULL; 3373 3374 retry: 3375 page = get_page_from_freelist(gfp_mask, order, 3376 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 3377 3378 /* 3379 * If an allocation failed after direct reclaim, it could be because 3380 * pages are pinned on the per-cpu lists or in high alloc reserves. 3381 * Shrink them and try again 3382 */ 3383 if (!page && !drained) { 3384 unreserve_highatomic_pageblock(ac); 3385 drain_all_pages(NULL); 3386 drained = true; 3387 goto retry; 3388 } 3389 3390 return page; 3391 } 3392 3393 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) 3394 { 3395 struct zoneref *z; 3396 struct zone *zone; 3397 3398 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3399 ac->high_zoneidx, ac->nodemask) 3400 wakeup_kswapd(zone, order, ac_classzone_idx(ac)); 3401 } 3402 3403 static inline unsigned int 3404 gfp_to_alloc_flags(gfp_t gfp_mask) 3405 { 3406 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3407 3408 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 3409 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 3410 3411 /* 3412 * The caller may dip into page reserves a bit more if the caller 3413 * cannot run direct reclaim, or if the caller has realtime scheduling 3414 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3415 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 3416 */ 3417 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 3418 3419 if (gfp_mask & __GFP_ATOMIC) { 3420 /* 3421 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 3422 * if it can't schedule. 3423 */ 3424 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3425 alloc_flags |= ALLOC_HARDER; 3426 /* 3427 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 3428 * comment for __cpuset_node_allowed().
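 *
 * Editorial worked example (not from the original source): a plain
 * GFP_ATOMIC request carries __GFP_HIGH | __GFP_ATOMIC |
 * __GFP_KSWAPD_RECLAIM, so it leaves this function with
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER and, after the
 * statement below, without ALLOC_CPUSET.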
3429 */ 3430 alloc_flags &= ~ALLOC_CPUSET; 3431 } else if (unlikely(rt_task(current)) && !in_interrupt()) 3432 alloc_flags |= ALLOC_HARDER; 3433 3434 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 3435 if (gfp_mask & __GFP_MEMALLOC) 3436 alloc_flags |= ALLOC_NO_WATERMARKS; 3437 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 3438 alloc_flags |= ALLOC_NO_WATERMARKS; 3439 else if (!in_interrupt() && 3440 ((current->flags & PF_MEMALLOC) || 3441 unlikely(test_thread_flag(TIF_MEMDIE)))) 3442 alloc_flags |= ALLOC_NO_WATERMARKS; 3443 } 3444 #ifdef CONFIG_CMA 3445 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3446 alloc_flags |= ALLOC_CMA; 3447 #endif 3448 return alloc_flags; 3449 } 3450 3451 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 3452 { 3453 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS); 3454 } 3455 3456 static inline bool is_thp_gfp_mask(gfp_t gfp_mask) 3457 { 3458 return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE; 3459 } 3460 3461 /* 3462 * Maximum number of reclaim retries without any progress before the OOM killer 3463 * is considered the only way to move forward. 3464 */ 3465 #define MAX_RECLAIM_RETRIES 16 3466 3467 /* 3468 * Checks whether it makes sense to retry the reclaim to make forward progress 3469 * for the given allocation request. 3470 * The reclaim feedback represented by did_some_progress (any progress during 3471 * the last reclaim round) and no_progress_loops (number of reclaim rounds without 3472 * any progress in a row) is considered as well as the reclaimable pages on the 3473 * applicable zone list (with a backoff mechanism which is a function of 3474 * no_progress_loops). 3475 * 3476 * Returns true if a retry is viable or false to enter the oom path. 3477 */ 3478 static inline bool 3479 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 3480 struct alloc_context *ac, int alloc_flags, 3481 bool did_some_progress, int no_progress_loops) 3482 { 3483 struct zone *zone; 3484 struct zoneref *z; 3485 3486 /* 3487 * Make sure we converge to OOM if we cannot make any progress 3488 * several times in a row. 3489 */ 3490 if (no_progress_loops > MAX_RECLAIM_RETRIES) 3491 return false; 3492 3493 /* 3494 * Keep reclaiming pages while there is a chance this will lead somewhere. 3495 * If none of the target zones can satisfy our allocation request even 3496 * if all reclaimable pages are considered then we are screwed and have 3497 * to go OOM. 3498 */ 3499 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 3500 ac->nodemask) { 3501 unsigned long available; 3502 unsigned long reclaimable; 3503 3504 available = reclaimable = zone_reclaimable_pages(zone); 3505 available -= DIV_ROUND_UP(no_progress_loops * available, 3506 MAX_RECLAIM_RETRIES); 3507 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 3508 3509 /* 3510 * Would the allocation succeed if we reclaimed the whole 3511 * available?
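 *
 * (Editorial worked example: with MAX_RECLAIM_RETRIES == 16 and
 * no_progress_loops == 8, the DIV_ROUND_UP() above discounts half of
 * the reclaimable pages, so the 'available' estimate keeps shrinking
 * as fruitless reclaim rounds accumulate.)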
3512 */ 3513 if (__zone_watermark_ok(zone, order, min_wmark_pages(zone), 3514 ac_classzone_idx(ac), alloc_flags, available)) { 3515 /* 3516 * If we didn't make any progress and have a lot of 3517 * dirty + writeback pages then we should wait for 3518 * an IO to complete to slow down the reclaim and 3519 * prevent a premature OOM 3520 */ 3521 if (!did_some_progress) { 3522 unsigned long writeback; 3523 unsigned long dirty; 3524 3525 writeback = zone_page_state_snapshot(zone, 3526 NR_WRITEBACK); 3527 dirty = zone_page_state_snapshot(zone, NR_FILE_DIRTY); 3528 3529 if (2*(writeback + dirty) > reclaimable) { 3530 congestion_wait(BLK_RW_ASYNC, HZ/10); 3531 return true; 3532 } 3533 } 3534 3535 /* 3536 * Memory allocation/reclaim might be called from a WQ 3537 * context and the current implementation of the WQ 3538 * concurrency control doesn't recognize that 3539 * a particular WQ is congested if the worker thread is 3540 * looping without ever sleeping. Therefore we have to 3541 * do a short sleep here rather than calling 3542 * cond_resched(). 3543 */ 3544 if (current->flags & PF_WQ_WORKER) 3545 schedule_timeout_uninterruptible(1); 3546 else 3547 cond_resched(); 3548 3549 return true; 3550 } 3551 } 3552 3553 return false; 3554 } 3555 3556 static inline struct page * 3557 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 3558 struct alloc_context *ac) 3559 { 3560 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 3561 struct page *page = NULL; 3562 unsigned int alloc_flags; 3563 unsigned long did_some_progress; 3564 enum migrate_mode migration_mode = MIGRATE_ASYNC; 3565 enum compact_result compact_result; 3566 int compaction_retries = 0; 3567 int no_progress_loops = 0; 3568 3569 /* 3570 * In the slowpath, we sanity check order to avoid ever trying to 3571 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 3572 * be using allocators in order of preference for an area that is 3573 * too large. 3574 */ 3575 if (order >= MAX_ORDER) { 3576 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 3577 return NULL; 3578 } 3579 3580 /* 3581 * We also sanity check to catch abuse of atomic reserves being used by 3582 * callers that are not in atomic context. 3583 */ 3584 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 3585 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 3586 gfp_mask &= ~__GFP_ATOMIC; 3587 3588 retry: 3589 if (gfp_mask & __GFP_KSWAPD_RECLAIM) 3590 wake_all_kswapds(order, ac); 3591 3592 /* 3593 * OK, we're below the kswapd watermark and have kicked background 3594 * reclaim. Now things get more complex, so set up alloc_flags according 3595 * to how we want to proceed. 3596 */ 3597 alloc_flags = gfp_to_alloc_flags(gfp_mask); 3598 3599 /* This is the last chance, in general, before the goto nopage.
*/ 3600 page = get_page_from_freelist(gfp_mask, order, 3601 alloc_flags & ~ALLOC_NO_WATERMARKS, ac); 3602 if (page) 3603 goto got_pg; 3604 3605 /* Allocate without watermarks if the context allows */ 3606 if (alloc_flags & ALLOC_NO_WATERMARKS) { 3607 /* 3608 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds 3609 * the allocation is high priority and this type of 3610 * allocation is system rather than user oriented 3611 */ 3612 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); 3613 page = get_page_from_freelist(gfp_mask, order, 3614 ALLOC_NO_WATERMARKS, ac); 3615 if (page) 3616 goto got_pg; 3617 } 3618 3619 /* Caller is not willing to reclaim, we can't balance anything */ 3620 if (!can_direct_reclaim) { 3621 /* 3622 * All existing users of __GFP_NOFAIL are blockable, so warn 3623 * of any new users that actually allow this type of allocation 3624 * to fail. 3625 */ 3626 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL); 3627 goto nopage; 3628 } 3629 3630 /* Avoid recursion of direct reclaim */ 3631 if (current->flags & PF_MEMALLOC) { 3632 /* 3633 * A __GFP_NOFAIL request from this context is rather bizarre 3634 * because we cannot reclaim anything and can only loop waiting 3635 * for somebody to do the work for us. 3636 */ 3637 if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { 3638 cond_resched(); 3639 goto retry; 3640 } 3641 goto nopage; 3642 } 3643 3644 /* Avoid allocations with no watermarks from looping endlessly */ 3645 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL)) 3646 goto nopage; 3647 3648 /* 3649 * Try direct compaction. The first pass is asynchronous. Subsequent 3650 * attempts after direct reclaim are synchronous 3651 */ 3652 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 3653 migration_mode, 3654 &compact_result); 3655 if (page) 3656 goto got_pg; 3657 3658 /* Checks for THP-specific high-order allocations */ 3659 if (is_thp_gfp_mask(gfp_mask)) { 3660 /* 3661 * If compaction is deferred for high-order allocations, it is 3662 * because sync compaction recently failed. If this is the case 3663 * and the caller requested a THP allocation, we do not want 3664 * to heavily disrupt the system, so we fail the allocation 3665 * instead of entering direct reclaim. 3666 */ 3667 if (compact_result == COMPACT_DEFERRED) 3668 goto nopage; 3669 3670 /* 3671 * Compaction is contended so rather back off than cause 3672 * excessive stalls.
3673 */ 3674 if (compact_result == COMPACT_CONTENDED) 3675 goto nopage; 3676 } 3677 3678 if (order && compaction_made_progress(compact_result)) 3679 compaction_retries++; 3680 3681 /* Try direct reclaim and then allocating */ 3682 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 3683 &did_some_progress); 3684 if (page) 3685 goto got_pg; 3686 3687 /* Do not loop if specifically requested */ 3688 if (gfp_mask & __GFP_NORETRY) 3689 goto noretry; 3690 3691 /* 3692 * Do not retry costly high order allocations unless they are 3693 * __GFP_REPEAT 3694 */ 3695 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT)) 3696 goto noretry; 3697 3698 /* 3699 * Costly allocations might have made progress but this doesn't mean 3700 * their order will become available due to high fragmentation so 3701 * always increment the no progress counter for them 3702 */ 3703 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 3704 no_progress_loops = 0; 3705 else 3706 no_progress_loops++; 3707 3708 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 3709 did_some_progress > 0, no_progress_loops)) 3710 goto retry; 3711 3712 /* 3713 * It doesn't make any sense to retry the compaction if the order-0 3714 * reclaim is not able to make any progress, because the current 3715 * implementation of compaction depends on a sufficient amount 3716 * of free memory (see __compaction_suitable) 3717 */ 3718 if (did_some_progress > 0 && 3719 should_compact_retry(ac, order, alloc_flags, 3720 compact_result, &migration_mode, 3721 compaction_retries)) 3722 goto retry; 3723 3724 /* Reclaim has failed us, start killing things */ 3725 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 3726 if (page) 3727 goto got_pg; 3728 3729 /* Retry as long as the OOM killer is making progress */ 3730 if (did_some_progress) { 3731 no_progress_loops = 0; 3732 goto retry; 3733 } 3734 3735 noretry: 3736 /* 3737 * High-order allocations do not necessarily loop after direct reclaim 3738 * and reclaim/compaction depends on compaction being called after 3739 * reclaim so call directly if necessary. 3740 * It can become very expensive to allocate transparent hugepages at 3741 * fault, so use asynchronous memory compaction for THP unless it is 3742 * khugepaged trying to collapse. All other requests should tolerate 3743 * at least light sync migration. 3744 */ 3745 if (is_thp_gfp_mask(gfp_mask) && !(current->flags & PF_KTHREAD)) 3746 migration_mode = MIGRATE_ASYNC; 3747 else 3748 migration_mode = MIGRATE_SYNC_LIGHT; 3749 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, 3750 ac, migration_mode, 3751 &compact_result); 3752 if (page) 3753 goto got_pg; 3754 nopage: 3755 warn_alloc_failed(gfp_mask, order, NULL); 3756 got_pg: 3757 return page; 3758 } 3759 3760 /* 3761 * This is the 'heart' of the zoned buddy allocator.
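 *
 * Illustrative caller (editorial sketch, not from the original source);
 * most code reaches this function through the alloc_pages() family of
 * wrappers rather than calling it directly:
 *
 *	struct page *page;
 *
 *	page = __alloc_pages_nodemask(GFP_KERNEL, 0,
 *			node_zonelist(numa_node_id(), GFP_KERNEL), NULL);
 *	if (!page)
 *		return -ENOMEM;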
3762 */ 3763 struct page * 3764 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 3765 struct zonelist *zonelist, nodemask_t *nodemask) 3766 { 3767 struct page *page; 3768 unsigned int cpuset_mems_cookie; 3769 unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR; 3770 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ 3771 struct alloc_context ac = { 3772 .high_zoneidx = gfp_zone(gfp_mask), 3773 .zonelist = zonelist, 3774 .nodemask = nodemask, 3775 .migratetype = gfpflags_to_migratetype(gfp_mask), 3776 }; 3777 3778 if (cpusets_enabled()) { 3779 alloc_mask |= __GFP_HARDWALL; 3780 alloc_flags |= ALLOC_CPUSET; 3781 if (!ac.nodemask) 3782 ac.nodemask = &cpuset_current_mems_allowed; 3783 } 3784 3785 gfp_mask &= gfp_allowed_mask; 3786 3787 lockdep_trace_alloc(gfp_mask); 3788 3789 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 3790 3791 if (should_fail_alloc_page(gfp_mask, order)) 3792 return NULL; 3793 3794 /* 3795 * Check the zones suitable for the gfp_mask contain at least one 3796 * valid zone. It's possible to have an empty zonelist as a result 3797 * of __GFP_THISNODE and a memoryless node 3798 */ 3799 if (unlikely(!zonelist->_zonerefs->zone)) 3800 return NULL; 3801 3802 if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE) 3803 alloc_flags |= ALLOC_CMA; 3804 3805 retry_cpuset: 3806 cpuset_mems_cookie = read_mems_allowed_begin(); 3807 3808 /* Dirty zone balancing only done in the fast path */ 3809 ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3810 3811 /* The preferred zone is used for statistics later */ 3812 ac.preferred_zoneref = first_zones_zonelist(ac.zonelist, 3813 ac.high_zoneidx, ac.nodemask); 3814 if (!ac.preferred_zoneref) { 3815 page = NULL; 3816 goto no_zone; 3817 } 3818 3819 /* First allocation attempt */ 3820 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); 3821 if (likely(page)) 3822 goto out; 3823 3824 /* 3825 * Runtime PM, block IO and its error handling path can deadlock 3826 * because I/O on the device might not complete. 3827 */ 3828 alloc_mask = memalloc_noio_flags(gfp_mask); 3829 ac.spread_dirty_pages = false; 3830 3831 /* 3832 * Restore the original nodemask if it was potentially replaced with 3833 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 3834 */ 3835 if (cpusets_enabled()) 3836 ac.nodemask = nodemask; 3837 page = __alloc_pages_slowpath(alloc_mask, order, &ac); 3838 3839 no_zone: 3840 /* 3841 * When updating a task's mems_allowed, it is possible to race with 3842 * parallel threads in such a way that an allocation can fail while 3843 * the mask is being updated. If a page allocation is about to fail, 3844 * check if the cpuset changed during allocation and if so, retry. 3845 */ 3846 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) { 3847 alloc_mask = gfp_mask; 3848 goto retry_cpuset; 3849 } 3850 3851 out: 3852 if (kmemcheck_enabled && page) 3853 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 3854 3855 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); 3856 3857 return page; 3858 } 3859 EXPORT_SYMBOL(__alloc_pages_nodemask); 3860 3861 /* 3862 * Common helper functions. 
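 *
 * Minimal usage sketch for the helpers below (illustrative only, not part
 * of the original file).  They deal in kernel virtual addresses rather than
 * struct page pointers, which is why __GFP_HIGHMEM callers are rejected:
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);	/* 2 pages */
 *	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);
 *
 *	if (buf)
 *		free_pages(buf, 1);
 *	if (zeroed)
 *		free_page(zeroed);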
3863 */ 3864 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 3865 { 3866 struct page *page; 3867 3868 /* 3869 * __get_free_pages() returns a 32-bit address, which cannot represent 3870 * a highmem page 3871 */ 3872 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 3873 3874 page = alloc_pages(gfp_mask, order); 3875 if (!page) 3876 return 0; 3877 return (unsigned long) page_address(page); 3878 } 3879 EXPORT_SYMBOL(__get_free_pages); 3880 3881 unsigned long get_zeroed_page(gfp_t gfp_mask) 3882 { 3883 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 3884 } 3885 EXPORT_SYMBOL(get_zeroed_page); 3886 3887 void __free_pages(struct page *page, unsigned int order) 3888 { 3889 if (put_page_testzero(page)) { 3890 if (order == 0) 3891 free_hot_cold_page(page, false); 3892 else 3893 __free_pages_ok(page, order); 3894 } 3895 } 3896 3897 EXPORT_SYMBOL(__free_pages); 3898 3899 void free_pages(unsigned long addr, unsigned int order) 3900 { 3901 if (addr != 0) { 3902 VM_BUG_ON(!virt_addr_valid((void *)addr)); 3903 __free_pages(virt_to_page((void *)addr), order); 3904 } 3905 } 3906 3907 EXPORT_SYMBOL(free_pages); 3908 3909 /* 3910 * Page Fragment: 3911 * An arbitrary-length arbitrary-offset area of memory which resides 3912 * within a 0 or higher order page. Multiple fragments within that page 3913 * are individually refcounted, in the page's reference counter. 3914 * 3915 * The page_frag functions below provide a simple allocation framework for 3916 * page fragments. This is used by the network stack and network device 3917 * drivers to provide a backing region of memory for use as either an 3918 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 3919 */ 3920 static struct page *__page_frag_refill(struct page_frag_cache *nc, 3921 gfp_t gfp_mask) 3922 { 3923 struct page *page = NULL; 3924 gfp_t gfp = gfp_mask; 3925 3926 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3927 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 3928 __GFP_NOMEMALLOC; 3929 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 3930 PAGE_FRAG_CACHE_MAX_ORDER); 3931 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 3932 #endif 3933 if (unlikely(!page)) 3934 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 3935 3936 nc->va = page ? page_address(page) : NULL; 3937 3938 return page; 3939 } 3940 3941 void *__alloc_page_frag(struct page_frag_cache *nc, 3942 unsigned int fragsz, gfp_t gfp_mask) 3943 { 3944 unsigned int size = PAGE_SIZE; 3945 struct page *page; 3946 int offset; 3947 3948 if (unlikely(!nc->va)) { 3949 refill: 3950 page = __page_frag_refill(nc, gfp_mask); 3951 if (!page) 3952 return NULL; 3953 3954 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3955 /* if size can vary use size else just use PAGE_SIZE */ 3956 size = nc->size; 3957 #endif 3958 /* Even if we own the page, we do not use atomic_set(). 3959 * This would break get_page_unless_zero() users. 
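 *
 * Worked example of the bias scheme (editor's illustration, assuming the
 * 32KB fragment cache case, i.e. size == 32768): the initial reference
 * plus page_ref_add(page, size - 1) below leaves the refcount at 32768,
 * and pagecnt_bias also starts at 32768.  Every fragment handed out then
 * consumes one unit of bias; the page is only recycled or freed once
 * page_ref_sub_and_test(page, nc->pagecnt_bias) later finds that all
 * fragments given to callers have been released again.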
3960 */ 3961 page_ref_add(page, size - 1); 3962 3963 /* reset page count bias and offset to start of new frag */ 3964 nc->pfmemalloc = page_is_pfmemalloc(page); 3965 nc->pagecnt_bias = size; 3966 nc->offset = size; 3967 } 3968 3969 offset = nc->offset - fragsz; 3970 if (unlikely(offset < 0)) { 3971 page = virt_to_page(nc->va); 3972 3973 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 3974 goto refill; 3975 3976 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 3977 /* if size can vary use size else just use PAGE_SIZE */ 3978 size = nc->size; 3979 #endif 3980 /* OK, page count is 0, we can safely set it */ 3981 set_page_count(page, size); 3982 3983 /* reset page count bias and offset to start of new frag */ 3984 nc->pagecnt_bias = size; 3985 offset = size - fragsz; 3986 } 3987 3988 nc->pagecnt_bias--; 3989 nc->offset = offset; 3990 3991 return nc->va + offset; 3992 } 3993 EXPORT_SYMBOL(__alloc_page_frag); 3994 3995 /* 3996 * Frees a page fragment allocated out of either a compound or order 0 page. 3997 */ 3998 void __free_page_frag(void *addr) 3999 { 4000 struct page *page = virt_to_head_page(addr); 4001 4002 if (unlikely(put_page_testzero(page))) 4003 __free_pages_ok(page, compound_order(page)); 4004 } 4005 EXPORT_SYMBOL(__free_page_frag); 4006 4007 /* 4008 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter 4009 * of the current memory cgroup if __GFP_ACCOUNT is set, other than that it is 4010 * equivalent to alloc_pages. 4011 * 4012 * It should be used when the caller would like to use kmalloc, but since the 4013 * allocation is large, it has to fall back to the page allocator. 4014 */ 4015 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order) 4016 { 4017 struct page *page; 4018 4019 page = alloc_pages(gfp_mask, order); 4020 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { 4021 __free_pages(page, order); 4022 page = NULL; 4023 } 4024 return page; 4025 } 4026 4027 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order) 4028 { 4029 struct page *page; 4030 4031 page = alloc_pages_node(nid, gfp_mask, order); 4032 if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) { 4033 __free_pages(page, order); 4034 page = NULL; 4035 } 4036 return page; 4037 } 4038 4039 /* 4040 * __free_kmem_pages and free_kmem_pages will free pages allocated with 4041 * alloc_kmem_pages. 4042 */ 4043 void __free_kmem_pages(struct page *page, unsigned int order) 4044 { 4045 memcg_kmem_uncharge(page, order); 4046 __free_pages(page, order); 4047 } 4048 4049 void free_kmem_pages(unsigned long addr, unsigned int order) 4050 { 4051 if (addr != 0) { 4052 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4053 __free_kmem_pages(virt_to_page((void *)addr), order); 4054 } 4055 } 4056 4057 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4058 size_t size) 4059 { 4060 if (addr) { 4061 unsigned long alloc_end = addr + (PAGE_SIZE << order); 4062 unsigned long used = addr + PAGE_ALIGN(size); 4063 4064 split_page(virt_to_page((void *)addr), order); 4065 while (used < alloc_end) { 4066 free_page(used); 4067 used += PAGE_SIZE; 4068 } 4069 } 4070 return (void *)addr; 4071 } 4072 4073 /** 4074 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 4075 * @size: the number of bytes to allocate 4076 * @gfp_mask: GFP flags for the allocation 4077 * 4078 * This function is similar to alloc_pages(), except that it allocates the 4079 * minimum number of pages to satisfy the request. 
alloc_pages() can only 4080 * allocate memory in power-of-two pages. 4081 * 4082 * This function is also limited by MAX_ORDER. 4083 * 4084 * Memory allocated by this function must be released by free_pages_exact(). 4085 */ 4086 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 4087 { 4088 unsigned int order = get_order(size); 4089 unsigned long addr; 4090 4091 addr = __get_free_pages(gfp_mask, order); 4092 return make_alloc_exact(addr, order, size); 4093 } 4094 EXPORT_SYMBOL(alloc_pages_exact); 4095 4096 /** 4097 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4098 * pages on a node. 4099 * @nid: the preferred node ID where memory should be allocated 4100 * @size: the number of bytes to allocate 4101 * @gfp_mask: GFP flags for the allocation 4102 * 4103 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 4104 * back. 4105 */ 4106 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 4107 { 4108 unsigned int order = get_order(size); 4109 struct page *p = alloc_pages_node(nid, gfp_mask, order); 4110 if (!p) 4111 return NULL; 4112 return make_alloc_exact((unsigned long)page_address(p), order, size); 4113 } 4114 4115 /** 4116 * free_pages_exact - release memory allocated via alloc_pages_exact() 4117 * @virt: the value returned by alloc_pages_exact. 4118 * @size: size of allocation, same value as passed to alloc_pages_exact(). 4119 * 4120 * Release the memory allocated by a previous call to alloc_pages_exact. 4121 */ 4122 void free_pages_exact(void *virt, size_t size) 4123 { 4124 unsigned long addr = (unsigned long)virt; 4125 unsigned long end = addr + PAGE_ALIGN(size); 4126 4127 while (addr < end) { 4128 free_page(addr); 4129 addr += PAGE_SIZE; 4130 } 4131 } 4132 EXPORT_SYMBOL(free_pages_exact); 4133 4134 /** 4135 * nr_free_zone_pages - count number of pages beyond high watermark 4136 * @offset: The zone index of the highest zone 4137 * 4138 * nr_free_zone_pages() counts the number of counts pages which are beyond the 4139 * high watermark within all zones at or below a given zone index. For each 4140 * zone, the number of pages is calculated as: 4141 * managed_pages - high_pages 4142 */ 4143 static unsigned long nr_free_zone_pages(int offset) 4144 { 4145 struct zoneref *z; 4146 struct zone *zone; 4147 4148 /* Just pick one node, since fallback list is circular */ 4149 unsigned long sum = 0; 4150 4151 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 4152 4153 for_each_zone_zonelist(zone, z, zonelist, offset) { 4154 unsigned long size = zone->managed_pages; 4155 unsigned long high = high_wmark_pages(zone); 4156 if (size > high) 4157 sum += size - high; 4158 } 4159 4160 return sum; 4161 } 4162 4163 /** 4164 * nr_free_buffer_pages - count number of pages beyond high watermark 4165 * 4166 * nr_free_buffer_pages() counts the number of pages which are beyond the high 4167 * watermark within ZONE_DMA and ZONE_NORMAL. 4168 */ 4169 unsigned long nr_free_buffer_pages(void) 4170 { 4171 return nr_free_zone_pages(gfp_zone(GFP_USER)); 4172 } 4173 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 4174 4175 /** 4176 * nr_free_pagecache_pages - count number of pages beyond high watermark 4177 * 4178 * nr_free_pagecache_pages() counts the number of pages which are beyond the 4179 * high watermark within all zones. 
4180 */ 4181 unsigned long nr_free_pagecache_pages(void) 4182 { 4183 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 4184 } 4185 4186 static inline void show_node(struct zone *zone) 4187 { 4188 if (IS_ENABLED(CONFIG_NUMA)) 4189 printk("Node %d ", zone_to_nid(zone)); 4190 } 4191 4192 long si_mem_available(void) 4193 { 4194 long available; 4195 unsigned long pagecache; 4196 unsigned long wmark_low = 0; 4197 unsigned long pages[NR_LRU_LISTS]; 4198 struct zone *zone; 4199 int lru; 4200 4201 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 4202 pages[lru] = global_page_state(NR_LRU_BASE + lru); 4203 4204 for_each_zone(zone) 4205 wmark_low += zone->watermark[WMARK_LOW]; 4206 4207 /* 4208 * Estimate the amount of memory available for userspace allocations, 4209 * without causing swapping. 4210 */ 4211 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages; 4212 4213 /* 4214 * Not all the page cache can be freed, otherwise the system will 4215 * start swapping. Assume at least half of the page cache, or the 4216 * low watermark worth of cache, needs to stay. 4217 */ 4218 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 4219 pagecache -= min(pagecache / 2, wmark_low); 4220 available += pagecache; 4221 4222 /* 4223 * Part of the reclaimable slab consists of items that are in use, 4224 * and cannot be freed. Cap this estimate at the low watermark. 4225 */ 4226 available += global_page_state(NR_SLAB_RECLAIMABLE) - 4227 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low); 4228 4229 if (available < 0) 4230 available = 0; 4231 return available; 4232 } 4233 EXPORT_SYMBOL_GPL(si_mem_available); 4234 4235 void si_meminfo(struct sysinfo *val) 4236 { 4237 val->totalram = totalram_pages; 4238 val->sharedram = global_page_state(NR_SHMEM); 4239 val->freeram = global_page_state(NR_FREE_PAGES); 4240 val->bufferram = nr_blockdev_pages(); 4241 val->totalhigh = totalhigh_pages; 4242 val->freehigh = nr_free_highpages(); 4243 val->mem_unit = PAGE_SIZE; 4244 } 4245 4246 EXPORT_SYMBOL(si_meminfo); 4247 4248 #ifdef CONFIG_NUMA 4249 void si_meminfo_node(struct sysinfo *val, int nid) 4250 { 4251 int zone_type; /* needs to be signed */ 4252 unsigned long managed_pages = 0; 4253 unsigned long managed_highpages = 0; 4254 unsigned long free_highpages = 0; 4255 pg_data_t *pgdat = NODE_DATA(nid); 4256 4257 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 4258 managed_pages += pgdat->node_zones[zone_type].managed_pages; 4259 val->totalram = managed_pages; 4260 val->sharedram = node_page_state(nid, NR_SHMEM); 4261 val->freeram = node_page_state(nid, NR_FREE_PAGES); 4262 #ifdef CONFIG_HIGHMEM 4263 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 4264 struct zone *zone = &pgdat->node_zones[zone_type]; 4265 4266 if (is_highmem(zone)) { 4267 managed_highpages += zone->managed_pages; 4268 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 4269 } 4270 } 4271 val->totalhigh = managed_highpages; 4272 val->freehigh = free_highpages; 4273 #else 4274 val->totalhigh = managed_highpages; 4275 val->freehigh = free_highpages; 4276 #endif 4277 val->mem_unit = PAGE_SIZE; 4278 } 4279 #endif 4280 4281 /* 4282 * Determine whether the node should be displayed or not, depending on whether 4283 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 
4284 */ 4285 bool skip_free_areas_node(unsigned int flags, int nid) 4286 { 4287 bool ret = false; 4288 unsigned int cpuset_mems_cookie; 4289 4290 if (!(flags & SHOW_MEM_FILTER_NODES)) 4291 goto out; 4292 4293 do { 4294 cpuset_mems_cookie = read_mems_allowed_begin(); 4295 ret = !node_isset(nid, cpuset_current_mems_allowed); 4296 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 4297 out: 4298 return ret; 4299 } 4300 4301 #define K(x) ((x) << (PAGE_SHIFT-10)) 4302 4303 static void show_migration_types(unsigned char type) 4304 { 4305 static const char types[MIGRATE_TYPES] = { 4306 [MIGRATE_UNMOVABLE] = 'U', 4307 [MIGRATE_MOVABLE] = 'M', 4308 [MIGRATE_RECLAIMABLE] = 'E', 4309 [MIGRATE_HIGHATOMIC] = 'H', 4310 #ifdef CONFIG_CMA 4311 [MIGRATE_CMA] = 'C', 4312 #endif 4313 #ifdef CONFIG_MEMORY_ISOLATION 4314 [MIGRATE_ISOLATE] = 'I', 4315 #endif 4316 }; 4317 char tmp[MIGRATE_TYPES + 1]; 4318 char *p = tmp; 4319 int i; 4320 4321 for (i = 0; i < MIGRATE_TYPES; i++) { 4322 if (type & (1 << i)) 4323 *p++ = types[i]; 4324 } 4325 4326 *p = '\0'; 4327 printk("(%s) ", tmp); 4328 } 4329 4330 /* 4331 * Show free area list (used inside shift_scroll-lock stuff) 4332 * We also calculate the percentage fragmentation. We do this by counting the 4333 * memory on each free list with the exception of the first item on the list. 4334 * 4335 * Bits in @filter: 4336 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 4337 * cpuset. 4338 */ 4339 void show_free_areas(unsigned int filter) 4340 { 4341 unsigned long free_pcp = 0; 4342 int cpu; 4343 struct zone *zone; 4344 4345 for_each_populated_zone(zone) { 4346 if (skip_free_areas_node(filter, zone_to_nid(zone))) 4347 continue; 4348 4349 for_each_online_cpu(cpu) 4350 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 4351 } 4352 4353 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 4354 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 4355 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" 4356 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 4357 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 4358 " free:%lu free_pcp:%lu free_cma:%lu\n", 4359 global_page_state(NR_ACTIVE_ANON), 4360 global_page_state(NR_INACTIVE_ANON), 4361 global_page_state(NR_ISOLATED_ANON), 4362 global_page_state(NR_ACTIVE_FILE), 4363 global_page_state(NR_INACTIVE_FILE), 4364 global_page_state(NR_ISOLATED_FILE), 4365 global_page_state(NR_UNEVICTABLE), 4366 global_page_state(NR_FILE_DIRTY), 4367 global_page_state(NR_WRITEBACK), 4368 global_page_state(NR_UNSTABLE_NFS), 4369 global_page_state(NR_SLAB_RECLAIMABLE), 4370 global_page_state(NR_SLAB_UNRECLAIMABLE), 4371 global_page_state(NR_FILE_MAPPED), 4372 global_page_state(NR_SHMEM), 4373 global_page_state(NR_PAGETABLE), 4374 global_page_state(NR_BOUNCE), 4375 global_page_state(NR_FREE_PAGES), 4376 free_pcp, 4377 global_page_state(NR_FREE_CMA_PAGES)); 4378 4379 for_each_populated_zone(zone) { 4380 int i; 4381 4382 if (skip_free_areas_node(filter, zone_to_nid(zone))) 4383 continue; 4384 4385 free_pcp = 0; 4386 for_each_online_cpu(cpu) 4387 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 4388 4389 show_node(zone); 4390 printk("%s" 4391 " free:%lukB" 4392 " min:%lukB" 4393 " low:%lukB" 4394 " high:%lukB" 4395 " active_anon:%lukB" 4396 " inactive_anon:%lukB" 4397 " active_file:%lukB" 4398 " inactive_file:%lukB" 4399 " unevictable:%lukB" 4400 " isolated(anon):%lukB" 4401 " isolated(file):%lukB" 4402 " present:%lukB" 4403 " managed:%lukB" 4404 " mlocked:%lukB" 4405 " dirty:%lukB" 4406 " 
writeback:%lukB" 4407 " mapped:%lukB" 4408 " shmem:%lukB" 4409 " slab_reclaimable:%lukB" 4410 " slab_unreclaimable:%lukB" 4411 " kernel_stack:%lukB" 4412 " pagetables:%lukB" 4413 " unstable:%lukB" 4414 " bounce:%lukB" 4415 " free_pcp:%lukB" 4416 " local_pcp:%ukB" 4417 " free_cma:%lukB" 4418 " writeback_tmp:%lukB" 4419 " pages_scanned:%lu" 4420 " all_unreclaimable? %s" 4421 "\n", 4422 zone->name, 4423 K(zone_page_state(zone, NR_FREE_PAGES)), 4424 K(min_wmark_pages(zone)), 4425 K(low_wmark_pages(zone)), 4426 K(high_wmark_pages(zone)), 4427 K(zone_page_state(zone, NR_ACTIVE_ANON)), 4428 K(zone_page_state(zone, NR_INACTIVE_ANON)), 4429 K(zone_page_state(zone, NR_ACTIVE_FILE)), 4430 K(zone_page_state(zone, NR_INACTIVE_FILE)), 4431 K(zone_page_state(zone, NR_UNEVICTABLE)), 4432 K(zone_page_state(zone, NR_ISOLATED_ANON)), 4433 K(zone_page_state(zone, NR_ISOLATED_FILE)), 4434 K(zone->present_pages), 4435 K(zone->managed_pages), 4436 K(zone_page_state(zone, NR_MLOCK)), 4437 K(zone_page_state(zone, NR_FILE_DIRTY)), 4438 K(zone_page_state(zone, NR_WRITEBACK)), 4439 K(zone_page_state(zone, NR_FILE_MAPPED)), 4440 K(zone_page_state(zone, NR_SHMEM)), 4441 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 4442 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 4443 zone_page_state(zone, NR_KERNEL_STACK) * 4444 THREAD_SIZE / 1024, 4445 K(zone_page_state(zone, NR_PAGETABLE)), 4446 K(zone_page_state(zone, NR_UNSTABLE_NFS)), 4447 K(zone_page_state(zone, NR_BOUNCE)), 4448 K(free_pcp), 4449 K(this_cpu_read(zone->pageset->pcp.count)), 4450 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), 4451 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 4452 K(zone_page_state(zone, NR_PAGES_SCANNED)), 4453 (!zone_reclaimable(zone) ? "yes" : "no") 4454 ); 4455 printk("lowmem_reserve[]:"); 4456 for (i = 0; i < MAX_NR_ZONES; i++) 4457 printk(" %ld", zone->lowmem_reserve[i]); 4458 printk("\n"); 4459 } 4460 4461 for_each_populated_zone(zone) { 4462 unsigned int order; 4463 unsigned long nr[MAX_ORDER], flags, total = 0; 4464 unsigned char types[MAX_ORDER]; 4465 4466 if (skip_free_areas_node(filter, zone_to_nid(zone))) 4467 continue; 4468 show_node(zone); 4469 printk("%s: ", zone->name); 4470 4471 spin_lock_irqsave(&zone->lock, flags); 4472 for (order = 0; order < MAX_ORDER; order++) { 4473 struct free_area *area = &zone->free_area[order]; 4474 int type; 4475 4476 nr[order] = area->nr_free; 4477 total += nr[order] << order; 4478 4479 types[order] = 0; 4480 for (type = 0; type < MIGRATE_TYPES; type++) { 4481 if (!list_empty(&area->free_list[type])) 4482 types[order] |= 1 << type; 4483 } 4484 } 4485 spin_unlock_irqrestore(&zone->lock, flags); 4486 for (order = 0; order < MAX_ORDER; order++) { 4487 printk("%lu*%lukB ", nr[order], K(1UL) << order); 4488 if (nr[order]) 4489 show_migration_types(types[order]); 4490 } 4491 printk("= %lukB\n", K(total)); 4492 } 4493 4494 hugetlb_show_meminfo(); 4495 4496 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 4497 4498 show_swap_cache_info(); 4499 } 4500 4501 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 4502 { 4503 zoneref->zone = zone; 4504 zoneref->zone_idx = zone_idx(zone); 4505 } 4506 4507 /* 4508 * Builds allocation fallback zone lists. 4509 * 4510 * Add all populated zones of a node to the zonelist. 
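 *
 * Illustrative result (editor's sketch): for a node whose DMA, DMA32 and
 * Normal zones are all populated, the zones are appended highest type
 * first, so starting at nr_zones == 0 the node's zonerefs end up as
 *
 *	_zonerefs[0] -> ZONE_NORMAL
 *	_zonerefs[1] -> ZONE_DMA32
 *	_zonerefs[2] -> ZONE_DMA
 *
 * which is what lets a Normal request fall back into the lower zones.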
4511 */ 4512 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 4513 int nr_zones) 4514 { 4515 struct zone *zone; 4516 enum zone_type zone_type = MAX_NR_ZONES; 4517 4518 do { 4519 zone_type--; 4520 zone = pgdat->node_zones + zone_type; 4521 if (populated_zone(zone)) { 4522 zoneref_set_zone(zone, 4523 &zonelist->_zonerefs[nr_zones++]); 4524 check_highest_zone(zone_type); 4525 } 4526 } while (zone_type); 4527 4528 return nr_zones; 4529 } 4530 4531 4532 /* 4533 * zonelist_order: 4534 * 0 = automatic detection of better ordering. 4535 * 1 = order by ([node] distance, -zonetype) 4536 * 2 = order by (-zonetype, [node] distance) 4537 * 4538 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 4539 * the same zonelist. So only NUMA can configure this param. 4540 */ 4541 #define ZONELIST_ORDER_DEFAULT 0 4542 #define ZONELIST_ORDER_NODE 1 4543 #define ZONELIST_ORDER_ZONE 2 4544 4545 /* zonelist order in the kernel. 4546 * set_zonelist_order() will set this to NODE or ZONE. 4547 */ 4548 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 4549 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 4550 4551 4552 #ifdef CONFIG_NUMA 4553 /* The value user specified ....changed by config */ 4554 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 4555 /* string for sysctl */ 4556 #define NUMA_ZONELIST_ORDER_LEN 16 4557 char numa_zonelist_order[16] = "default"; 4558 4559 /* 4560 * interface for configure zonelist ordering. 4561 * command line option "numa_zonelist_order" 4562 * = "[dD]efault - default, automatic configuration. 4563 * = "[nN]ode - order by node locality, then by zone within node 4564 * = "[zZ]one - order by zone, then by locality within zone 4565 */ 4566 4567 static int __parse_numa_zonelist_order(char *s) 4568 { 4569 if (*s == 'd' || *s == 'D') { 4570 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 4571 } else if (*s == 'n' || *s == 'N') { 4572 user_zonelist_order = ZONELIST_ORDER_NODE; 4573 } else if (*s == 'z' || *s == 'Z') { 4574 user_zonelist_order = ZONELIST_ORDER_ZONE; 4575 } else { 4576 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s); 4577 return -EINVAL; 4578 } 4579 return 0; 4580 } 4581 4582 static __init int setup_numa_zonelist_order(char *s) 4583 { 4584 int ret; 4585 4586 if (!s) 4587 return 0; 4588 4589 ret = __parse_numa_zonelist_order(s); 4590 if (ret == 0) 4591 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); 4592 4593 return ret; 4594 } 4595 early_param("numa_zonelist_order", setup_numa_zonelist_order); 4596 4597 /* 4598 * sysctl handler for numa_zonelist_order 4599 */ 4600 int numa_zonelist_order_handler(struct ctl_table *table, int write, 4601 void __user *buffer, size_t *length, 4602 loff_t *ppos) 4603 { 4604 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 4605 int ret; 4606 static DEFINE_MUTEX(zl_order_mutex); 4607 4608 mutex_lock(&zl_order_mutex); 4609 if (write) { 4610 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) { 4611 ret = -EINVAL; 4612 goto out; 4613 } 4614 strcpy(saved_string, (char *)table->data); 4615 } 4616 ret = proc_dostring(table, write, buffer, length, ppos); 4617 if (ret) 4618 goto out; 4619 if (write) { 4620 int oldval = user_zonelist_order; 4621 4622 ret = __parse_numa_zonelist_order((char *)table->data); 4623 if (ret) { 4624 /* 4625 * bogus value. 
restore saved string 4626 */ 4627 strncpy((char *)table->data, saved_string, 4628 NUMA_ZONELIST_ORDER_LEN); 4629 user_zonelist_order = oldval; 4630 } else if (oldval != user_zonelist_order) { 4631 mutex_lock(&zonelists_mutex); 4632 build_all_zonelists(NULL, NULL); 4633 mutex_unlock(&zonelists_mutex); 4634 } 4635 } 4636 out: 4637 mutex_unlock(&zl_order_mutex); 4638 return ret; 4639 } 4640 4641 4642 #define MAX_NODE_LOAD (nr_online_nodes) 4643 static int node_load[MAX_NUMNODES]; 4644 4645 /** 4646 * find_next_best_node - find the next node that should appear in a given node's fallback list 4647 * @node: node whose fallback list we're appending 4648 * @used_node_mask: nodemask_t of already used nodes 4649 * 4650 * We use a number of factors to determine which is the next node that should 4651 * appear on a given node's fallback list. The node should not have appeared 4652 * already in @node's fallback list, and it should be the next closest node 4653 * according to the distance array (which contains arbitrary distance values 4654 * from each node to each node in the system), and should also prefer nodes 4655 * with no CPUs, since presumably they'll have very little allocation pressure 4656 * on them otherwise. 4657 * It returns -1 if no node is found. 4658 */ 4659 static int find_next_best_node(int node, nodemask_t *used_node_mask) 4660 { 4661 int n, val; 4662 int min_val = INT_MAX; 4663 int best_node = NUMA_NO_NODE; 4664 const struct cpumask *tmp = cpumask_of_node(0); 4665 4666 /* Use the local node if we haven't already */ 4667 if (!node_isset(node, *used_node_mask)) { 4668 node_set(node, *used_node_mask); 4669 return node; 4670 } 4671 4672 for_each_node_state(n, N_MEMORY) { 4673 4674 /* Don't want a node to appear more than once */ 4675 if (node_isset(n, *used_node_mask)) 4676 continue; 4677 4678 /* Use the distance array to find the distance */ 4679 val = node_distance(node, n); 4680 4681 /* Penalize nodes under us ("prefer the next node") */ 4682 val += (n < node); 4683 4684 /* Give preference to headless and unused nodes */ 4685 tmp = cpumask_of_node(n); 4686 if (!cpumask_empty(tmp)) 4687 val += PENALTY_FOR_NODE_WITH_CPUS; 4688 4689 /* Slight preference for less loaded node */ 4690 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 4691 val += node_load[n]; 4692 4693 if (val < min_val) { 4694 min_val = val; 4695 best_node = n; 4696 } 4697 } 4698 4699 if (best_node >= 0) 4700 node_set(best_node, *used_node_mask); 4701 4702 return best_node; 4703 } 4704 4705 4706 /* 4707 * Build zonelists ordered by node and zones within node. 4708 * This results in maximum locality--normal zone overflows into local 4709 * DMA zone, if any--but risks exhausting DMA zone. 4710 */ 4711 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 4712 { 4713 int j; 4714 struct zonelist *zonelist; 4715 4716 zonelist = &pgdat->node_zonelists[0]; 4717 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 4718 ; 4719 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4720 zonelist->_zonerefs[j].zone = NULL; 4721 zonelist->_zonerefs[j].zone_idx = 0; 4722 } 4723 4724 /* 4725 * Build gfp_thisnode zonelists 4726 */ 4727 static void build_thisnode_zonelists(pg_data_t *pgdat) 4728 { 4729 int j; 4730 struct zonelist *zonelist; 4731 4732 zonelist = &pgdat->node_zonelists[1]; 4733 j = build_zonelists_node(pgdat, zonelist, 0); 4734 zonelist->_zonerefs[j].zone = NULL; 4735 zonelist->_zonerefs[j].zone_idx = 0; 4736 } 4737 4738 /* 4739 * Build zonelists ordered by zone and nodes within zones. 
4740 * This results in conserving DMA zone[s] until all Normal memory is 4741 * exhausted, but results in overflowing to remote node while memory 4742 * may still exist in local DMA zone. 4743 */ 4744 static int node_order[MAX_NUMNODES]; 4745 4746 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 4747 { 4748 int pos, j, node; 4749 int zone_type; /* needs to be signed */ 4750 struct zone *z; 4751 struct zonelist *zonelist; 4752 4753 zonelist = &pgdat->node_zonelists[0]; 4754 pos = 0; 4755 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 4756 for (j = 0; j < nr_nodes; j++) { 4757 node = node_order[j]; 4758 z = &NODE_DATA(node)->node_zones[zone_type]; 4759 if (populated_zone(z)) { 4760 zoneref_set_zone(z, 4761 &zonelist->_zonerefs[pos++]); 4762 check_highest_zone(zone_type); 4763 } 4764 } 4765 } 4766 zonelist->_zonerefs[pos].zone = NULL; 4767 zonelist->_zonerefs[pos].zone_idx = 0; 4768 } 4769 4770 #if defined(CONFIG_64BIT) 4771 /* 4772 * Devices that require DMA32/DMA are relatively rare and do not justify a 4773 * penalty to every machine in case the specialised case applies. Default 4774 * to Node-ordering on 64-bit NUMA machines 4775 */ 4776 static int default_zonelist_order(void) 4777 { 4778 return ZONELIST_ORDER_NODE; 4779 } 4780 #else 4781 /* 4782 * On 32-bit, the Normal zone needs to be preserved for allocations accessible 4783 * by the kernel. If processes running on node 0 deplete the low memory zone 4784 * then reclaim will occur more frequency increasing stalls and potentially 4785 * be easier to OOM if a large percentage of the zone is under writeback or 4786 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set. 4787 * Hence, default to zone ordering on 32-bit. 4788 */ 4789 static int default_zonelist_order(void) 4790 { 4791 return ZONELIST_ORDER_ZONE; 4792 } 4793 #endif /* CONFIG_64BIT */ 4794 4795 static void set_zonelist_order(void) 4796 { 4797 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 4798 current_zonelist_order = default_zonelist_order(); 4799 else 4800 current_zonelist_order = user_zonelist_order; 4801 } 4802 4803 static void build_zonelists(pg_data_t *pgdat) 4804 { 4805 int i, node, load; 4806 nodemask_t used_mask; 4807 int local_node, prev_node; 4808 struct zonelist *zonelist; 4809 unsigned int order = current_zonelist_order; 4810 4811 /* initialize zonelists */ 4812 for (i = 0; i < MAX_ZONELISTS; i++) { 4813 zonelist = pgdat->node_zonelists + i; 4814 zonelist->_zonerefs[0].zone = NULL; 4815 zonelist->_zonerefs[0].zone_idx = 0; 4816 } 4817 4818 /* NUMA-aware ordering of nodes */ 4819 local_node = pgdat->node_id; 4820 load = nr_online_nodes; 4821 prev_node = local_node; 4822 nodes_clear(used_mask); 4823 4824 memset(node_order, 0, sizeof(node_order)); 4825 i = 0; 4826 4827 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 4828 /* 4829 * We don't want to pressure a particular node. 4830 * So adding penalty to the first node in same 4831 * distance group to make it round-robin. 4832 */ 4833 if (node_distance(local_node, node) != 4834 node_distance(local_node, prev_node)) 4835 node_load[node] = load; 4836 4837 prev_node = node; 4838 load--; 4839 if (order == ZONELIST_ORDER_NODE) 4840 build_zonelists_in_node_order(pgdat, node); 4841 else 4842 node_order[i++] = node; /* remember order */ 4843 } 4844 4845 if (order == ZONELIST_ORDER_ZONE) { 4846 /* calculate node order -- i.e., DMA last! 
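 *
 * Editor's illustration of the difference (not from the original source):
 * with two nodes that each have a Normal and a DMA zone, node ordering
 * yields roughly
 *
 *	N0.Normal, N0.DMA, N1.Normal, N1.DMA
 *
 * whereas the zone ordering built below yields
 *
 *	N0.Normal, N1.Normal, N0.DMA, N1.DMA
 *
 * so every Normal zone in the system is tried before any DMA zone.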
*/ 4847 build_zonelists_in_zone_order(pgdat, i); 4848 } 4849 4850 build_thisnode_zonelists(pgdat); 4851 } 4852 4853 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 4854 /* 4855 * Return node id of node used for "local" allocations. 4856 * I.e., first node id of first zone in arg node's generic zonelist. 4857 * Used for initializing percpu 'numa_mem', which is used primarily 4858 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 4859 */ 4860 int local_memory_node(int node) 4861 { 4862 struct zoneref *z; 4863 4864 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 4865 gfp_zone(GFP_KERNEL), 4866 NULL); 4867 return z->zone->node; 4868 } 4869 #endif 4870 4871 #else /* CONFIG_NUMA */ 4872 4873 static void set_zonelist_order(void) 4874 { 4875 current_zonelist_order = ZONELIST_ORDER_ZONE; 4876 } 4877 4878 static void build_zonelists(pg_data_t *pgdat) 4879 { 4880 int node, local_node; 4881 enum zone_type j; 4882 struct zonelist *zonelist; 4883 4884 local_node = pgdat->node_id; 4885 4886 zonelist = &pgdat->node_zonelists[0]; 4887 j = build_zonelists_node(pgdat, zonelist, 0); 4888 4889 /* 4890 * Now we build the zonelist so that it contains the zones 4891 * of all the other nodes. 4892 * We don't want to pressure a particular node, so when 4893 * building the zones for node N, we make sure that the 4894 * zones coming right after the local ones are those from 4895 * node N+1 (modulo N) 4896 */ 4897 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 4898 if (!node_online(node)) 4899 continue; 4900 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4901 } 4902 for (node = 0; node < local_node; node++) { 4903 if (!node_online(node)) 4904 continue; 4905 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4906 } 4907 4908 zonelist->_zonerefs[j].zone = NULL; 4909 zonelist->_zonerefs[j].zone_idx = 0; 4910 } 4911 4912 #endif /* CONFIG_NUMA */ 4913 4914 /* 4915 * Boot pageset table. One per cpu which is going to be used for all 4916 * zones and all nodes. The parameters will be set in such a way 4917 * that an item put on a list will immediately be handed over to 4918 * the buddy list. This is safe since pageset manipulation is done 4919 * with interrupts disabled. 4920 * 4921 * The boot_pagesets must be kept even after bootup is complete for 4922 * unused processors and/or zones. They do play a role for bootstrapping 4923 * hotplugged processors. 4924 * 4925 * zoneinfo_show() and maybe other functions do 4926 * not check if the processor is online before following the pageset pointer. 4927 * Other parts of the kernel may not check if the zone is available. 4928 */ 4929 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 4930 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 4931 static void setup_zone_pageset(struct zone *zone); 4932 4933 /* 4934 * Global mutex to protect against size modification of zonelists 4935 * as well as to serialize pageset setup for the new populated zone. 
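 *
 * Typical use outside early boot (a sketch; the numa_zonelist_order sysctl
 * handler above follows exactly this pattern):
 *
 *	mutex_lock(&zonelists_mutex);
 *	build_all_zonelists(NULL, NULL);
 *	mutex_unlock(&zonelists_mutex);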
4936 */ 4937 DEFINE_MUTEX(zonelists_mutex); 4938 4939 /* return values int ....just for stop_machine() */ 4940 static int __build_all_zonelists(void *data) 4941 { 4942 int nid; 4943 int cpu; 4944 pg_data_t *self = data; 4945 4946 #ifdef CONFIG_NUMA 4947 memset(node_load, 0, sizeof(node_load)); 4948 #endif 4949 4950 if (self && !node_online(self->node_id)) { 4951 build_zonelists(self); 4952 } 4953 4954 for_each_online_node(nid) { 4955 pg_data_t *pgdat = NODE_DATA(nid); 4956 4957 build_zonelists(pgdat); 4958 } 4959 4960 /* 4961 * Initialize the boot_pagesets that are going to be used 4962 * for bootstrapping processors. The real pagesets for 4963 * each zone will be allocated later when the per cpu 4964 * allocator is available. 4965 * 4966 * boot_pagesets are used also for bootstrapping offline 4967 * cpus if the system is already booted because the pagesets 4968 * are needed to initialize allocators on a specific cpu too. 4969 * F.e. the percpu allocator needs the page allocator which 4970 * needs the percpu allocator in order to allocate its pagesets 4971 * (a chicken-egg dilemma). 4972 */ 4973 for_each_possible_cpu(cpu) { 4974 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 4975 4976 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 4977 /* 4978 * We now know the "local memory node" for each node-- 4979 * i.e., the node of the first zone in the generic zonelist. 4980 * Set up numa_mem percpu variable for on-line cpus. During 4981 * boot, only the boot cpu should be on-line; we'll init the 4982 * secondary cpus' numa_mem as they come on-line. During 4983 * node/memory hotplug, we'll fixup all on-line cpus. 4984 */ 4985 if (cpu_online(cpu)) 4986 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 4987 #endif 4988 } 4989 4990 return 0; 4991 } 4992 4993 static noinline void __init 4994 build_all_zonelists_init(void) 4995 { 4996 __build_all_zonelists(NULL); 4997 mminit_verify_zonelist(); 4998 cpuset_init_current_mems_allowed(); 4999 } 5000 5001 /* 5002 * Called with zonelists_mutex held always 5003 * unless system_state == SYSTEM_BOOTING. 5004 * 5005 * __ref due to (1) call of __meminit annotated setup_zone_pageset 5006 * [we're only called with non-NULL zone through __meminit paths] and 5007 * (2) call of __init annotated helper build_all_zonelists_init 5008 * [protected by SYSTEM_BOOTING]. 5009 */ 5010 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) 5011 { 5012 set_zonelist_order(); 5013 5014 if (system_state == SYSTEM_BOOTING) { 5015 build_all_zonelists_init(); 5016 } else { 5017 #ifdef CONFIG_MEMORY_HOTPLUG 5018 if (zone) 5019 setup_zone_pageset(zone); 5020 #endif 5021 /* we have to stop all cpus to guarantee there is no user 5022 of zonelist */ 5023 stop_machine(__build_all_zonelists, pgdat, NULL); 5024 /* cpuset refresh routine should be here */ 5025 } 5026 vm_total_pages = nr_free_pagecache_pages(); 5027 /* 5028 * Disable grouping by mobility if the number of pages in the 5029 * system is too low to allow the mechanism to work. It would be 5030 * more accurate, but expensive to check per-zone. This check is 5031 * made on memory-hotadd so a system can start with mobility 5032 * disabled and enable it later 5033 */ 5034 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5035 page_group_by_mobility_disabled = 1; 5036 else 5037 page_group_by_mobility_disabled = 0; 5038 5039 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n", 5040 nr_online_nodes, 5041 zonelist_order_name[current_zonelist_order], 5042 page_group_by_mobility_disabled ? 
"off" : "on", 5043 vm_total_pages); 5044 #ifdef CONFIG_NUMA 5045 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5046 #endif 5047 } 5048 5049 /* 5050 * Helper functions to size the waitqueue hash table. 5051 * Essentially these want to choose hash table sizes sufficiently 5052 * large so that collisions trying to wait on pages are rare. 5053 * But in fact, the number of active page waitqueues on typical 5054 * systems is ridiculously low, less than 200. So this is even 5055 * conservative, even though it seems large. 5056 * 5057 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 5058 * waitqueues, i.e. the size of the waitq table given the number of pages. 5059 */ 5060 #define PAGES_PER_WAITQUEUE 256 5061 5062 #ifndef CONFIG_MEMORY_HOTPLUG 5063 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 5064 { 5065 unsigned long size = 1; 5066 5067 pages /= PAGES_PER_WAITQUEUE; 5068 5069 while (size < pages) 5070 size <<= 1; 5071 5072 /* 5073 * Once we have dozens or even hundreds of threads sleeping 5074 * on IO we've got bigger problems than wait queue collision. 5075 * Limit the size of the wait table to a reasonable size. 5076 */ 5077 size = min(size, 4096UL); 5078 5079 return max(size, 4UL); 5080 } 5081 #else 5082 /* 5083 * A zone's size might be changed by hot-add, so it is not possible to determine 5084 * a suitable size for its wait_table. So we use the maximum size now. 5085 * 5086 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 5087 * 5088 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 5089 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 5090 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 5091 * 5092 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 5093 * or more by the traditional way. (See above). It equals: 5094 * 5095 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 5096 * ia64(16K page size) : = ( 8G + 4M)byte. 5097 * powerpc (64K page size) : = (32G +16M)byte. 5098 */ 5099 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 5100 { 5101 return 4096UL; 5102 } 5103 #endif 5104 5105 /* 5106 * This is an integer logarithm so that shifts can be used later 5107 * to extract the more random high bits from the multiplicative 5108 * hash function before the remainder is taken. 5109 */ 5110 static inline unsigned long wait_table_bits(unsigned long size) 5111 { 5112 return ffz(~size); 5113 } 5114 5115 /* 5116 * Initially all pages are reserved - free ones are freed 5117 * up by free_all_bootmem() once the early boot process is 5118 * done. Non-atomic initialization, single-pass. 
5119 */ 5120 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 5121 unsigned long start_pfn, enum memmap_context context) 5122 { 5123 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn)); 5124 unsigned long end_pfn = start_pfn + size; 5125 pg_data_t *pgdat = NODE_DATA(nid); 5126 unsigned long pfn; 5127 unsigned long nr_initialised = 0; 5128 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5129 struct memblock_region *r = NULL, *tmp; 5130 #endif 5131 5132 if (highest_memmap_pfn < end_pfn - 1) 5133 highest_memmap_pfn = end_pfn - 1; 5134 5135 /* 5136 * Honor reservation requested by the driver for this ZONE_DEVICE 5137 * memory 5138 */ 5139 if (altmap && start_pfn == altmap->base_pfn) 5140 start_pfn += altmap->reserve; 5141 5142 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 5143 /* 5144 * There can be holes in boot-time mem_map[]s handed to this 5145 * function. They do not exist on hotplugged memory. 5146 */ 5147 if (context != MEMMAP_EARLY) 5148 goto not_early; 5149 5150 if (!early_pfn_valid(pfn)) 5151 continue; 5152 if (!early_pfn_in_nid(pfn, nid)) 5153 continue; 5154 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised)) 5155 break; 5156 5157 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5158 /* 5159 * If not mirrored_kernelcore and ZONE_MOVABLE exists, range 5160 * from zone_movable_pfn[nid] to end of each node should be 5161 * ZONE_MOVABLE not ZONE_NORMAL. skip it. 5162 */ 5163 if (!mirrored_kernelcore && zone_movable_pfn[nid]) 5164 if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid]) 5165 continue; 5166 5167 /* 5168 * Check given memblock attribute by firmware which can affect 5169 * kernel memory layout. If zone==ZONE_MOVABLE but memory is 5170 * mirrored, it's an overlapped memmap init. skip it. 5171 */ 5172 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 5173 if (!r || pfn >= memblock_region_memory_end_pfn(r)) { 5174 for_each_memblock(memory, tmp) 5175 if (pfn < memblock_region_memory_end_pfn(tmp)) 5176 break; 5177 r = tmp; 5178 } 5179 if (pfn >= memblock_region_memory_base_pfn(r) && 5180 memblock_is_mirror(r)) { 5181 /* already initialized as NORMAL */ 5182 pfn = memblock_region_memory_end_pfn(r); 5183 continue; 5184 } 5185 } 5186 #endif 5187 5188 not_early: 5189 /* 5190 * Mark the block movable so that blocks are reserved for 5191 * movable at startup. This will force kernel allocations 5192 * to reserve their blocks rather than leaking throughout 5193 * the address space during boot when many long-lived 5194 * kernel allocations are made. 5195 * 5196 * bitmap is created for zone's valid pfn range. but memmap 5197 * can be created for invalid pages (for alignment) 5198 * check here not to call set_pageblock_migratetype() against 5199 * pfn out of zone. 
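 *
 * Editor's note on granularity (assuming 4K pages and a 2MB pageblock,
 * i.e. pageblock_order == 9): pageblock_nr_pages is then 512, so the
 * branch below fires once every 512 pfns, and only that first page of
 * each block additionally gets its migratetype set; the remaining pages
 * of the block are initialised through __init_single_pfn().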
5200 */ 5201 if (!(pfn & (pageblock_nr_pages - 1))) { 5202 struct page *page = pfn_to_page(pfn); 5203 5204 __init_single_page(page, pfn, zone, nid); 5205 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 5206 } else { 5207 __init_single_pfn(pfn, zone, nid); 5208 } 5209 } 5210 } 5211 5212 static void __meminit zone_init_free_lists(struct zone *zone) 5213 { 5214 unsigned int order, t; 5215 for_each_migratetype_order(order, t) { 5216 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 5217 zone->free_area[order].nr_free = 0; 5218 } 5219 } 5220 5221 #ifndef __HAVE_ARCH_MEMMAP_INIT 5222 #define memmap_init(size, nid, zone, start_pfn) \ 5223 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 5224 #endif 5225 5226 static int zone_batchsize(struct zone *zone) 5227 { 5228 #ifdef CONFIG_MMU 5229 int batch; 5230 5231 /* 5232 * The per-cpu-pages pools are set to around 1/1000th of the 5233 * size of the zone. But no more than 1/2 of a meg. 5234 * 5235 * OK, so we don't know how big the cache is. So guess. 5236 */ 5237 batch = zone->managed_pages / 1024; 5238 if (batch * PAGE_SIZE > 512 * 1024) 5239 batch = (512 * 1024) / PAGE_SIZE; 5240 batch /= 4; /* We effectively *= 4 below */ 5241 if (batch < 1) 5242 batch = 1; 5243 5244 /* 5245 * Clamp the batch to a 2^n - 1 value. Having a power 5246 * of 2 value was found to be more likely to have 5247 * suboptimal cache aliasing properties in some cases. 5248 * 5249 * For example if 2 tasks are alternately allocating 5250 * batches of pages, one task can end up with a lot 5251 * of pages of one half of the possible page colors 5252 * and the other with pages of the other colors. 5253 */ 5254 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5255 5256 return batch; 5257 5258 #else 5259 /* The deferral and batching of frees should be suppressed under NOMMU 5260 * conditions. 5261 * 5262 * The problem is that NOMMU needs to be able to allocate large chunks 5263 * of contiguous memory as there's no hardware page translation to 5264 * assemble apparent contiguous memory from discontiguous pages. 5265 * 5266 * Queueing large contiguous runs of pages for batching, however, 5267 * causes the pages to actually be freed in smaller chunks. As there 5268 * can be a significant delay between the individual batches being 5269 * recycled, this leads to the once large chunks of space being 5270 * fragmented and becoming unavailable for high-order allocations. 5271 */ 5272 return 0; 5273 #endif 5274 } 5275 5276 /* 5277 * pcp->high and pcp->batch values are related and dependent on one another: 5278 * ->batch must never be higher than ->high. 5279 * The following function updates them in a safe manner without read side 5280 * locking. 5281 * 5282 * Any new users of pcp->batch and pcp->high should ensure they can cope with 5283 * those fields changing asynchronously (according to the above rule). 5284 * 5285 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5286 * outside of boot time (or some other assurance that no concurrent updaters 5287 * exist).
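 *
 * Worked numbers (editor's illustration, assuming 4K pages and a zone with
 * roughly 512MB or more of managed memory): zone_batchsize() above computes
 * managed_pages/1024, caps it at 512KB worth of pages (128), divides by 4
 * (32) and rounds down to 2^n - 1, giving batch = 31.  pageset_set_batch()
 * below then sets pcp->high = 6 * 31 = 186, so a per-cpu list holds at most
 * ~186 pages before being drained back to the buddy lists 31 at a time.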
5288 */ 5289 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 5290 unsigned long batch) 5291 { 5292 /* start with a fail safe value for batch */ 5293 pcp->batch = 1; 5294 smp_wmb(); 5295 5296 /* Update high, then batch, in order */ 5297 pcp->high = high; 5298 smp_wmb(); 5299 5300 pcp->batch = batch; 5301 } 5302 5303 /* a companion to pageset_set_high() */ 5304 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) 5305 { 5306 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); 5307 } 5308 5309 static void pageset_init(struct per_cpu_pageset *p) 5310 { 5311 struct per_cpu_pages *pcp; 5312 int migratetype; 5313 5314 memset(p, 0, sizeof(*p)); 5315 5316 pcp = &p->pcp; 5317 pcp->count = 0; 5318 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 5319 INIT_LIST_HEAD(&pcp->lists[migratetype]); 5320 } 5321 5322 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 5323 { 5324 pageset_init(p); 5325 pageset_set_batch(p, batch); 5326 } 5327 5328 /* 5329 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist 5330 * to the value high for the pageset p. 5331 */ 5332 static void pageset_set_high(struct per_cpu_pageset *p, 5333 unsigned long high) 5334 { 5335 unsigned long batch = max(1UL, high / 4); 5336 if ((high / 4) > (PAGE_SHIFT * 8)) 5337 batch = PAGE_SHIFT * 8; 5338 5339 pageset_update(&p->pcp, high, batch); 5340 } 5341 5342 static void pageset_set_high_and_batch(struct zone *zone, 5343 struct per_cpu_pageset *pcp) 5344 { 5345 if (percpu_pagelist_fraction) 5346 pageset_set_high(pcp, 5347 (zone->managed_pages / 5348 percpu_pagelist_fraction)); 5349 else 5350 pageset_set_batch(pcp, zone_batchsize(zone)); 5351 } 5352 5353 static void __meminit zone_pageset_init(struct zone *zone, int cpu) 5354 { 5355 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); 5356 5357 pageset_init(pcp); 5358 pageset_set_high_and_batch(zone, pcp); 5359 } 5360 5361 static void __meminit setup_zone_pageset(struct zone *zone) 5362 { 5363 int cpu; 5364 zone->pageset = alloc_percpu(struct per_cpu_pageset); 5365 for_each_possible_cpu(cpu) 5366 zone_pageset_init(zone, cpu); 5367 } 5368 5369 /* 5370 * Allocate per cpu pagesets and initialize them. 5371 * Before this call only boot pagesets were available. 5372 */ 5373 void __init setup_per_cpu_pageset(void) 5374 { 5375 struct zone *zone; 5376 5377 for_each_populated_zone(zone) 5378 setup_zone_pageset(zone); 5379 } 5380 5381 static noinline __init_refok 5382 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 5383 { 5384 int i; 5385 size_t alloc_size; 5386 5387 /* 5388 * The per-page waitqueue mechanism uses hashed waitqueues 5389 * per zone. 5390 */ 5391 zone->wait_table_hash_nr_entries = 5392 wait_table_hash_nr_entries(zone_size_pages); 5393 zone->wait_table_bits = 5394 wait_table_bits(zone->wait_table_hash_nr_entries); 5395 alloc_size = zone->wait_table_hash_nr_entries 5396 * sizeof(wait_queue_head_t); 5397 5398 if (!slab_is_available()) { 5399 zone->wait_table = (wait_queue_head_t *) 5400 memblock_virt_alloc_node_nopanic( 5401 alloc_size, zone->zone_pgdat->node_id); 5402 } else { 5403 /* 5404 * This case means that a zone whose size was 0 gets new memory 5405 * via memory hot-add. 5406 * But it may be the case that a new node was hot-added. In 5407 * this case vmalloc() will not be able to use this new node's 5408 * memory - this wait_table must be initialized to use this new 5409 * node itself as well. 
5410 * To use this new node's memory, further consideration will be 5411 * necessary. 5412 */ 5413 zone->wait_table = vmalloc(alloc_size); 5414 } 5415 if (!zone->wait_table) 5416 return -ENOMEM; 5417 5418 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) 5419 init_waitqueue_head(zone->wait_table + i); 5420 5421 return 0; 5422 } 5423 5424 static __meminit void zone_pcp_init(struct zone *zone) 5425 { 5426 /* 5427 * per cpu subsystem is not up at this point. The following code 5428 * relies on the ability of the linker to provide the 5429 * offset of a (static) per cpu variable into the per cpu area. 5430 */ 5431 zone->pageset = &boot_pageset; 5432 5433 if (populated_zone(zone)) 5434 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 5435 zone->name, zone->present_pages, 5436 zone_batchsize(zone)); 5437 } 5438 5439 int __meminit init_currently_empty_zone(struct zone *zone, 5440 unsigned long zone_start_pfn, 5441 unsigned long size) 5442 { 5443 struct pglist_data *pgdat = zone->zone_pgdat; 5444 int ret; 5445 ret = zone_wait_table_init(zone, size); 5446 if (ret) 5447 return ret; 5448 pgdat->nr_zones = zone_idx(zone) + 1; 5449 5450 zone->zone_start_pfn = zone_start_pfn; 5451 5452 mminit_dprintk(MMINIT_TRACE, "memmap_init", 5453 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 5454 pgdat->node_id, 5455 (unsigned long)zone_idx(zone), 5456 zone_start_pfn, (zone_start_pfn + size)); 5457 5458 zone_init_free_lists(zone); 5459 5460 return 0; 5461 } 5462 5463 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5464 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 5465 5466 /* 5467 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 5468 */ 5469 int __meminit __early_pfn_to_nid(unsigned long pfn, 5470 struct mminit_pfnnid_cache *state) 5471 { 5472 unsigned long start_pfn, end_pfn; 5473 int nid; 5474 5475 if (state->last_start <= pfn && pfn < state->last_end) 5476 return state->last_nid; 5477 5478 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 5479 if (nid != -1) { 5480 state->last_start = start_pfn; 5481 state->last_end = end_pfn; 5482 state->last_nid = nid; 5483 } 5484 5485 return nid; 5486 } 5487 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 5488 5489 /** 5490 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range 5491 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 5492 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid 5493 * 5494 * If an architecture guarantees that all ranges registered contain no holes 5495 * and may be freed, this this function may be used instead of calling 5496 * memblock_free_early_nid() manually. 5497 */ 5498 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) 5499 { 5500 unsigned long start_pfn, end_pfn; 5501 int i, this_nid; 5502 5503 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { 5504 start_pfn = min(start_pfn, max_low_pfn); 5505 end_pfn = min(end_pfn, max_low_pfn); 5506 5507 if (start_pfn < end_pfn) 5508 memblock_free_early_nid(PFN_PHYS(start_pfn), 5509 (end_pfn - start_pfn) << PAGE_SHIFT, 5510 this_nid); 5511 } 5512 } 5513 5514 /** 5515 * sparse_memory_present_with_active_regions - Call memory_present for each active range 5516 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 5517 * 5518 * If an architecture guarantees that all ranges registered contain no holes and may 5519 * be freed, this function may be used instead of calling memory_present() manually. 
5520 */ 5521 void __init sparse_memory_present_with_active_regions(int nid) 5522 { 5523 unsigned long start_pfn, end_pfn; 5524 int i, this_nid; 5525 5526 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 5527 memory_present(this_nid, start_pfn, end_pfn); 5528 } 5529 5530 /** 5531 * get_pfn_range_for_nid - Return the start and end page frames for a node 5532 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 5533 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 5534 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 5535 * 5536 * It returns the start and end page frame of a node based on information 5537 * provided by memblock_set_node(). If called for a node 5538 * with no available memory, a warning is printed and the start and end 5539 * PFNs will be 0. 5540 */ 5541 void __meminit get_pfn_range_for_nid(unsigned int nid, 5542 unsigned long *start_pfn, unsigned long *end_pfn) 5543 { 5544 unsigned long this_start_pfn, this_end_pfn; 5545 int i; 5546 5547 *start_pfn = -1UL; 5548 *end_pfn = 0; 5549 5550 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 5551 *start_pfn = min(*start_pfn, this_start_pfn); 5552 *end_pfn = max(*end_pfn, this_end_pfn); 5553 } 5554 5555 if (*start_pfn == -1UL) 5556 *start_pfn = 0; 5557 } 5558 5559 /* 5560 * This finds a zone that can be used for ZONE_MOVABLE pages. The 5561 * assumption is made that zones within a node are ordered in monotonic 5562 * increasing memory addresses so that the "highest" populated zone is used 5563 */ 5564 static void __init find_usable_zone_for_movable(void) 5565 { 5566 int zone_index; 5567 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 5568 if (zone_index == ZONE_MOVABLE) 5569 continue; 5570 5571 if (arch_zone_highest_possible_pfn[zone_index] > 5572 arch_zone_lowest_possible_pfn[zone_index]) 5573 break; 5574 } 5575 5576 VM_BUG_ON(zone_index == -1); 5577 movable_zone = zone_index; 5578 } 5579 5580 /* 5581 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 5582 * because it is sized independent of architecture. Unlike the other zones, 5583 * the starting point for ZONE_MOVABLE is not fixed. It may be different 5584 * in each node depending on the size of each node and how evenly kernelcore 5585 * is distributed. This helper function adjusts the zone ranges 5586 * provided by the architecture for a given node by using the end of the 5587 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that
5588 * zones within a node are in order of monotonically increasing memory addresses
5589 */
5590 static void __meminit adjust_zone_range_for_zone_movable(int nid,
5591 unsigned long zone_type,
5592 unsigned long node_start_pfn,
5593 unsigned long node_end_pfn,
5594 unsigned long *zone_start_pfn,
5595 unsigned long *zone_end_pfn)
5596 {
5597 /* Only adjust if ZONE_MOVABLE is on this node */
5598 if (zone_movable_pfn[nid]) {
5599 /* Size ZONE_MOVABLE */
5600 if (zone_type == ZONE_MOVABLE) {
5601 *zone_start_pfn = zone_movable_pfn[nid];
5602 *zone_end_pfn = min(node_end_pfn,
5603 arch_zone_highest_possible_pfn[movable_zone]);
5604
5605 /* Check if this whole range is within ZONE_MOVABLE */
5606 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
5607 *zone_start_pfn = *zone_end_pfn;
5608 }
5609 }
5610
5611 /*
5612 * Return the number of pages a zone spans in a node, including holes:
5613 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
5614 */
5615 static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5616 unsigned long zone_type,
5617 unsigned long node_start_pfn,
5618 unsigned long node_end_pfn,
5619 unsigned long *zone_start_pfn,
5620 unsigned long *zone_end_pfn,
5621 unsigned long *ignored)
5622 {
5623 /* When hot-adding a new node from cpu_up(), the node should be empty */
5624 if (!node_start_pfn && !node_end_pfn)
5625 return 0;
5626
5627 /* Get the start and end of the zone */
5628 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5629 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
5630 adjust_zone_range_for_zone_movable(nid, zone_type,
5631 node_start_pfn, node_end_pfn,
5632 zone_start_pfn, zone_end_pfn);
5633
5634 /* Check that this node has pages within the zone's required range */
5635 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
5636 return 0;
5637
5638 /* Move the zone boundaries inside the node if necessary */
5639 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
5640 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
5641
5642 /* Return the spanned pages */
5643 return *zone_end_pfn - *zone_start_pfn;
5644 }
5645
5646 /*
5647 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
5648 * then all holes in the requested range will be accounted for.
5649 */
5650 unsigned long __meminit __absent_pages_in_range(int nid,
5651 unsigned long range_start_pfn,
5652 unsigned long range_end_pfn)
5653 {
5654 unsigned long nr_absent = range_end_pfn - range_start_pfn;
5655 unsigned long start_pfn, end_pfn;
5656 int i;
5657
5658 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
5659 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
5660 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
5661 nr_absent -= end_pfn - start_pfn;
5662 }
5663 return nr_absent;
5664 }
5665
5666 /**
5667 * absent_pages_in_range - Return number of page frames in holes within a range
5668 * @start_pfn: The start PFN to start searching for holes
5669 * @end_pfn: The end PFN to stop searching for holes
5670 *
5671 * It returns the number of page frames in memory holes within a range.
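 * For example (illustrative PFNs): if memblock registers [0x0000, 0x1000) and
 * [0x3000, 0x4000) as memory, absent_pages_in_range(0x0000, 0x4000) reports
 * 0x2000 pages of holes.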
5672 */ 5673 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 5674 unsigned long end_pfn) 5675 { 5676 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 5677 } 5678 5679 /* Return the number of page frames in holes in a zone on a node */ 5680 static unsigned long __meminit zone_absent_pages_in_node(int nid, 5681 unsigned long zone_type, 5682 unsigned long node_start_pfn, 5683 unsigned long node_end_pfn, 5684 unsigned long *ignored) 5685 { 5686 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 5687 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 5688 unsigned long zone_start_pfn, zone_end_pfn; 5689 unsigned long nr_absent; 5690 5691 /* When hotadd a new node from cpu_up(), the node should be empty */ 5692 if (!node_start_pfn && !node_end_pfn) 5693 return 0; 5694 5695 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 5696 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 5697 5698 adjust_zone_range_for_zone_movable(nid, zone_type, 5699 node_start_pfn, node_end_pfn, 5700 &zone_start_pfn, &zone_end_pfn); 5701 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 5702 5703 /* 5704 * ZONE_MOVABLE handling. 5705 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 5706 * and vice versa. 5707 */ 5708 if (zone_movable_pfn[nid]) { 5709 if (mirrored_kernelcore) { 5710 unsigned long start_pfn, end_pfn; 5711 struct memblock_region *r; 5712 5713 for_each_memblock(memory, r) { 5714 start_pfn = clamp(memblock_region_memory_base_pfn(r), 5715 zone_start_pfn, zone_end_pfn); 5716 end_pfn = clamp(memblock_region_memory_end_pfn(r), 5717 zone_start_pfn, zone_end_pfn); 5718 5719 if (zone_type == ZONE_MOVABLE && 5720 memblock_is_mirror(r)) 5721 nr_absent += end_pfn - start_pfn; 5722 5723 if (zone_type == ZONE_NORMAL && 5724 !memblock_is_mirror(r)) 5725 nr_absent += end_pfn - start_pfn; 5726 } 5727 } else { 5728 if (zone_type == ZONE_NORMAL) 5729 nr_absent += node_end_pfn - zone_movable_pfn[nid]; 5730 } 5731 } 5732 5733 return nr_absent; 5734 } 5735 5736 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5737 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 5738 unsigned long zone_type, 5739 unsigned long node_start_pfn, 5740 unsigned long node_end_pfn, 5741 unsigned long *zone_start_pfn, 5742 unsigned long *zone_end_pfn, 5743 unsigned long *zones_size) 5744 { 5745 unsigned int zone; 5746 5747 *zone_start_pfn = node_start_pfn; 5748 for (zone = 0; zone < zone_type; zone++) 5749 *zone_start_pfn += zones_size[zone]; 5750 5751 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type]; 5752 5753 return zones_size[zone_type]; 5754 } 5755 5756 static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 5757 unsigned long zone_type, 5758 unsigned long node_start_pfn, 5759 unsigned long node_end_pfn, 5760 unsigned long *zholes_size) 5761 { 5762 if (!zholes_size) 5763 return 0; 5764 5765 return zholes_size[zone_type]; 5766 } 5767 5768 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5769 5770 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 5771 unsigned long node_start_pfn, 5772 unsigned long node_end_pfn, 5773 unsigned long *zones_size, 5774 unsigned long *zholes_size) 5775 { 5776 unsigned long realtotalpages = 0, totalpages = 0; 5777 enum zone_type i; 5778 5779 for (i = 0; i < MAX_NR_ZONES; i++) { 5780 struct zone *zone = pgdat->node_zones + i; 5781 unsigned long zone_start_pfn, zone_end_pfn; 5782 unsigned long size, real_size; 5783 5784 size = 
zone_spanned_pages_in_node(pgdat->node_id, i, 5785 node_start_pfn, 5786 node_end_pfn, 5787 &zone_start_pfn, 5788 &zone_end_pfn, 5789 zones_size); 5790 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i, 5791 node_start_pfn, node_end_pfn, 5792 zholes_size); 5793 if (size) 5794 zone->zone_start_pfn = zone_start_pfn; 5795 else 5796 zone->zone_start_pfn = 0; 5797 zone->spanned_pages = size; 5798 zone->present_pages = real_size; 5799 5800 totalpages += size; 5801 realtotalpages += real_size; 5802 } 5803 5804 pgdat->node_spanned_pages = totalpages; 5805 pgdat->node_present_pages = realtotalpages; 5806 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 5807 realtotalpages); 5808 } 5809 5810 #ifndef CONFIG_SPARSEMEM 5811 /* 5812 * Calculate the size of the zone->blockflags rounded to an unsigned long 5813 * Start by making sure zonesize is a multiple of pageblock_order by rounding 5814 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 5815 * round what is now in bits to nearest long in bits, then return it in 5816 * bytes. 5817 */ 5818 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 5819 { 5820 unsigned long usemapsize; 5821 5822 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 5823 usemapsize = roundup(zonesize, pageblock_nr_pages); 5824 usemapsize = usemapsize >> pageblock_order; 5825 usemapsize *= NR_PAGEBLOCK_BITS; 5826 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 5827 5828 return usemapsize / 8; 5829 } 5830 5831 static void __init setup_usemap(struct pglist_data *pgdat, 5832 struct zone *zone, 5833 unsigned long zone_start_pfn, 5834 unsigned long zonesize) 5835 { 5836 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); 5837 zone->pageblock_flags = NULL; 5838 if (usemapsize) 5839 zone->pageblock_flags = 5840 memblock_virt_alloc_node_nopanic(usemapsize, 5841 pgdat->node_id); 5842 } 5843 #else 5844 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, 5845 unsigned long zone_start_pfn, unsigned long zonesize) {} 5846 #endif /* CONFIG_SPARSEMEM */ 5847 5848 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 5849 5850 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 5851 void __paginginit set_pageblock_order(void) 5852 { 5853 unsigned int order; 5854 5855 /* Check that pageblock_nr_pages has not already been setup */ 5856 if (pageblock_order) 5857 return; 5858 5859 if (HPAGE_SHIFT > PAGE_SHIFT) 5860 order = HUGETLB_PAGE_ORDER; 5861 else 5862 order = MAX_ORDER - 1; 5863 5864 /* 5865 * Assume the largest contiguous order of interest is a huge page. 5866 * This value may be variable depending on boot parameters on IA64 and 5867 * powerpc. 5868 */ 5869 pageblock_order = order; 5870 } 5871 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 5872 5873 /* 5874 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 5875 * is unused as pageblock_order is set at compile-time. See 5876 * include/linux/pageblock-flags.h for the values of pageblock_order based on 5877 * the kernel config 5878 */ 5879 void __paginginit set_pageblock_order(void) 5880 { 5881 } 5882 5883 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 5884 5885 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, 5886 unsigned long present_pages) 5887 { 5888 unsigned long pages = spanned_pages; 5889 5890 /* 5891 * Provide a more accurate estimation if there are holes within 5892 * the zone and SPARSEMEM is in use. 
If there are holes within the
5893 * zone, each populated memory region may cost us one or two extra
5894 * memmap pages due to alignment because memmap pages for each
5895 * populated region may not be naturally aligned on page boundaries.
5896 * So the (present_pages >> 4) heuristic is a tradeoff for that.
5897 */
5898 if (spanned_pages > present_pages + (present_pages >> 4) &&
5899 IS_ENABLED(CONFIG_SPARSEMEM))
5900 pages = present_pages;
5901
5902 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
5903 }
5904
5905 /*
5906 * Set up the zone data structures:
5907 * - mark all pages reserved
5908 * - mark all memory queues empty
5909 * - clear the memory bitmaps
5910 *
5911 * NOTE: pgdat should get zeroed by caller.
5912 */
5913 static void __paginginit free_area_init_core(struct pglist_data *pgdat)
5914 {
5915 enum zone_type j;
5916 int nid = pgdat->node_id;
5917 int ret;
5918
5919 pgdat_resize_init(pgdat);
5920 #ifdef CONFIG_NUMA_BALANCING
5921 spin_lock_init(&pgdat->numabalancing_migrate_lock);
5922 pgdat->numabalancing_migrate_nr_pages = 0;
5923 pgdat->numabalancing_migrate_next_window = jiffies;
5924 #endif
5925 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5926 spin_lock_init(&pgdat->split_queue_lock);
5927 INIT_LIST_HEAD(&pgdat->split_queue);
5928 pgdat->split_queue_len = 0;
5929 #endif
5930 init_waitqueue_head(&pgdat->kswapd_wait);
5931 init_waitqueue_head(&pgdat->pfmemalloc_wait);
5932 #ifdef CONFIG_COMPACTION
5933 init_waitqueue_head(&pgdat->kcompactd_wait);
5934 #endif
5935 pgdat_page_ext_init(pgdat);
5936
5937 for (j = 0; j < MAX_NR_ZONES; j++) {
5938 struct zone *zone = pgdat->node_zones + j;
5939 unsigned long size, realsize, freesize, memmap_pages;
5940 unsigned long zone_start_pfn = zone->zone_start_pfn;
5941
5942 size = zone->spanned_pages;
5943 realsize = freesize = zone->present_pages;
5944
5945 /*
5946 * Adjust freesize so that it accounts for how much memory
5947 * is used by this zone for memmap. This affects the watermark
5948 * and per-cpu initialisations.
5949 */
5950 memmap_pages = calc_memmap_size(size, realsize);
5951 if (!is_highmem_idx(j)) {
5952 if (freesize >= memmap_pages) {
5953 freesize -= memmap_pages;
5954 if (memmap_pages)
5955 printk(KERN_DEBUG
5956 " %s zone: %lu pages used for memmap\n",
5957 zone_names[j], memmap_pages);
5958 } else
5959 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
5960 zone_names[j], memmap_pages, freesize);
5961 }
5962
5963 /* Account for reserved pages */
5964 if (j == 0 && freesize > dma_reserve) {
5965 freesize -= dma_reserve;
5966 printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
5967 zone_names[0], dma_reserve);
5968 }
5969
5970 if (!is_highmem_idx(j))
5971 nr_kernel_pages += freesize;
5972 /* Charge for highmem memmap if there are enough kernel pages */
5973 else if (nr_kernel_pages > memmap_pages * 2)
5974 nr_kernel_pages -= memmap_pages;
5975 nr_all_pages += freesize;
5976
5977 /*
5978 * Set an approximate value for lowmem here; it will be adjusted
5979 * when the bootmem allocator frees pages into the buddy system.
5980 * All highmem pages will be managed by the buddy system.
5981 */
5982 zone->managed_pages = is_highmem_idx(j) ?
realsize : freesize; 5983 #ifdef CONFIG_NUMA 5984 zone->node = nid; 5985 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) 5986 / 100; 5987 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; 5988 #endif 5989 zone->name = zone_names[j]; 5990 spin_lock_init(&zone->lock); 5991 spin_lock_init(&zone->lru_lock); 5992 zone_seqlock_init(zone); 5993 zone->zone_pgdat = pgdat; 5994 zone_pcp_init(zone); 5995 5996 /* For bootup, initialized properly in watermark setup */ 5997 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); 5998 5999 lruvec_init(&zone->lruvec); 6000 if (!size) 6001 continue; 6002 6003 set_pageblock_order(); 6004 setup_usemap(pgdat, zone, zone_start_pfn, size); 6005 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 6006 BUG_ON(ret); 6007 memmap_init(size, nid, j, zone_start_pfn); 6008 } 6009 } 6010 6011 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 6012 { 6013 unsigned long __maybe_unused start = 0; 6014 unsigned long __maybe_unused offset = 0; 6015 6016 /* Skip empty nodes */ 6017 if (!pgdat->node_spanned_pages) 6018 return; 6019 6020 #ifdef CONFIG_FLAT_NODE_MEM_MAP 6021 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 6022 offset = pgdat->node_start_pfn - start; 6023 /* ia64 gets its own node_mem_map, before this, without bootmem */ 6024 if (!pgdat->node_mem_map) { 6025 unsigned long size, end; 6026 struct page *map; 6027 6028 /* 6029 * The zone's endpoints aren't required to be MAX_ORDER 6030 * aligned but the node_mem_map endpoints must be in order 6031 * for the buddy allocator to function correctly. 6032 */ 6033 end = pgdat_end_pfn(pgdat); 6034 end = ALIGN(end, MAX_ORDER_NR_PAGES); 6035 size = (end - start) * sizeof(struct page); 6036 map = alloc_remap(pgdat->node_id, size); 6037 if (!map) 6038 map = memblock_virt_alloc_node_nopanic(size, 6039 pgdat->node_id); 6040 pgdat->node_mem_map = map + offset; 6041 } 6042 #ifndef CONFIG_NEED_MULTIPLE_NODES 6043 /* 6044 * With no DISCONTIG, the global mem_map is just set as node 0's 6045 */ 6046 if (pgdat == NODE_DATA(0)) { 6047 mem_map = NODE_DATA(0)->node_mem_map; 6048 #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) 6049 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 6050 mem_map -= offset; 6051 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 6052 } 6053 #endif 6054 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 6055 } 6056 6057 void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 6058 unsigned long node_start_pfn, unsigned long *zholes_size) 6059 { 6060 pg_data_t *pgdat = NODE_DATA(nid); 6061 unsigned long start_pfn = 0; 6062 unsigned long end_pfn = 0; 6063 6064 /* pg_data_t should be reset to zero when it's allocated */ 6065 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); 6066 6067 reset_deferred_meminit(pgdat); 6068 pgdat->node_id = nid; 6069 pgdat->node_start_pfn = node_start_pfn; 6070 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 6071 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 6072 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 6073 (u64)start_pfn << PAGE_SHIFT, 6074 end_pfn ? 
((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 6075 #else 6076 start_pfn = node_start_pfn; 6077 #endif 6078 calculate_node_totalpages(pgdat, start_pfn, end_pfn, 6079 zones_size, zholes_size); 6080 6081 alloc_node_mem_map(pgdat); 6082 #ifdef CONFIG_FLAT_NODE_MEM_MAP 6083 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 6084 nid, (unsigned long)pgdat, 6085 (unsigned long)pgdat->node_mem_map); 6086 #endif 6087 6088 free_area_init_core(pgdat); 6089 } 6090 6091 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 6092 6093 #if MAX_NUMNODES > 1 6094 /* 6095 * Figure out the number of possible node ids. 6096 */ 6097 void __init setup_nr_node_ids(void) 6098 { 6099 unsigned int highest; 6100 6101 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 6102 nr_node_ids = highest + 1; 6103 } 6104 #endif 6105 6106 /** 6107 * node_map_pfn_alignment - determine the maximum internode alignment 6108 * 6109 * This function should be called after node map is populated and sorted. 6110 * It calculates the maximum power of two alignment which can distinguish 6111 * all the nodes. 6112 * 6113 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 6114 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 6115 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 6116 * shifted, 1GiB is enough and this function will indicate so. 6117 * 6118 * This is used to test whether pfn -> nid mapping of the chosen memory 6119 * model has fine enough granularity to avoid incorrect mapping for the 6120 * populated node map. 6121 * 6122 * Returns the determined alignment in pfn's. 0 if there is no alignment 6123 * requirement (single node). 6124 */ 6125 unsigned long __init node_map_pfn_alignment(void) 6126 { 6127 unsigned long accl_mask = 0, last_end = 0; 6128 unsigned long start, end, mask; 6129 int last_nid = -1; 6130 int i, nid; 6131 6132 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 6133 if (!start || last_nid < 0 || last_nid == nid) { 6134 last_nid = nid; 6135 last_end = end; 6136 continue; 6137 } 6138 6139 /* 6140 * Start with a mask granular enough to pin-point to the 6141 * start pfn and tick off bits one-by-one until it becomes 6142 * too coarse to separate the current node from the last. 6143 */ 6144 mask = ~((1 << __ffs(start)) - 1); 6145 while (mask && last_end <= (start & (mask << 1))) 6146 mask <<= 1; 6147 6148 /* accumulate all internode masks */ 6149 accl_mask |= mask; 6150 } 6151 6152 /* convert mask to number of pages */ 6153 return ~accl_mask + 1; 6154 } 6155 6156 /* Find the lowest pfn for a node */ 6157 static unsigned long __init find_min_pfn_for_node(int nid) 6158 { 6159 unsigned long min_pfn = ULONG_MAX; 6160 unsigned long start_pfn; 6161 int i; 6162 6163 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) 6164 min_pfn = min(min_pfn, start_pfn); 6165 6166 if (min_pfn == ULONG_MAX) { 6167 pr_warn("Could not find start_pfn for node %d\n", nid); 6168 return 0; 6169 } 6170 6171 return min_pfn; 6172 } 6173 6174 /** 6175 * find_min_pfn_with_active_regions - Find the minimum PFN registered 6176 * 6177 * It returns the minimum PFN based on information provided via 6178 * memblock_set_node(). 6179 */ 6180 unsigned long __init find_min_pfn_with_active_regions(void) 6181 { 6182 return find_min_pfn_for_node(MAX_NUMNODES); 6183 } 6184 6185 /* 6186 * early_calculate_totalpages() 6187 * Sum pages in active regions for movable zone. 6188 * Populate N_MEMORY for calculating usable_nodes. 
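 * e.g. two memblock ranges of 0x40000 pages each, on nodes 0 and 1, return a
 * total of 0x80000 pages and set both nodes in N_MEMORY (illustrative figures).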
6189 */ 6190 static unsigned long __init early_calculate_totalpages(void) 6191 { 6192 unsigned long totalpages = 0; 6193 unsigned long start_pfn, end_pfn; 6194 int i, nid; 6195 6196 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 6197 unsigned long pages = end_pfn - start_pfn; 6198 6199 totalpages += pages; 6200 if (pages) 6201 node_set_state(nid, N_MEMORY); 6202 } 6203 return totalpages; 6204 } 6205 6206 /* 6207 * Find the PFN the Movable zone begins in each node. Kernel memory 6208 * is spread evenly between nodes as long as the nodes have enough 6209 * memory. When they don't, some nodes will have more kernelcore than 6210 * others 6211 */ 6212 static void __init find_zone_movable_pfns_for_nodes(void) 6213 { 6214 int i, nid; 6215 unsigned long usable_startpfn; 6216 unsigned long kernelcore_node, kernelcore_remaining; 6217 /* save the state before borrow the nodemask */ 6218 nodemask_t saved_node_state = node_states[N_MEMORY]; 6219 unsigned long totalpages = early_calculate_totalpages(); 6220 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 6221 struct memblock_region *r; 6222 6223 /* Need to find movable_zone earlier when movable_node is specified. */ 6224 find_usable_zone_for_movable(); 6225 6226 /* 6227 * If movable_node is specified, ignore kernelcore and movablecore 6228 * options. 6229 */ 6230 if (movable_node_is_enabled()) { 6231 for_each_memblock(memory, r) { 6232 if (!memblock_is_hotpluggable(r)) 6233 continue; 6234 6235 nid = r->nid; 6236 6237 usable_startpfn = PFN_DOWN(r->base); 6238 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 6239 min(usable_startpfn, zone_movable_pfn[nid]) : 6240 usable_startpfn; 6241 } 6242 6243 goto out2; 6244 } 6245 6246 /* 6247 * If kernelcore=mirror is specified, ignore movablecore option 6248 */ 6249 if (mirrored_kernelcore) { 6250 bool mem_below_4gb_not_mirrored = false; 6251 6252 for_each_memblock(memory, r) { 6253 if (memblock_is_mirror(r)) 6254 continue; 6255 6256 nid = r->nid; 6257 6258 usable_startpfn = memblock_region_memory_base_pfn(r); 6259 6260 if (usable_startpfn < 0x100000) { 6261 mem_below_4gb_not_mirrored = true; 6262 continue; 6263 } 6264 6265 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 6266 min(usable_startpfn, zone_movable_pfn[nid]) : 6267 usable_startpfn; 6268 } 6269 6270 if (mem_below_4gb_not_mirrored) 6271 pr_warn("This configuration results in unmirrored kernel memory."); 6272 6273 goto out2; 6274 } 6275 6276 /* 6277 * If movablecore=nn[KMG] was specified, calculate what size of 6278 * kernelcore that corresponds so that memory usable for 6279 * any allocation type is evenly spread. If both kernelcore 6280 * and movablecore are specified, then the value of kernelcore 6281 * will be used for required_kernelcore if it's greater than 6282 * what movablecore would have allowed. 6283 */ 6284 if (required_movablecore) { 6285 unsigned long corepages; 6286 6287 /* 6288 * Round-up so that ZONE_MOVABLE is at least as large as what 6289 * was requested by the user 6290 */ 6291 required_movablecore = 6292 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 6293 required_movablecore = min(totalpages, required_movablecore); 6294 corepages = totalpages - required_movablecore; 6295 6296 required_kernelcore = max(required_kernelcore, corepages); 6297 } 6298 6299 /* 6300 * If kernelcore was not specified or kernelcore size is larger 6301 * than totalpages, there is no ZONE_MOVABLE. 
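 * e.g. booting with "kernelcore=4G" on a machine with 3G of RAM makes
 * required_kernelcore exceed totalpages, so no ZONE_MOVABLE is carved out
 * (hypothetical sizes).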
6302 */ 6303 if (!required_kernelcore || required_kernelcore >= totalpages) 6304 goto out; 6305 6306 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 6307 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 6308 6309 restart: 6310 /* Spread kernelcore memory as evenly as possible throughout nodes */ 6311 kernelcore_node = required_kernelcore / usable_nodes; 6312 for_each_node_state(nid, N_MEMORY) { 6313 unsigned long start_pfn, end_pfn; 6314 6315 /* 6316 * Recalculate kernelcore_node if the division per node 6317 * now exceeds what is necessary to satisfy the requested 6318 * amount of memory for the kernel 6319 */ 6320 if (required_kernelcore < kernelcore_node) 6321 kernelcore_node = required_kernelcore / usable_nodes; 6322 6323 /* 6324 * As the map is walked, we track how much memory is usable 6325 * by the kernel using kernelcore_remaining. When it is 6326 * 0, the rest of the node is usable by ZONE_MOVABLE 6327 */ 6328 kernelcore_remaining = kernelcore_node; 6329 6330 /* Go through each range of PFNs within this node */ 6331 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 6332 unsigned long size_pages; 6333 6334 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 6335 if (start_pfn >= end_pfn) 6336 continue; 6337 6338 /* Account for what is only usable for kernelcore */ 6339 if (start_pfn < usable_startpfn) { 6340 unsigned long kernel_pages; 6341 kernel_pages = min(end_pfn, usable_startpfn) 6342 - start_pfn; 6343 6344 kernelcore_remaining -= min(kernel_pages, 6345 kernelcore_remaining); 6346 required_kernelcore -= min(kernel_pages, 6347 required_kernelcore); 6348 6349 /* Continue if range is now fully accounted */ 6350 if (end_pfn <= usable_startpfn) { 6351 6352 /* 6353 * Push zone_movable_pfn to the end so 6354 * that if we have to rebalance 6355 * kernelcore across nodes, we will 6356 * not double account here 6357 */ 6358 zone_movable_pfn[nid] = end_pfn; 6359 continue; 6360 } 6361 start_pfn = usable_startpfn; 6362 } 6363 6364 /* 6365 * The usable PFN range for ZONE_MOVABLE is from 6366 * start_pfn->end_pfn. Calculate size_pages as the 6367 * number of pages used as kernelcore 6368 */ 6369 size_pages = end_pfn - start_pfn; 6370 if (size_pages > kernelcore_remaining) 6371 size_pages = kernelcore_remaining; 6372 zone_movable_pfn[nid] = start_pfn + size_pages; 6373 6374 /* 6375 * Some kernelcore has been met, update counts and 6376 * break if the kernelcore for this node has been 6377 * satisfied 6378 */ 6379 required_kernelcore -= min(required_kernelcore, 6380 size_pages); 6381 kernelcore_remaining -= size_pages; 6382 if (!kernelcore_remaining) 6383 break; 6384 } 6385 } 6386 6387 /* 6388 * If there is still required_kernelcore, we do another pass with one 6389 * less node in the count. This will push zone_movable_pfn[nid] further 6390 * along on the nodes that still have memory until kernelcore is 6391 * satisfied 6392 */ 6393 usable_nodes--; 6394 if (usable_nodes && required_kernelcore > usable_nodes) 6395 goto restart; 6396 6397 out2: 6398 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 6399 for (nid = 0; nid < MAX_NUMNODES; nid++) 6400 zone_movable_pfn[nid] = 6401 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 6402 6403 out: 6404 /* restore the node_state */ 6405 node_states[N_MEMORY] = saved_node_state; 6406 } 6407 6408 /* Any regular or high memory on that node ? 
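 * Walk the zones below ZONE_MOVABLE and, for the first populated one, set
 * N_HIGH_MEMORY (plus N_NORMAL_MEMORY when that zone is at or below ZONE_NORMAL).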
*/ 6409 static void check_for_memory(pg_data_t *pgdat, int nid) 6410 { 6411 enum zone_type zone_type; 6412 6413 if (N_MEMORY == N_NORMAL_MEMORY) 6414 return; 6415 6416 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 6417 struct zone *zone = &pgdat->node_zones[zone_type]; 6418 if (populated_zone(zone)) { 6419 node_set_state(nid, N_HIGH_MEMORY); 6420 if (N_NORMAL_MEMORY != N_HIGH_MEMORY && 6421 zone_type <= ZONE_NORMAL) 6422 node_set_state(nid, N_NORMAL_MEMORY); 6423 break; 6424 } 6425 } 6426 } 6427 6428 /** 6429 * free_area_init_nodes - Initialise all pg_data_t and zone data 6430 * @max_zone_pfn: an array of max PFNs for each zone 6431 * 6432 * This will call free_area_init_node() for each active node in the system. 6433 * Using the page ranges provided by memblock_set_node(), the size of each 6434 * zone in each node and their holes is calculated. If the maximum PFN 6435 * between two adjacent zones match, it is assumed that the zone is empty. 6436 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 6437 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 6438 * starts where the previous one ended. For example, ZONE_DMA32 starts 6439 * at arch_max_dma_pfn. 6440 */ 6441 void __init free_area_init_nodes(unsigned long *max_zone_pfn) 6442 { 6443 unsigned long start_pfn, end_pfn; 6444 int i, nid; 6445 6446 /* Record where the zone boundaries are */ 6447 memset(arch_zone_lowest_possible_pfn, 0, 6448 sizeof(arch_zone_lowest_possible_pfn)); 6449 memset(arch_zone_highest_possible_pfn, 0, 6450 sizeof(arch_zone_highest_possible_pfn)); 6451 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 6452 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 6453 for (i = 1; i < MAX_NR_ZONES; i++) { 6454 if (i == ZONE_MOVABLE) 6455 continue; 6456 arch_zone_lowest_possible_pfn[i] = 6457 arch_zone_highest_possible_pfn[i-1]; 6458 arch_zone_highest_possible_pfn[i] = 6459 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 6460 } 6461 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 6462 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 6463 6464 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 6465 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 6466 find_zone_movable_pfns_for_nodes(); 6467 6468 /* Print out the zone ranges */ 6469 pr_info("Zone ranges:\n"); 6470 for (i = 0; i < MAX_NR_ZONES; i++) { 6471 if (i == ZONE_MOVABLE) 6472 continue; 6473 pr_info(" %-8s ", zone_names[i]); 6474 if (arch_zone_lowest_possible_pfn[i] == 6475 arch_zone_highest_possible_pfn[i]) 6476 pr_cont("empty\n"); 6477 else 6478 pr_cont("[mem %#018Lx-%#018Lx]\n", 6479 (u64)arch_zone_lowest_possible_pfn[i] 6480 << PAGE_SHIFT, 6481 ((u64)arch_zone_highest_possible_pfn[i] 6482 << PAGE_SHIFT) - 1); 6483 } 6484 6485 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 6486 pr_info("Movable zone start for each node\n"); 6487 for (i = 0; i < MAX_NUMNODES; i++) { 6488 if (zone_movable_pfn[i]) 6489 pr_info(" Node %d: %#018Lx\n", i, 6490 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 6491 } 6492 6493 /* Print out the early node map */ 6494 pr_info("Early memory node ranges\n"); 6495 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) 6496 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 6497 (u64)start_pfn << PAGE_SHIFT, 6498 ((u64)end_pfn << PAGE_SHIFT) - 1); 6499 6500 /* Initialise every node */ 6501 mminit_verify_pageflags_layout(); 6502 setup_nr_node_ids(); 6503 for_each_online_node(nid) { 6504 pg_data_t *pgdat = NODE_DATA(nid); 
6505 free_area_init_node(nid, NULL, 6506 find_min_pfn_for_node(nid), NULL); 6507 6508 /* Any memory on that node */ 6509 if (pgdat->node_present_pages) 6510 node_set_state(nid, N_MEMORY); 6511 check_for_memory(pgdat, nid); 6512 } 6513 } 6514 6515 static int __init cmdline_parse_core(char *p, unsigned long *core) 6516 { 6517 unsigned long long coremem; 6518 if (!p) 6519 return -EINVAL; 6520 6521 coremem = memparse(p, &p); 6522 *core = coremem >> PAGE_SHIFT; 6523 6524 /* Paranoid check that UL is enough for the coremem value */ 6525 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 6526 6527 return 0; 6528 } 6529 6530 /* 6531 * kernelcore=size sets the amount of memory for use for allocations that 6532 * cannot be reclaimed or migrated. 6533 */ 6534 static int __init cmdline_parse_kernelcore(char *p) 6535 { 6536 /* parse kernelcore=mirror */ 6537 if (parse_option_str(p, "mirror")) { 6538 mirrored_kernelcore = true; 6539 return 0; 6540 } 6541 6542 return cmdline_parse_core(p, &required_kernelcore); 6543 } 6544 6545 /* 6546 * movablecore=size sets the amount of memory for use for allocations that 6547 * can be reclaimed or migrated. 6548 */ 6549 static int __init cmdline_parse_movablecore(char *p) 6550 { 6551 return cmdline_parse_core(p, &required_movablecore); 6552 } 6553 6554 early_param("kernelcore", cmdline_parse_kernelcore); 6555 early_param("movablecore", cmdline_parse_movablecore); 6556 6557 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 6558 6559 void adjust_managed_page_count(struct page *page, long count) 6560 { 6561 spin_lock(&managed_page_count_lock); 6562 page_zone(page)->managed_pages += count; 6563 totalram_pages += count; 6564 #ifdef CONFIG_HIGHMEM 6565 if (PageHighMem(page)) 6566 totalhigh_pages += count; 6567 #endif 6568 spin_unlock(&managed_page_count_lock); 6569 } 6570 EXPORT_SYMBOL(adjust_managed_page_count); 6571 6572 unsigned long free_reserved_area(void *start, void *end, int poison, char *s) 6573 { 6574 void *pos; 6575 unsigned long pages = 0; 6576 6577 start = (void *)PAGE_ALIGN((unsigned long)start); 6578 end = (void *)((unsigned long)end & PAGE_MASK); 6579 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 6580 if ((unsigned int)poison <= 0xFF) 6581 memset(pos, poison, PAGE_SIZE); 6582 free_reserved_page(virt_to_page(pos)); 6583 } 6584 6585 if (pages && s) 6586 pr_info("Freeing %s memory: %ldK (%p - %p)\n", 6587 s, pages << (PAGE_SHIFT - 10), start, end); 6588 6589 return pages; 6590 } 6591 EXPORT_SYMBOL(free_reserved_area); 6592 6593 #ifdef CONFIG_HIGHMEM 6594 void free_highmem_page(struct page *page) 6595 { 6596 __free_reserved_page(page); 6597 totalram_pages++; 6598 page_zone(page)->managed_pages++; 6599 totalhigh_pages++; 6600 } 6601 #endif 6602 6603 6604 void __init mem_init_print_info(const char *str) 6605 { 6606 unsigned long physpages, codesize, datasize, rosize, bss_size; 6607 unsigned long init_code_size, init_data_size; 6608 6609 physpages = get_num_physpages(); 6610 codesize = _etext - _stext; 6611 datasize = _edata - _sdata; 6612 rosize = __end_rodata - __start_rodata; 6613 bss_size = __bss_stop - __bss_start; 6614 init_data_size = __init_end - __init_begin; 6615 init_code_size = _einittext - _sinittext; 6616 6617 /* 6618 * Detect special cases and adjust section sizes accordingly: 6619 * 1) .init.* may be embedded into .data sections 6620 * 2) .init.text.* may be out of [__init_begin, __init_end], 6621 * please refer to arch/tile/kernel/vmlinux.lds.S. 6622 * 3) .rodata.* may be embedded into .text or .data sections. 
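 * The adj_init_size() helper below only subtracts 'adj' from 'size' when 'pos'
 * lies inside [start, end) and the subtraction would not underflow.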
6623 */ 6624 #define adj_init_size(start, end, size, pos, adj) \ 6625 do { \ 6626 if (start <= pos && pos < end && size > adj) \ 6627 size -= adj; \ 6628 } while (0) 6629 6630 adj_init_size(__init_begin, __init_end, init_data_size, 6631 _sinittext, init_code_size); 6632 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 6633 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 6634 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 6635 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 6636 6637 #undef adj_init_size 6638 6639 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 6640 #ifdef CONFIG_HIGHMEM 6641 ", %luK highmem" 6642 #endif 6643 "%s%s)\n", 6644 nr_free_pages() << (PAGE_SHIFT - 10), 6645 physpages << (PAGE_SHIFT - 10), 6646 codesize >> 10, datasize >> 10, rosize >> 10, 6647 (init_data_size + init_code_size) >> 10, bss_size >> 10, 6648 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10), 6649 totalcma_pages << (PAGE_SHIFT - 10), 6650 #ifdef CONFIG_HIGHMEM 6651 totalhigh_pages << (PAGE_SHIFT - 10), 6652 #endif 6653 str ? ", " : "", str ? str : ""); 6654 } 6655 6656 /** 6657 * set_dma_reserve - set the specified number of pages reserved in the first zone 6658 * @new_dma_reserve: The number of pages to mark reserved 6659 * 6660 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 6661 * In the DMA zone, a significant percentage may be consumed by kernel image 6662 * and other unfreeable allocations which can skew the watermarks badly. This 6663 * function may optionally be used to account for unfreeable pages in the 6664 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 6665 * smaller per-cpu batchsize. 6666 */ 6667 void __init set_dma_reserve(unsigned long new_dma_reserve) 6668 { 6669 dma_reserve = new_dma_reserve; 6670 } 6671 6672 void __init free_area_init(unsigned long *zones_size) 6673 { 6674 free_area_init_node(0, zones_size, 6675 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 6676 } 6677 6678 static int page_alloc_cpu_notify(struct notifier_block *self, 6679 unsigned long action, void *hcpu) 6680 { 6681 int cpu = (unsigned long)hcpu; 6682 6683 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 6684 lru_add_drain_cpu(cpu); 6685 drain_pages(cpu); 6686 6687 /* 6688 * Spill the event counters of the dead processor 6689 * into the current processors event counters. 6690 * This artificially elevates the count of the current 6691 * processor. 6692 */ 6693 vm_events_fold_cpu(cpu); 6694 6695 /* 6696 * Zero the differential counters of the dead processor 6697 * so that the vm statistics are consistent. 6698 * 6699 * This is only okay since the processor is dead and cannot 6700 * race with what we are doing. 6701 */ 6702 cpu_vm_stats_fold(cpu); 6703 } 6704 return NOTIFY_OK; 6705 } 6706 6707 void __init page_alloc_init(void) 6708 { 6709 hotcpu_notifier(page_alloc_cpu_notify, 0); 6710 } 6711 6712 /* 6713 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6714 * or min_free_kbytes changes. 
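 * For each zone this takes the largest lowmem_reserve[] entry plus the high
 * watermark, caps the sum at managed_pages, and accumulates the per-zone
 * results into totalreserve_pages.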
6715 */ 6716 static void calculate_totalreserve_pages(void) 6717 { 6718 struct pglist_data *pgdat; 6719 unsigned long reserve_pages = 0; 6720 enum zone_type i, j; 6721 6722 for_each_online_pgdat(pgdat) { 6723 for (i = 0; i < MAX_NR_ZONES; i++) { 6724 struct zone *zone = pgdat->node_zones + i; 6725 long max = 0; 6726 6727 /* Find valid and maximum lowmem_reserve in the zone */ 6728 for (j = i; j < MAX_NR_ZONES; j++) { 6729 if (zone->lowmem_reserve[j] > max) 6730 max = zone->lowmem_reserve[j]; 6731 } 6732 6733 /* we treat the high watermark as reserved pages. */ 6734 max += high_wmark_pages(zone); 6735 6736 if (max > zone->managed_pages) 6737 max = zone->managed_pages; 6738 6739 zone->totalreserve_pages = max; 6740 6741 reserve_pages += max; 6742 } 6743 } 6744 totalreserve_pages = reserve_pages; 6745 } 6746 6747 /* 6748 * setup_per_zone_lowmem_reserve - called whenever 6749 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6750 * has a correct pages reserved value, so an adequate number of 6751 * pages are left in the zone after a successful __alloc_pages(). 6752 */ 6753 static void setup_per_zone_lowmem_reserve(void) 6754 { 6755 struct pglist_data *pgdat; 6756 enum zone_type j, idx; 6757 6758 for_each_online_pgdat(pgdat) { 6759 for (j = 0; j < MAX_NR_ZONES; j++) { 6760 struct zone *zone = pgdat->node_zones + j; 6761 unsigned long managed_pages = zone->managed_pages; 6762 6763 zone->lowmem_reserve[j] = 0; 6764 6765 idx = j; 6766 while (idx) { 6767 struct zone *lower_zone; 6768 6769 idx--; 6770 6771 if (sysctl_lowmem_reserve_ratio[idx] < 1) 6772 sysctl_lowmem_reserve_ratio[idx] = 1; 6773 6774 lower_zone = pgdat->node_zones + idx; 6775 lower_zone->lowmem_reserve[j] = managed_pages / 6776 sysctl_lowmem_reserve_ratio[idx]; 6777 managed_pages += lower_zone->managed_pages; 6778 } 6779 } 6780 } 6781 6782 /* update totalreserve_pages */ 6783 calculate_totalreserve_pages(); 6784 } 6785 6786 static void __setup_per_zone_wmarks(void) 6787 { 6788 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6789 unsigned long lowmem_pages = 0; 6790 struct zone *zone; 6791 unsigned long flags; 6792 6793 /* Calculate total number of !ZONE_HIGHMEM pages */ 6794 for_each_zone(zone) { 6795 if (!is_highmem(zone)) 6796 lowmem_pages += zone->managed_pages; 6797 } 6798 6799 for_each_zone(zone) { 6800 u64 tmp; 6801 6802 spin_lock_irqsave(&zone->lock, flags); 6803 tmp = (u64)pages_min * zone->managed_pages; 6804 do_div(tmp, lowmem_pages); 6805 if (is_highmem(zone)) { 6806 /* 6807 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6808 * need highmem pages, so cap pages_min to a small 6809 * value here. 6810 * 6811 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6812 * deltas control asynch page reclaim, and so should 6813 * not be capped for highmem. 6814 */ 6815 unsigned long min_pages; 6816 6817 min_pages = zone->managed_pages / 1024; 6818 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6819 zone->watermark[WMARK_MIN] = min_pages; 6820 } else { 6821 /* 6822 * If it's a lowmem zone, reserve a number of pages 6823 * proportionate to the zone's size. 6824 */ 6825 zone->watermark[WMARK_MIN] = tmp; 6826 } 6827 6828 /* 6829 * Set the kswapd watermarks distance according to the 6830 * scale factor in proportion to available memory, but 6831 * ensure a minimum size on small systems. 
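 * e.g. with the default watermark_scale_factor of 10 (i.e. 0.1% of the zone),
 * a zone with 1048576 managed pages has its watermark gaps set to at least
 * ~1048 pages (illustrative arithmetic).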
6832 */ 6833 tmp = max_t(u64, tmp >> 2, 6834 mult_frac(zone->managed_pages, 6835 watermark_scale_factor, 10000)); 6836 6837 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6838 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; 6839 6840 __mod_zone_page_state(zone, NR_ALLOC_BATCH, 6841 high_wmark_pages(zone) - low_wmark_pages(zone) - 6842 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); 6843 6844 spin_unlock_irqrestore(&zone->lock, flags); 6845 } 6846 6847 /* update totalreserve_pages */ 6848 calculate_totalreserve_pages(); 6849 } 6850 6851 /** 6852 * setup_per_zone_wmarks - called when min_free_kbytes changes 6853 * or when memory is hot-{added|removed} 6854 * 6855 * Ensures that the watermark[min,low,high] values for each zone are set 6856 * correctly with respect to min_free_kbytes. 6857 */ 6858 void setup_per_zone_wmarks(void) 6859 { 6860 mutex_lock(&zonelists_mutex); 6861 __setup_per_zone_wmarks(); 6862 mutex_unlock(&zonelists_mutex); 6863 } 6864 6865 /* 6866 * Initialise min_free_kbytes. 6867 * 6868 * For small machines we want it small (128k min). For large machines 6869 * we want it large (64MB max). But it is not linear, because network 6870 * bandwidth does not increase linearly with machine size. We use 6871 * 6872 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6873 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6874 * 6875 * which yields 6876 * 6877 * 16MB: 512k 6878 * 32MB: 724k 6879 * 64MB: 1024k 6880 * 128MB: 1448k 6881 * 256MB: 2048k 6882 * 512MB: 2896k 6883 * 1024MB: 4096k 6884 * 2048MB: 5792k 6885 * 4096MB: 8192k 6886 * 8192MB: 11584k 6887 * 16384MB: 16384k 6888 */ 6889 int __meminit init_per_zone_wmark_min(void) 6890 { 6891 unsigned long lowmem_kbytes; 6892 int new_min_free_kbytes; 6893 6894 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6895 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6896 6897 if (new_min_free_kbytes > user_min_free_kbytes) { 6898 min_free_kbytes = new_min_free_kbytes; 6899 if (min_free_kbytes < 128) 6900 min_free_kbytes = 128; 6901 if (min_free_kbytes > 65536) 6902 min_free_kbytes = 65536; 6903 } else { 6904 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6905 new_min_free_kbytes, user_min_free_kbytes); 6906 } 6907 setup_per_zone_wmarks(); 6908 refresh_zone_stat_thresholds(); 6909 setup_per_zone_lowmem_reserve(); 6910 return 0; 6911 } 6912 core_initcall(init_per_zone_wmark_min) 6913 6914 /* 6915 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6916 * that we can call two helper functions whenever min_free_kbytes 6917 * changes. 
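 * (On write the handler records user_min_free_kbytes and rebuilds the
 * per-zone watermarks via setup_per_zone_wmarks().)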
6918 */ 6919 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 6920 void __user *buffer, size_t *length, loff_t *ppos) 6921 { 6922 int rc; 6923 6924 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6925 if (rc) 6926 return rc; 6927 6928 if (write) { 6929 user_min_free_kbytes = min_free_kbytes; 6930 setup_per_zone_wmarks(); 6931 } 6932 return 0; 6933 } 6934 6935 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 6936 void __user *buffer, size_t *length, loff_t *ppos) 6937 { 6938 int rc; 6939 6940 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6941 if (rc) 6942 return rc; 6943 6944 if (write) 6945 setup_per_zone_wmarks(); 6946 6947 return 0; 6948 } 6949 6950 #ifdef CONFIG_NUMA 6951 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 6952 void __user *buffer, size_t *length, loff_t *ppos) 6953 { 6954 struct zone *zone; 6955 int rc; 6956 6957 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6958 if (rc) 6959 return rc; 6960 6961 for_each_zone(zone) 6962 zone->min_unmapped_pages = (zone->managed_pages * 6963 sysctl_min_unmapped_ratio) / 100; 6964 return 0; 6965 } 6966 6967 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 6968 void __user *buffer, size_t *length, loff_t *ppos) 6969 { 6970 struct zone *zone; 6971 int rc; 6972 6973 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6974 if (rc) 6975 return rc; 6976 6977 for_each_zone(zone) 6978 zone->min_slab_pages = (zone->managed_pages * 6979 sysctl_min_slab_ratio) / 100; 6980 return 0; 6981 } 6982 #endif 6983 6984 /* 6985 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 6986 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 6987 * whenever sysctl_lowmem_reserve_ratio changes. 6988 * 6989 * The reserve ratio obviously has absolutely no relation with the 6990 * minimum watermarks. The lowmem reserve ratio can only make sense 6991 * if in function of the boot time zone sizes. 6992 */ 6993 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, 6994 void __user *buffer, size_t *length, loff_t *ppos) 6995 { 6996 proc_dointvec_minmax(table, write, buffer, length, ppos); 6997 setup_per_zone_lowmem_reserve(); 6998 return 0; 6999 } 7000 7001 /* 7002 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 7003 * cpu. It is the fraction of total pages in each zone that a hot per cpu 7004 * pagelist can have before it gets flushed back to buddy allocator. 7005 */ 7006 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, 7007 void __user *buffer, size_t *length, loff_t *ppos) 7008 { 7009 struct zone *zone; 7010 int old_percpu_pagelist_fraction; 7011 int ret; 7012 7013 mutex_lock(&pcp_batch_high_lock); 7014 old_percpu_pagelist_fraction = percpu_pagelist_fraction; 7015 7016 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 7017 if (!write || ret < 0) 7018 goto out; 7019 7020 /* Sanity checking to avoid pcp imbalance */ 7021 if (percpu_pagelist_fraction && 7022 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) { 7023 percpu_pagelist_fraction = old_percpu_pagelist_fraction; 7024 ret = -EINVAL; 7025 goto out; 7026 } 7027 7028 /* No change? 
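 * If the written value matches the old fraction there is nothing to rebuild,
 * so the per-cpu pagesets are left untouched.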
*/ 7029 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction) 7030 goto out; 7031 7032 for_each_populated_zone(zone) { 7033 unsigned int cpu; 7034 7035 for_each_possible_cpu(cpu) 7036 pageset_set_high_and_batch(zone, 7037 per_cpu_ptr(zone->pageset, cpu)); 7038 } 7039 out: 7040 mutex_unlock(&pcp_batch_high_lock); 7041 return ret; 7042 } 7043 7044 #ifdef CONFIG_NUMA 7045 int hashdist = HASHDIST_DEFAULT; 7046 7047 static int __init set_hashdist(char *str) 7048 { 7049 if (!str) 7050 return 0; 7051 hashdist = simple_strtoul(str, &str, 0); 7052 return 1; 7053 } 7054 __setup("hashdist=", set_hashdist); 7055 #endif 7056 7057 /* 7058 * allocate a large system hash table from bootmem 7059 * - it is assumed that the hash table must contain an exact power-of-2 7060 * quantity of entries 7061 * - limit is the number of hash buckets, not the total allocation size 7062 */ 7063 void *__init alloc_large_system_hash(const char *tablename, 7064 unsigned long bucketsize, 7065 unsigned long numentries, 7066 int scale, 7067 int flags, 7068 unsigned int *_hash_shift, 7069 unsigned int *_hash_mask, 7070 unsigned long low_limit, 7071 unsigned long high_limit) 7072 { 7073 unsigned long long max = high_limit; 7074 unsigned long log2qty, size; 7075 void *table = NULL; 7076 7077 /* allow the kernel cmdline to have a say */ 7078 if (!numentries) { 7079 /* round applicable memory size up to nearest megabyte */ 7080 numentries = nr_kernel_pages; 7081 7082 /* It isn't necessary when PAGE_SIZE >= 1MB */ 7083 if (PAGE_SHIFT < 20) 7084 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 7085 7086 /* limit to 1 bucket per 2^scale bytes of low memory */ 7087 if (scale > PAGE_SHIFT) 7088 numentries >>= (scale - PAGE_SHIFT); 7089 else 7090 numentries <<= (PAGE_SHIFT - scale); 7091 7092 /* Make sure we've got at least a 0-order allocation.. 
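 * i.e. unless HASH_SMALL explicitly allows a tiny table, round numentries up
 * so that the table spans at least one page.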
*/
7093 if (unlikely(flags & HASH_SMALL)) {
7094 /* Makes no sense without HASH_EARLY */
7095 WARN_ON(!(flags & HASH_EARLY));
7096 if (!(numentries >> *_hash_shift)) {
7097 numentries = 1UL << *_hash_shift;
7098 BUG_ON(!numentries);
7099 }
7100 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
7101 numentries = PAGE_SIZE / bucketsize;
7102 }
7103 numentries = roundup_pow_of_two(numentries);
7104
7105 /* limit allocation size to 1/16 total memory by default */
7106 if (max == 0) {
7107 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
7108 do_div(max, bucketsize);
7109 }
7110 max = min(max, 0x80000000ULL);
7111
7112 if (numentries < low_limit)
7113 numentries = low_limit;
7114 if (numentries > max)
7115 numentries = max;
7116
7117 log2qty = ilog2(numentries);
7118
7119 do {
7120 size = bucketsize << log2qty;
7121 if (flags & HASH_EARLY)
7122 table = memblock_virt_alloc_nopanic(size, 0);
7123 else if (hashdist)
7124 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
7125 else {
7126 /*
7127 * If bucketsize is not a power-of-two, we may free
7128 * some pages at the end of the hash table, which
7129 * alloc_pages_exact() does automatically.
7130 */
7131 if (get_order(size) < MAX_ORDER) {
7132 table = alloc_pages_exact(size, GFP_ATOMIC);
7133 kmemleak_alloc(table, size, 1, GFP_ATOMIC);
7134 }
7135 }
7136 } while (!table && size > PAGE_SIZE && --log2qty);
7137
7138 if (!table)
7139 panic("Failed to allocate %s hash table\n", tablename);
7140
7141 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
7142 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
7143
7144 if (_hash_shift)
7145 *_hash_shift = log2qty;
7146 if (_hash_mask)
7147 *_hash_mask = (1 << log2qty) - 1;
7148
7149 return table;
7150 }
7151
7152 /*
7153 * This function checks whether the pageblock includes unmovable pages or not.
7154 * If @count is not zero, it is okay to include up to @count unmovable pages.
7155 *
7156 * The PageLRU check without isolation or lru_lock could race, so a
7157 * MIGRATE_MOVABLE block might include unmovable pages. This means you can't
7158 * expect this function to be exact.
7159 */
7160 bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
7161 bool skip_hwpoisoned_pages)
7162 {
7163 unsigned long pfn, iter, found;
7164 int mt;
7165
7166 /*
7167 * To avoid noisy data, lru_add_drain_all() should be called first.
7168 * A ZONE_MOVABLE zone never contains unmovable pages.
7169 */
7170 if (zone_idx(zone) == ZONE_MOVABLE)
7171 return false;
7172 mt = get_pageblock_migratetype(page);
7173 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
7174 return false;
7175
7176 pfn = page_to_pfn(page);
7177 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
7178 unsigned long check = pfn + iter;
7179
7180 if (!pfn_valid_within(check))
7181 continue;
7182
7183 page = pfn_to_page(check);
7184
7185 /*
7186 * Hugepages are not in LRU lists, but they're movable.
7187 * We need not scan over tail pages because we don't
7188 * handle each tail page individually in migration.
7189 */
7190 if (PageHuge(page)) {
7191 iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
7192 continue;
7193 }
7194
7195 /*
7196 * We can't use page_count() without pinning the page
7197 * because another CPU can free the compound page.
7198 * This check already skips compound tails of THP
7199 * because their page->_refcount is zero at all times.
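 * A zero reference count therefore means either a free buddy page (whose
 * whole order can be skipped) or a compound tail, both safe to pass over here.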
7200 */ 7201 if (!page_ref_count(page)) { 7202 if (PageBuddy(page)) 7203 iter += (1 << page_order(page)) - 1; 7204 continue; 7205 } 7206 7207 /* 7208 * The HWPoisoned page may be not in buddy system, and 7209 * page_count() is not 0. 7210 */ 7211 if (skip_hwpoisoned_pages && PageHWPoison(page)) 7212 continue; 7213 7214 if (!PageLRU(page)) 7215 found++; 7216 /* 7217 * If there are RECLAIMABLE pages, we need to check 7218 * it. But now, memory offline itself doesn't call 7219 * shrink_node_slabs() and it still to be fixed. 7220 */ 7221 /* 7222 * If the page is not RAM, page_count()should be 0. 7223 * we don't need more check. This is an _used_ not-movable page. 7224 * 7225 * The problematic thing here is PG_reserved pages. PG_reserved 7226 * is set to both of a memory hole page and a _used_ kernel 7227 * page at boot. 7228 */ 7229 if (found > count) 7230 return true; 7231 } 7232 return false; 7233 } 7234 7235 bool is_pageblock_removable_nolock(struct page *page) 7236 { 7237 struct zone *zone; 7238 unsigned long pfn; 7239 7240 /* 7241 * We have to be careful here because we are iterating over memory 7242 * sections which are not zone aware so we might end up outside of 7243 * the zone but still within the section. 7244 * We have to take care about the node as well. If the node is offline 7245 * its NODE_DATA will be NULL - see page_zone. 7246 */ 7247 if (!node_online(page_to_nid(page))) 7248 return false; 7249 7250 zone = page_zone(page); 7251 pfn = page_to_pfn(page); 7252 if (!zone_spans_pfn(zone, pfn)) 7253 return false; 7254 7255 return !has_unmovable_pages(zone, page, 0, true); 7256 } 7257 7258 #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) 7259 7260 static unsigned long pfn_max_align_down(unsigned long pfn) 7261 { 7262 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, 7263 pageblock_nr_pages) - 1); 7264 } 7265 7266 static unsigned long pfn_max_align_up(unsigned long pfn) 7267 { 7268 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, 7269 pageblock_nr_pages)); 7270 } 7271 7272 /* [start, end) must belong to a single zone. */ 7273 static int __alloc_contig_migrate_range(struct compact_control *cc, 7274 unsigned long start, unsigned long end) 7275 { 7276 /* This function is based on compact_zone() from compaction.c. */ 7277 unsigned long nr_reclaimed; 7278 unsigned long pfn = start; 7279 unsigned int tries = 0; 7280 int ret = 0; 7281 7282 migrate_prep(); 7283 7284 while (pfn < end || !list_empty(&cc->migratepages)) { 7285 if (fatal_signal_pending(current)) { 7286 ret = -EINTR; 7287 break; 7288 } 7289 7290 if (list_empty(&cc->migratepages)) { 7291 cc->nr_migratepages = 0; 7292 pfn = isolate_migratepages_range(cc, pfn, end); 7293 if (!pfn) { 7294 ret = -EINTR; 7295 break; 7296 } 7297 tries = 0; 7298 } else if (++tries == 5) { 7299 ret = ret < 0 ? ret : -EBUSY; 7300 break; 7301 } 7302 7303 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 7304 &cc->migratepages); 7305 cc->nr_migratepages -= nr_reclaimed; 7306 7307 ret = migrate_pages(&cc->migratepages, alloc_migrate_target, 7308 NULL, 0, cc->mode, MR_CMA); 7309 } 7310 if (ret < 0) { 7311 putback_movable_pages(&cc->migratepages); 7312 return ret; 7313 } 7314 return 0; 7315 } 7316 7317 /** 7318 * alloc_contig_range() -- tries to allocate given range of pages 7319 * @start: start PFN to allocate 7320 * @end: one-past-the-last PFN to allocate 7321 * @migratetype: migratetype of the underlaying pageblocks (either 7322 * #MIGRATE_MOVABLE or #MIGRATE_CMA). 
All pageblocks 7323 * in range must have the same migratetype and it must 7324 * be either of the two. 7325 * 7326 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES 7327 * aligned, however it's the caller's responsibility to guarantee that 7328 * we are the only thread that changes migrate type of pageblocks the 7329 * pages fall in. 7330 * 7331 * The PFN range must belong to a single zone. 7332 * 7333 * Returns zero on success or negative error code. On success all 7334 * pages which PFN is in [start, end) are allocated for the caller and 7335 * need to be freed with free_contig_range(). 7336 */ 7337 int alloc_contig_range(unsigned long start, unsigned long end, 7338 unsigned migratetype) 7339 { 7340 unsigned long outer_start, outer_end; 7341 unsigned int order; 7342 int ret = 0; 7343 7344 struct compact_control cc = { 7345 .nr_migratepages = 0, 7346 .order = -1, 7347 .zone = page_zone(pfn_to_page(start)), 7348 .mode = MIGRATE_SYNC, 7349 .ignore_skip_hint = true, 7350 }; 7351 INIT_LIST_HEAD(&cc.migratepages); 7352 7353 /* 7354 * What we do here is we mark all pageblocks in range as 7355 * MIGRATE_ISOLATE. Because pageblock and max order pages may 7356 * have different sizes, and due to the way page allocator 7357 * work, we align the range to biggest of the two pages so 7358 * that page allocator won't try to merge buddies from 7359 * different pageblocks and change MIGRATE_ISOLATE to some 7360 * other migration type. 7361 * 7362 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 7363 * migrate the pages from an unaligned range (ie. pages that 7364 * we are interested in). This will put all the pages in 7365 * range back to page allocator as MIGRATE_ISOLATE. 7366 * 7367 * When this is done, we take the pages in range from page 7368 * allocator removing them from the buddy system. This way 7369 * page allocator will never consider using them. 7370 * 7371 * This lets us mark the pageblocks back as 7372 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 7373 * aligned range but not in the unaligned, original range are 7374 * put back to page allocator so that buddy can use them. 7375 */ 7376 7377 ret = start_isolate_page_range(pfn_max_align_down(start), 7378 pfn_max_align_up(end), migratetype, 7379 false); 7380 if (ret) 7381 return ret; 7382 7383 /* 7384 * In case of -EBUSY, we'd like to know which page causes problem. 7385 * So, just fall through. We will check it in test_pages_isolated(). 7386 */ 7387 ret = __alloc_contig_migrate_range(&cc, start, end); 7388 if (ret && ret != -EBUSY) 7389 goto done; 7390 7391 /* 7392 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES 7393 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 7394 * more, all pages in [start, end) are free in page allocator. 7395 * What we are going to do is to allocate all pages from 7396 * [start, end) (that is remove them from page allocator). 7397 * 7398 * The only problem is that pages at the beginning and at the 7399 * end of interesting range may be not aligned with pages that 7400 * page allocator holds, ie. they can be part of higher order 7401 * pages. Because of this, we reserve the bigger range and 7402 * once this is done free the pages we are not interested in. 7403 * 7404 * We don't have to hold zone->lock here because the pages are 7405 * isolated thus they won't get removed from buddy. 
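 * Once isolate_freepages_range() succeeds below, any excess head and tail
 * pages outside [start, end) are handed back via free_contig_range().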

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;
	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif
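
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Usage sketch (illustration only, not part of the original file):
 * zone_pcp_update() is intended to run after zone->managed_pages has changed,
 * e.g. from the memory hotplug code.  The hypothetical helper below only
 * shows the expected ordering: publish the new page count first, then let
 * every percpu pageset pick up matching ->high and ->batch values.
 */
static void __maybe_unused zone_resized_demo(struct zone *zone, long nr_pages)
{
	/* Updates zone->managed_pages (and totalram_pages) under its lock. */
	adjust_managed_page_count(pfn_to_page(zone->zone_start_pfn), nr_pages);

	/* Recompute pcp->high and pcp->batch on every possible CPU. */
	zone_pcp_update(zone);
}
#endif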

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone and isolated
 * before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	unsigned int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			SetPageReserved(page);
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		pr_info("remove from free list %lx %d %lx\n",
			pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
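
/*
 * Worked example (illustration only, not part of the original file): in
 * is_free_buddy_page() above, "page - (pfn & ((1 << order) - 1))" walks from
 * the page to the candidate head of an order-'order' buddy block.  For
 * pfn == 0x1234 and order == 3, pfn & 7 == 4, so page_head is the page at
 * pfn 0x1230; the page is reported free if some such head is PageBuddy with
 * an order large enough to cover it.  The hypothetical helper below restates
 * just that masking.
 */
static inline unsigned long buddy_head_pfn(unsigned long pfn, unsigned int order)
{
	/* PFN of the first page in the order-'order' block containing pfn. */
	return pfn & ~((1UL << order) - 1);
}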