// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
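 * For example, __free_pages_core() passes FPI_TO_TAIL when handing freshly
 * onlined memory back to the buddy allocator, so those pages are not the
 * first ones picked for subsequent allocations.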
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/* work_structs for global per-cpu drains */
struct pcpu_drain {
	struct zone *zone;
	struct work_struct work;
};
static DEFINE_MUTEX(pcpu_drain_mutex);
static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_FALSE(init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_FALSE(init_on_free);
EXPORT_SYMBOL(init_on_free);

static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{
	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
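 * For example, free_pcppages_bulk() normally trusts this cached value when
 * handing pages back to the buddy lists, and only re-reads the pageblock
 * migratetype when the zone currently has isolated pageblocks.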
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
#ifdef CONFIG_DISCONTIGMEM
/*
 * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
 * are not on separate NUMA nodes. Functionally this works but with
 * watermark_boost_factor, it can reclaim prematurely as the ranges can be
 * quite small. By default, do not boost watermarks on discontigmem as in
 * many cases very high-order allocations like THP are likely to be
 * unsupported and the premature reclaim offsets the advantage of long-term
 * fragmentation avoidance.
 */
int watermark_boost_factor __read_mostly;
#else
int watermark_boost_factor __read_mostly = 15000;
#endif
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
static bool mirrored_kernelcore __meminitdata;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * Call kasan_free_pages() only after deferred memory initialization
 * has completed. Poisoning pages during deferred memory init will greatly
 * lengthen the process and cause problems in large memory systems as the
 * deferred page initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline void kasan_free_nondeferred_pages(struct page *page, int order)
{
	if (!static_branch_unlikely(&deferred_pages))
		kasan_free_pages(page, order);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
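 * For example, on a large node only roughly the first section's worth of
 * pages in the highest zone is initialised eagerly; everything past the pfn
 * recorded in first_deferred_pfn is left for deferred_init_memmap().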
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * prev_end_pfn is a static that contains the end of the previous zone.
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
#else
#define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

static __always_inline
unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	return (word >> bitidx) & mask;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
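 * (For example, an order-2 compound page consists of one head page followed
 * by three tail pages.)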
 * PageTail() is encoded in bit 0 of page->compound_head. The rest of the
 * bits are a pointer to the head page.
 *
 * The first tail page's ->compound_dtor holds the offset in array of compound
 * page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page);
	__free_pages_ok(page, compound_order(page), FPI_NONE);
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	atomic_set(compound_mapcount_ptr(page), -1);
	if (hpage_pincount_available(page))
		atomic_set(compound_pincount_ptr(page), 0);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static int __init early_debug_pagealloc(char *buf)
{
	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	__SetPageGuard(page);
	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return;

	__ClearPageGuard(page);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
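 * For example, if the kernel boots with both init_on_alloc=1 and page
 * poisoning enabled, poisoning takes precedence and the init_on_alloc
 * static key is left disabled (with an informational message).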
 */
void init_mem_debugging_and_hardening(void)
{
	if (_init_on_alloc_enabled_early) {
		if (page_poisoning_enabled())
			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
				"will take precedence over init_on_alloc\n");
		else
			static_branch_enable(&init_on_alloc);
	}
	if (_init_on_free_enabled_early) {
		if (page_poisoning_enabled())
			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
				"will take precedence over init_on_free\n");
		else
			static_branch_enable(&init_on_free);
	}

#ifdef CONFIG_PAGE_POISONING
	/*
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options are enabled, enable poisoning.
	 */
	if (page_poisoning_enabled() ||
	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
	      debug_pagealloc_enabled()))
		static_branch_enable(&_page_poisoning_enabled);
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (!debug_pagealloc_enabled())
		return;

	static_branch_enable(&_debug_pagealloc_enabled);

	if (!debug_guardpage_minorder())
		return;

	static_branch_enable(&_debug_guardpage_enabled);
#endif
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->lru, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case
 * that is happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher-order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	struct page *higher_page, *higher_buddy;
	unsigned long combined_pfn;

	if (order >= MAX_ORDER - 2)
		return false;

	if (!pfn_valid_within(buddy_pfn))
		return false;

	combined_pfn = buddy_pfn & pfn;
	higher_page = page + (combined_pfn - pfn);
	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
	higher_buddy = higher_page + (buddy_pfn - combined_pfn);

	return pfn_valid_within(buddy_pfn) &&
	       page_is_buddy(higher_page, higher_buddy, order + 1);
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
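 * For example, the order-0 buddy of pfn 8 is pfn 9 (pfn ^ 1); if both are
 * free they merge into an order-1 block at pfn 8, whose order-1 buddy is
 * the block at pfn 10, and so on up the orders.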
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn;
	unsigned long combined_pfn;
	unsigned int max_order;
	struct page *buddy;
	bool to_tail;

	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}
		buddy_pfn = __find_buddy_pfn(pfn, order);
		buddy = page + (buddy_pfn - pfn);

		if (!pfn_valid_within(buddy_pfn))
			goto done_merging;
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}
	if (order < MAX_ORDER - 1) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order = order + 1;
		goto continue_merging;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page_memcg(page) |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page_memcg(page)))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void check_free_page_bad(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline int check_free_page(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	check_free_page_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static void kernel_init_free_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage(page + i);
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy
		 * Untie memcg state and reset page's owner
		 */
		if (memcg_kmem_enabled() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(check_free_page(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (check_free)
		bad += check_free_page(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	if (want_init_on_free())
		kernel_init_free_pages(page, 1 << order);

	kernel_poison_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	kasan_free_nondeferred_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
 * moved from pcp lists to free lists.
 */
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return check_free_page(page);
	else
		return false;
}
#else
/*
 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
 * moving from pcp lists to free list in order to reduce overhead. With
 * debug_pagealloc enabled, they are checked also immediately when being freed
 * to the pcp lists.
 */
static bool free_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return free_pages_prepare(page, 0, true);
	else
		return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return check_free_page(page);
}
#endif /* CONFIG_DEBUG_VM */

static inline void prefetch_buddy(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
	struct page *buddy = page + (buddy_pfn - pfn);

	prefetch(buddy);
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int prefetch_nr = READ_ONCE(pcp->batch);
	bool isolated_pageblocks;
	struct page *page, *tmp;
	LIST_HEAD(head);

	/*
	 * Ensure a proper count is passed; otherwise we would get stuck in
	 * the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);
	while (count) {
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all.
		 */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = count;

		do {
			page = list_last_entry(list, struct page, lru);
			/* must delete to avoid corrupting pcp list */
			list_del(&page->lru);
			pcp->count--;

			if (bulkfree_pcp_prepare(page))
				continue;

			list_add_tail(&page->lru, &head);

			/*
			 * We are going to put the page back to the global
			 * pool, prefetch its buddy to speed up later access
			 * under zone->lock. It is believed the overhead of
			 * an additional test and calculating buddy_pfn here
			 * can be offset by reduced memory latency later. To
			 * avoid excessive prefetching due to large count, only
			 * prefetch buddy for the first pcp->batch nr of pages.
			 */
			if (prefetch_nr) {
				prefetch_buddy(page);
				prefetch_nr--;
			}
		} while (--count && --batch_free && !list_empty(list));
	}

	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	/*
	 * Use safe version since after __free_one_page(),
	 * page->lru.next will not point to original list.
	 */
	list_for_each_entry_safe(page, tmp, &head, lru) {
		int mt = get_pcppage_migratetype(page);
		/* MIGRATE_ISOLATE page should not go to pcplists */
		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
		/* Pageblock could have been isolated meanwhile */
		if (unlikely(isolated_pageblocks))
			mt = get_pageblock_migratetype(page);

		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
		trace_mm_page_pcpu_drain(page, 0, mt);
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	spin_lock(&zone->lock);
	if (unlikely(has_isolate_pageblock(zone) ||
		     is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock(&zone->lock);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved.
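 * For example, assuming 4K pages, a reserved physical range [0x1000, 0x2800)
 * rounds to pfns 1 and 2 via PFN_DOWN()/PFN_UP(), and both get PageReserved.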
 * The remaining valid pages are later sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order, true))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, pfn, order, migratetype,
		      fpi_flags);
	local_irq_restore(flags);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */

void __init memblock_free_pages(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	__free_pages_core(page, order);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			 block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
		cond_resched();
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(unsigned long pfn,
				       unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	if (!nr_pages)
		return;

	page = pfn_to_page(pfn);

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == pageblock_nr_pages &&
	    (pfn & (pageblock_nr_pages - 1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, pageblock_order);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if ((pfn & (pageblock_nr_pages - 1)) == 0)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, 0);
	}
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/*
 * Returns true if page needs to be initialized or freed to buddy allocator.
 *
 * First we check if pfn is valid on architectures where it is possible to have
 * holes within pageblock_nr_pages. On systems where it is not possible, this
 * function is optimized out.
 *
 * Then, we check if a current large page is valid by only checking the validity
 * of the head pfn.
 */
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
	if (!pfn_valid_within(pfn))
		return false;
	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
		return false;
	return true;
}

/*
 * Free pages to buddy allocator. Try to free aligned pages in
 * pageblock_nr_pages sizes.
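 * For example, a fully valid, pageblock-aligned run is flushed to
 * deferred_free_range() one pageblock at a time, so each aligned chunk can
 * be freed as a single pageblock_order page rather than page by page.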
 */
static void __init deferred_free_pages(unsigned long pfn,
				       unsigned long end_pfn)
{
	unsigned long nr_pgmask = pageblock_nr_pages - 1;
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 0;
		} else if (!(pfn & nr_pgmask)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 1;
		} else {
			nr_free++;
		}
	}
	/* Free the last block of pages to allocator */
	deferred_free_range(pfn - nr_free, nr_free);
}

/*
 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
 * by performing it only once every pageblock_nr_pages.
 * Return number of pages initialized.
 */
static unsigned long __init deferred_init_pages(struct zone *zone,
						unsigned long pfn,
						unsigned long end_pfn)
{
	unsigned long nr_pgmask = pageblock_nr_pages - 1;
	int nid = zone_to_nid(zone);
	unsigned long nr_pages = 0;
	int zid = zone_idx(zone);
	struct page *page = NULL;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			page = NULL;
			continue;
		} else if (!page || !(pfn & nr_pgmask)) {
			page = pfn_to_page(pfn);
		} else {
			page++;
		}
		__init_single_page(page, pfn, zid, nid);
		nr_pages++;
	}
	return (nr_pages);
}

/*
 * This function is meant to pre-load the iterator for the zone init.
 * Specifically it walks through the ranges until we are caught up to the
 * first_init_pfn value and exits there. If we never encounter the value we
 * return false indicating there are no valid ranges left.
 */
static bool __init
deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
				    unsigned long *spfn, unsigned long *epfn,
				    unsigned long first_init_pfn)
{
	u64 j;

	/*
	 * Start out by walking through the ranges in this zone that have
	 * already been initialized. We don't need to do anything with them
	 * so we just need to flush them out of the system.
	 */
	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
		if (*epfn <= first_init_pfn)
			continue;
		if (*spfn < first_init_pfn)
			*spfn = first_init_pfn;
		*i = j;
		return true;
	}

	return false;
}

/*
 * Initialize and free pages. We do it in two loops: first we initialize
 * struct page, then free to buddy allocator, because while we are
 * freeing pages we can access pages that are ahead (computing buddy
 * page in __free_one_page()).
 *
 * In order to try and keep some memory in the cache we have the loop
 * broken along max page order boundaries. This way we will not cause
 * any issues with the buddy page computation.
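 * For example, with the default MAX_ORDER of 11 (MAX_ORDER_NR_PAGES == 1024),
 * a range starting at pfn 1000 is first initialised and then freed only up to
 * pfn 1024, and the next call continues from that max-order-aligned boundary.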
1852 */ 1853 static unsigned long __init 1854 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, 1855 unsigned long *end_pfn) 1856 { 1857 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); 1858 unsigned long spfn = *start_pfn, epfn = *end_pfn; 1859 unsigned long nr_pages = 0; 1860 u64 j = *i; 1861 1862 /* First we loop through and initialize the page values */ 1863 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { 1864 unsigned long t; 1865 1866 if (mo_pfn <= *start_pfn) 1867 break; 1868 1869 t = min(mo_pfn, *end_pfn); 1870 nr_pages += deferred_init_pages(zone, *start_pfn, t); 1871 1872 if (mo_pfn < *end_pfn) { 1873 *start_pfn = mo_pfn; 1874 break; 1875 } 1876 } 1877 1878 /* Reset values and now loop through freeing pages as needed */ 1879 swap(j, *i); 1880 1881 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { 1882 unsigned long t; 1883 1884 if (mo_pfn <= spfn) 1885 break; 1886 1887 t = min(mo_pfn, epfn); 1888 deferred_free_pages(spfn, t); 1889 1890 if (mo_pfn <= epfn) 1891 break; 1892 } 1893 1894 return nr_pages; 1895 } 1896 1897 static void __init 1898 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 1899 void *arg) 1900 { 1901 unsigned long spfn, epfn; 1902 struct zone *zone = arg; 1903 u64 i; 1904 1905 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); 1906 1907 /* 1908 * Initialize and free pages in MAX_ORDER sized increments so that we 1909 * can avoid introducing any issues with the buddy allocator. 1910 */ 1911 while (spfn < end_pfn) { 1912 deferred_init_maxorder(&i, zone, &spfn, &epfn); 1913 cond_resched(); 1914 } 1915 } 1916 1917 /* An arch may override for more concurrency. */ 1918 __weak int __init 1919 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 1920 { 1921 return 1; 1922 } 1923 1924 /* Initialise remaining memory on a node */ 1925 static int __init deferred_init_memmap(void *data) 1926 { 1927 pg_data_t *pgdat = data; 1928 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 1929 unsigned long spfn = 0, epfn = 0; 1930 unsigned long first_init_pfn, flags; 1931 unsigned long start = jiffies; 1932 struct zone *zone; 1933 int zid, max_threads; 1934 u64 i; 1935 1936 /* Bind memory initialisation thread to a local node if possible */ 1937 if (!cpumask_empty(cpumask)) 1938 set_cpus_allowed_ptr(current, cpumask); 1939 1940 pgdat_resize_lock(pgdat, &flags); 1941 first_init_pfn = pgdat->first_deferred_pfn; 1942 if (first_init_pfn == ULONG_MAX) { 1943 pgdat_resize_unlock(pgdat, &flags); 1944 pgdat_init_report_one_done(); 1945 return 0; 1946 } 1947 1948 /* Sanity check boundaries */ 1949 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 1950 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 1951 pgdat->first_deferred_pfn = ULONG_MAX; 1952 1953 /* 1954 * Once we unlock here, the zone cannot be grown anymore, thus if an 1955 * interrupt thread must allocate this early in boot, zone must be 1956 * pre-grown prior to start of deferred page initialization. 
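 *
 * (On-demand growth comes from _deferred_grow_zone() in the allocation
 * path; once first_deferred_pfn has been set to ULONG_MAX above, those
 * attempts find no remaining range and back off, leaving the rest of the
 * zone's initialisation to this thread.)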
1957 */ 1958 pgdat_resize_unlock(pgdat, &flags); 1959 1960 /* Only the highest zone is deferred so find it */ 1961 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1962 zone = pgdat->node_zones + zid; 1963 if (first_init_pfn < zone_end_pfn(zone)) 1964 break; 1965 } 1966 1967 /* If the zone is empty somebody else may have cleared out the zone */ 1968 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 1969 first_init_pfn)) 1970 goto zone_empty; 1971 1972 max_threads = deferred_page_init_max_threads(cpumask); 1973 1974 while (spfn < epfn) { 1975 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); 1976 struct padata_mt_job job = { 1977 .thread_fn = deferred_init_memmap_chunk, 1978 .fn_arg = zone, 1979 .start = spfn, 1980 .size = epfn_align - spfn, 1981 .align = PAGES_PER_SECTION, 1982 .min_chunk = PAGES_PER_SECTION, 1983 .max_threads = max_threads, 1984 }; 1985 1986 padata_do_multithreaded(&job); 1987 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 1988 epfn_align); 1989 } 1990 zone_empty: 1991 /* Sanity check that the next zone really is unpopulated */ 1992 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); 1993 1994 pr_info("node %d deferred pages initialised in %ums\n", 1995 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 1996 1997 pgdat_init_report_one_done(); 1998 return 0; 1999 } 2000 2001 /* 2002 * If this zone has deferred pages, try to grow it by initializing enough 2003 * deferred pages to satisfy the allocation specified by order, rounded up to 2004 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2005 * of SECTION_SIZE bytes by initializing struct pages in increments of 2006 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2007 * 2008 * Return true when zone was grown, otherwise return false. We return true even 2009 * when we grow less than requested, to let the caller decide if there are 2010 * enough pages to satisfy the allocation. 2011 * 2012 * Note: We use noinline because this function is needed only during boot, and 2013 * it is called from a __ref function _deferred_grow_zone. This way we are 2014 * making sure that it is not inlined into permanent text section. 2015 */ 2016 static noinline bool __init 2017 deferred_grow_zone(struct zone *zone, unsigned int order) 2018 { 2019 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); 2020 pg_data_t *pgdat = zone->zone_pgdat; 2021 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2022 unsigned long spfn, epfn, flags; 2023 unsigned long nr_pages = 0; 2024 u64 i; 2025 2026 /* Only the last zone may have deferred pages */ 2027 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2028 return false; 2029 2030 pgdat_resize_lock(pgdat, &flags); 2031 2032 /* 2033 * If someone grew this zone while we were waiting for spinlock, return 2034 * true, as there might be enough pages already. 2035 */ 2036 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2037 pgdat_resize_unlock(pgdat, &flags); 2038 return true; 2039 } 2040 2041 /* If the zone is empty somebody else may have cleared out the zone */ 2042 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2043 first_deferred_pfn)) { 2044 pgdat->first_deferred_pfn = ULONG_MAX; 2045 pgdat_resize_unlock(pgdat, &flags); 2046 /* Retry only once. */ 2047 return first_deferred_pfn != ULONG_MAX; 2048 } 2049 2050 /* 2051 * Initialize and free pages in MAX_ORDER sized increments so 2052 * that we can avoid introducing any issues with the buddy 2053 * allocator. 
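 *
 * The loop below only stops on a section boundary: the pfn XOR test is
 * smaller than PAGES_PER_SECTION exactly when first_deferred_pfn and spfn
 * agree in every bit above the section offset, i.e. still lie in the same
 * section, in which case we keep going even if the quota has already been
 * met.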
2054 */ 2055 while (spfn < epfn) { 2056 /* update our first deferred PFN for this section */ 2057 first_deferred_pfn = spfn; 2058 2059 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); 2060 touch_nmi_watchdog(); 2061 2062 /* We should only stop along section boundaries */ 2063 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) 2064 continue; 2065 2066 /* If our quota has been met we can stop here */ 2067 if (nr_pages >= nr_pages_needed) 2068 break; 2069 } 2070 2071 pgdat->first_deferred_pfn = spfn; 2072 pgdat_resize_unlock(pgdat, &flags); 2073 2074 return nr_pages > 0; 2075 } 2076 2077 /* 2078 * deferred_grow_zone() is __init, but it is called from 2079 * get_page_from_freelist() during early boot until deferred_pages permanently 2080 * disables this call. This is why we have refdata wrapper to avoid warning, 2081 * and to ensure that the function body gets unloaded. 2082 */ 2083 static bool __ref 2084 _deferred_grow_zone(struct zone *zone, unsigned int order) 2085 { 2086 return deferred_grow_zone(zone, order); 2087 } 2088 2089 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2090 2091 void __init page_alloc_init_late(void) 2092 { 2093 struct zone *zone; 2094 int nid; 2095 2096 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2097 2098 /* There will be num_node_state(N_MEMORY) threads */ 2099 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2100 for_each_node_state(nid, N_MEMORY) { 2101 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2102 } 2103 2104 /* Block until all are initialised */ 2105 wait_for_completion(&pgdat_init_all_done_comp); 2106 2107 /* 2108 * The number of managed pages has changed due to the initialisation 2109 * so the pcpu batch and high limits needs to be updated or the limits 2110 * will be artificially small. 2111 */ 2112 for_each_populated_zone(zone) 2113 zone_pcp_update(zone); 2114 2115 /* 2116 * We initialized the rest of the deferred pages. Permanently disable 2117 * on-demand struct page initialization. 2118 */ 2119 static_branch_disable(&deferred_pages); 2120 2121 /* Reinit limits that are based on free pages after the kernel is up */ 2122 files_maxfiles_init(); 2123 #endif 2124 2125 buffer_init(); 2126 2127 /* Discard memblock private memory */ 2128 memblock_discard(); 2129 2130 for_each_node_state(nid, N_MEMORY) 2131 shuffle_free_memory(NODE_DATA(nid)); 2132 2133 for_each_populated_zone(zone) 2134 set_zone_contiguous(zone); 2135 } 2136 2137 #ifdef CONFIG_CMA 2138 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ 2139 void __init init_cma_reserved_pageblock(struct page *page) 2140 { 2141 unsigned i = pageblock_nr_pages; 2142 struct page *p = page; 2143 2144 do { 2145 __ClearPageReserved(p); 2146 set_page_count(p, 0); 2147 } while (++p, --i); 2148 2149 set_pageblock_migratetype(page, MIGRATE_CMA); 2150 2151 if (pageblock_order >= MAX_ORDER) { 2152 i = pageblock_nr_pages; 2153 p = page; 2154 do { 2155 set_page_refcounted(p); 2156 __free_pages(p, MAX_ORDER - 1); 2157 p += MAX_ORDER_NR_PAGES; 2158 } while (i -= MAX_ORDER_NR_PAGES); 2159 } else { 2160 set_page_refcounted(page); 2161 __free_pages(page, pageblock_order); 2162 } 2163 2164 adjust_managed_page_count(page, pageblock_nr_pages); 2165 } 2166 #endif 2167 2168 /* 2169 * The order of subdivision here is critical for the IO subsystem. 2170 * Please do not alter this order without good reasons and regression 2171 * testing. 
Specifically, as large blocks of memory are subdivided, 2172 * the order in which smaller blocks are delivered depends on the order 2173 * they're subdivided in this function. This is the primary factor 2174 * influencing the order in which pages are delivered to the IO 2175 * subsystem according to empirical testing, and this is also justified 2176 * by considering the behavior of a buddy system containing a single 2177 * large block of memory acted on by a series of small allocations. 2178 * This behavior is a critical factor in sglist merging's success. 2179 * 2180 * -- nyc 2181 */ 2182 static inline void expand(struct zone *zone, struct page *page, 2183 int low, int high, int migratetype) 2184 { 2185 unsigned long size = 1 << high; 2186 2187 while (high > low) { 2188 high--; 2189 size >>= 1; 2190 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 2191 2192 /* 2193 * Mark as guard pages (or page), that will allow to 2194 * merge back to allocator when buddy will be freed. 2195 * Corresponding page table entries will not be touched, 2196 * pages will stay not present in virtual address space 2197 */ 2198 if (set_page_guard(zone, &page[size], high, migratetype)) 2199 continue; 2200 2201 add_to_free_list(&page[size], zone, high, migratetype); 2202 set_buddy_order(&page[size], high); 2203 } 2204 } 2205 2206 static void check_new_page_bad(struct page *page) 2207 { 2208 if (unlikely(page->flags & __PG_HWPOISON)) { 2209 /* Don't complain about hwpoisoned pages */ 2210 page_mapcount_reset(page); /* remove PageBuddy */ 2211 return; 2212 } 2213 2214 bad_page(page, 2215 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 2216 } 2217 2218 /* 2219 * This page is about to be returned from the page allocator 2220 */ 2221 static inline int check_new_page(struct page *page) 2222 { 2223 if (likely(page_expected_state(page, 2224 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 2225 return 0; 2226 2227 check_new_page_bad(page); 2228 return 1; 2229 } 2230 2231 #ifdef CONFIG_DEBUG_VM 2232 /* 2233 * With DEBUG_VM enabled, order-0 pages are checked for expected state when 2234 * being allocated from pcp lists. With debug_pagealloc also enabled, they are 2235 * also checked when pcp lists are refilled from the free lists. 2236 */ 2237 static inline bool check_pcp_refill(struct page *page) 2238 { 2239 if (debug_pagealloc_enabled_static()) 2240 return check_new_page(page); 2241 else 2242 return false; 2243 } 2244 2245 static inline bool check_new_pcp(struct page *page) 2246 { 2247 return check_new_page(page); 2248 } 2249 #else 2250 /* 2251 * With DEBUG_VM disabled, free order-0 pages are checked for expected state 2252 * when pcp lists are being refilled from the free lists. With debug_pagealloc 2253 * enabled, they are also checked when being allocated from the pcp lists. 
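 *
 * Either way a page is checked at least once between leaving the buddy
 * freelists and being handed to the caller; enabling debug_pagealloc adds
 * the second check so that corruption of pages sitting on the pcplists can
 * be caught as well.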
2254 */ 2255 static inline bool check_pcp_refill(struct page *page) 2256 { 2257 return check_new_page(page); 2258 } 2259 static inline bool check_new_pcp(struct page *page) 2260 { 2261 if (debug_pagealloc_enabled_static()) 2262 return check_new_page(page); 2263 else 2264 return false; 2265 } 2266 #endif /* CONFIG_DEBUG_VM */ 2267 2268 static bool check_new_pages(struct page *page, unsigned int order) 2269 { 2270 int i; 2271 for (i = 0; i < (1 << order); i++) { 2272 struct page *p = page + i; 2273 2274 if (unlikely(check_new_page(p))) 2275 return true; 2276 } 2277 2278 return false; 2279 } 2280 2281 inline void post_alloc_hook(struct page *page, unsigned int order, 2282 gfp_t gfp_flags) 2283 { 2284 set_page_private(page, 0); 2285 set_page_refcounted(page); 2286 2287 arch_alloc_page(page, order); 2288 debug_pagealloc_map_pages(page, 1 << order); 2289 kasan_alloc_pages(page, order); 2290 kernel_unpoison_pages(page, 1 << order); 2291 set_page_owner(page, order, gfp_flags); 2292 2293 if (!want_init_on_free() && want_init_on_alloc(gfp_flags)) 2294 kernel_init_free_pages(page, 1 << order); 2295 } 2296 2297 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 2298 unsigned int alloc_flags) 2299 { 2300 post_alloc_hook(page, order, gfp_flags); 2301 2302 if (order && (gfp_flags & __GFP_COMP)) 2303 prep_compound_page(page, order); 2304 2305 /* 2306 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 2307 * allocate the page. The expectation is that the caller is taking 2308 * steps that will free more memory. The caller should avoid the page 2309 * being used for !PFMEMALLOC purposes. 2310 */ 2311 if (alloc_flags & ALLOC_NO_WATERMARKS) 2312 set_page_pfmemalloc(page); 2313 else 2314 clear_page_pfmemalloc(page); 2315 } 2316 2317 /* 2318 * Go through the free lists for the given migratetype and remove 2319 * the smallest available page from the freelists 2320 */ 2321 static __always_inline 2322 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 2323 int migratetype) 2324 { 2325 unsigned int current_order; 2326 struct free_area *area; 2327 struct page *page; 2328 2329 /* Find a page of the appropriate size in the preferred list */ 2330 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 2331 area = &(zone->free_area[current_order]); 2332 page = get_page_from_free_area(area, migratetype); 2333 if (!page) 2334 continue; 2335 del_page_from_free_list(page, zone, current_order); 2336 expand(zone, page, order, current_order, migratetype); 2337 set_pcppage_migratetype(page, migratetype); 2338 return page; 2339 } 2340 2341 return NULL; 2342 } 2343 2344 2345 /* 2346 * This array describes the order lists are fallen back to when 2347 * the free lists for the desirable migrate type are depleted 2348 */ 2349 static int fallbacks[MIGRATE_TYPES][3] = { 2350 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2351 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, 2352 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2353 #ifdef CONFIG_CMA 2354 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */ 2355 #endif 2356 #ifdef CONFIG_MEMORY_ISOLATION 2357 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */ 2358 #endif 2359 }; 2360 2361 #ifdef CONFIG_CMA 2362 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2363 unsigned int order) 2364 { 2365 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 2366 } 2367 #else 2368 static 
inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2369 unsigned int order) { return NULL; } 2370 #endif 2371 2372 /* 2373 * Move the free pages in a range to the freelist tail of the requested type. 2374 * Note that start_page and end_pages are not aligned on a pageblock 2375 * boundary. If alignment is required, use move_freepages_block() 2376 */ 2377 static int move_freepages(struct zone *zone, 2378 struct page *start_page, struct page *end_page, 2379 int migratetype, int *num_movable) 2380 { 2381 struct page *page; 2382 unsigned int order; 2383 int pages_moved = 0; 2384 2385 for (page = start_page; page <= end_page;) { 2386 if (!pfn_valid_within(page_to_pfn(page))) { 2387 page++; 2388 continue; 2389 } 2390 2391 if (!PageBuddy(page)) { 2392 /* 2393 * We assume that pages that could be isolated for 2394 * migration are movable. But we don't actually try 2395 * isolating, as that would be expensive. 2396 */ 2397 if (num_movable && 2398 (PageLRU(page) || __PageMovable(page))) 2399 (*num_movable)++; 2400 2401 page++; 2402 continue; 2403 } 2404 2405 /* Make sure we are not inadvertently changing nodes */ 2406 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 2407 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 2408 2409 order = buddy_order(page); 2410 move_to_free_list(page, zone, order, migratetype); 2411 page += 1 << order; 2412 pages_moved += 1 << order; 2413 } 2414 2415 return pages_moved; 2416 } 2417 2418 int move_freepages_block(struct zone *zone, struct page *page, 2419 int migratetype, int *num_movable) 2420 { 2421 unsigned long start_pfn, end_pfn; 2422 struct page *start_page, *end_page; 2423 2424 if (num_movable) 2425 *num_movable = 0; 2426 2427 start_pfn = page_to_pfn(page); 2428 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 2429 start_page = pfn_to_page(start_pfn); 2430 end_page = start_page + pageblock_nr_pages - 1; 2431 end_pfn = start_pfn + pageblock_nr_pages - 1; 2432 2433 /* Do not cross zone boundaries */ 2434 if (!zone_spans_pfn(zone, start_pfn)) 2435 start_page = page; 2436 if (!zone_spans_pfn(zone, end_pfn)) 2437 return 0; 2438 2439 return move_freepages(zone, start_page, end_page, migratetype, 2440 num_movable); 2441 } 2442 2443 static void change_pageblock_range(struct page *pageblock_page, 2444 int start_order, int migratetype) 2445 { 2446 int nr_pageblocks = 1 << (start_order - pageblock_order); 2447 2448 while (nr_pageblocks--) { 2449 set_pageblock_migratetype(pageblock_page, migratetype); 2450 pageblock_page += pageblock_nr_pages; 2451 } 2452 } 2453 2454 /* 2455 * When we are falling back to another migratetype during allocation, try to 2456 * steal extra free pages from the same pageblocks to satisfy further 2457 * allocations, instead of polluting multiple pageblocks. 2458 * 2459 * If we are stealing a relatively large buddy page, it is likely there will 2460 * be more free pages in the pageblock, so try to steal them all. For 2461 * reclaimable and unmovable allocations, we steal regardless of page size, 2462 * as fragmentation caused by those allocations polluting movable pageblocks 2463 * is worse than movable allocations stealing from unmovable and reclaimable 2464 * pageblocks. 2465 */ 2466 static bool can_steal_fallback(unsigned int order, int start_mt) 2467 { 2468 /* 2469 * Leaving this order check is intended, although there is 2470 * relaxed order check in next check. 
The reason is that 2471 * we can actually steal whole pageblock if this condition met, 2472 * but, below check doesn't guarantee it and that is just heuristic 2473 * so could be changed anytime. 2474 */ 2475 if (order >= pageblock_order) 2476 return true; 2477 2478 if (order >= pageblock_order / 2 || 2479 start_mt == MIGRATE_RECLAIMABLE || 2480 start_mt == MIGRATE_UNMOVABLE || 2481 page_group_by_mobility_disabled) 2482 return true; 2483 2484 return false; 2485 } 2486 2487 static inline bool boost_watermark(struct zone *zone) 2488 { 2489 unsigned long max_boost; 2490 2491 if (!watermark_boost_factor) 2492 return false; 2493 /* 2494 * Don't bother in zones that are unlikely to produce results. 2495 * On small machines, including kdump capture kernels running 2496 * in a small area, boosting the watermark can cause an out of 2497 * memory situation immediately. 2498 */ 2499 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2500 return false; 2501 2502 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2503 watermark_boost_factor, 10000); 2504 2505 /* 2506 * high watermark may be uninitialised if fragmentation occurs 2507 * very early in boot so do not boost. We do not fall 2508 * through and boost by pageblock_nr_pages as failing 2509 * allocations that early means that reclaim is not going 2510 * to help and it may even be impossible to reclaim the 2511 * boosted watermark resulting in a hang. 2512 */ 2513 if (!max_boost) 2514 return false; 2515 2516 max_boost = max(pageblock_nr_pages, max_boost); 2517 2518 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2519 max_boost); 2520 2521 return true; 2522 } 2523 2524 /* 2525 * This function implements actual steal behaviour. If order is large enough, 2526 * we can steal whole pageblock. If not, we first move freepages in this 2527 * pageblock to our migratetype and determine how many already-allocated pages 2528 * are there in the pageblock with a compatible migratetype. If at least half 2529 * of pages are free or compatible, we can change migratetype of the pageblock 2530 * itself, so pages freed in the future will be put on the correct free list. 2531 */ 2532 static void steal_suitable_fallback(struct zone *zone, struct page *page, 2533 unsigned int alloc_flags, int start_type, bool whole_block) 2534 { 2535 unsigned int current_order = buddy_order(page); 2536 int free_pages, movable_pages, alike_pages; 2537 int old_block_type; 2538 2539 old_block_type = get_pageblock_migratetype(page); 2540 2541 /* 2542 * This can happen due to races and we want to prevent broken 2543 * highatomic accounting. 2544 */ 2545 if (is_migrate_highatomic(old_block_type)) 2546 goto single_page; 2547 2548 /* Take ownership for orders >= pageblock_order */ 2549 if (current_order >= pageblock_order) { 2550 change_pageblock_range(page, current_order, start_type); 2551 goto single_page; 2552 } 2553 2554 /* 2555 * Boost watermarks to increase reclaim pressure to reduce the 2556 * likelihood of future fallbacks. Wake kswapd now as the node 2557 * may be balanced overall and kswapd will not wake naturally. 2558 */ 2559 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2560 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2561 2562 /* We are not allowed to try stealing from the whole block */ 2563 if (!whole_block) 2564 goto single_page; 2565 2566 free_pages = move_freepages_block(zone, page, start_type, 2567 &movable_pages); 2568 /* 2569 * Determine how many pages are compatible with our allocation. 
2570 * For movable allocation, it's the number of movable pages which 2571 * we just obtained. For other types it's a bit more tricky. 2572 */ 2573 if (start_type == MIGRATE_MOVABLE) { 2574 alike_pages = movable_pages; 2575 } else { 2576 /* 2577 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2578 * to MOVABLE pageblock, consider all non-movable pages as 2579 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2580 * vice versa, be conservative since we can't distinguish the 2581 * exact migratetype of non-movable pages. 2582 */ 2583 if (old_block_type == MIGRATE_MOVABLE) 2584 alike_pages = pageblock_nr_pages 2585 - (free_pages + movable_pages); 2586 else 2587 alike_pages = 0; 2588 } 2589 2590 /* moving whole block can fail due to zone boundary conditions */ 2591 if (!free_pages) 2592 goto single_page; 2593 2594 /* 2595 * If a sufficient number of pages in the block are either free or of 2596 * comparable migratability as our allocation, claim the whole block. 2597 */ 2598 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2599 page_group_by_mobility_disabled) 2600 set_pageblock_migratetype(page, start_type); 2601 2602 return; 2603 2604 single_page: 2605 move_to_free_list(page, zone, current_order, start_type); 2606 } 2607 2608 /* 2609 * Check whether there is a suitable fallback freepage with requested order. 2610 * If only_stealable is true, this function returns fallback_mt only if 2611 * we can steal other freepages all together. This would help to reduce 2612 * fragmentation due to mixed migratetype pages in one pageblock. 2613 */ 2614 int find_suitable_fallback(struct free_area *area, unsigned int order, 2615 int migratetype, bool only_stealable, bool *can_steal) 2616 { 2617 int i; 2618 int fallback_mt; 2619 2620 if (area->nr_free == 0) 2621 return -1; 2622 2623 *can_steal = false; 2624 for (i = 0;; i++) { 2625 fallback_mt = fallbacks[migratetype][i]; 2626 if (fallback_mt == MIGRATE_TYPES) 2627 break; 2628 2629 if (free_area_empty(area, fallback_mt)) 2630 continue; 2631 2632 if (can_steal_fallback(order, migratetype)) 2633 *can_steal = true; 2634 2635 if (!only_stealable) 2636 return fallback_mt; 2637 2638 if (*can_steal) 2639 return fallback_mt; 2640 } 2641 2642 return -1; 2643 } 2644 2645 /* 2646 * Reserve a pageblock for exclusive use of high-order atomic allocations if 2647 * there are no empty page blocks that contain a page with a suitable order 2648 */ 2649 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2650 unsigned int alloc_order) 2651 { 2652 int mt; 2653 unsigned long max_managed, flags; 2654 2655 /* 2656 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2657 * Check is race-prone but harmless. 2658 */ 2659 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 2660 if (zone->nr_reserved_highatomic >= max_managed) 2661 return; 2662 2663 spin_lock_irqsave(&zone->lock, flags); 2664 2665 /* Recheck the nr_reserved_highatomic limit under the lock */ 2666 if (zone->nr_reserved_highatomic >= max_managed) 2667 goto out_unlock; 2668 2669 /* Yoink! 
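 * Claim this pageblock for high-order atomic allocations: mark it
 * MIGRATE_HIGHATOMIC and move its free pages onto the highatomic freelist
 * so that ordinary allocations stop nibbling away at the reserve.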
*/ 2670 mt = get_pageblock_migratetype(page); 2671 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt) 2672 && !is_migrate_cma(mt)) { 2673 zone->nr_reserved_highatomic += pageblock_nr_pages; 2674 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2675 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 2676 } 2677 2678 out_unlock: 2679 spin_unlock_irqrestore(&zone->lock, flags); 2680 } 2681 2682 /* 2683 * Used when an allocation is about to fail under memory pressure. This 2684 * potentially hurts the reliability of high-order allocations when under 2685 * intense memory pressure but failed atomic allocations should be easier 2686 * to recover from than an OOM. 2687 * 2688 * If @force is true, try to unreserve a pageblock even though highatomic 2689 * pageblock is exhausted. 2690 */ 2691 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2692 bool force) 2693 { 2694 struct zonelist *zonelist = ac->zonelist; 2695 unsigned long flags; 2696 struct zoneref *z; 2697 struct zone *zone; 2698 struct page *page; 2699 int order; 2700 bool ret; 2701 2702 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2703 ac->nodemask) { 2704 /* 2705 * Preserve at least one pageblock unless memory pressure 2706 * is really high. 2707 */ 2708 if (!force && zone->nr_reserved_highatomic <= 2709 pageblock_nr_pages) 2710 continue; 2711 2712 spin_lock_irqsave(&zone->lock, flags); 2713 for (order = 0; order < MAX_ORDER; order++) { 2714 struct free_area *area = &(zone->free_area[order]); 2715 2716 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 2717 if (!page) 2718 continue; 2719 2720 /* 2721 * In page freeing path, migratetype change is racy so 2722 * we can counter several free pages in a pageblock 2723 * in this loop althoug we changed the pageblock type 2724 * from highatomic to ac->migratetype. So we should 2725 * adjust the count once. 2726 */ 2727 if (is_migrate_highatomic_page(page)) { 2728 /* 2729 * It should never happen but changes to 2730 * locking could inadvertently allow a per-cpu 2731 * drain to add pages to MIGRATE_HIGHATOMIC 2732 * while unreserving so be safe and watch for 2733 * underflows. 2734 */ 2735 zone->nr_reserved_highatomic -= min( 2736 pageblock_nr_pages, 2737 zone->nr_reserved_highatomic); 2738 } 2739 2740 /* 2741 * Convert to ac->migratetype and avoid the normal 2742 * pageblock stealing heuristics. Minimally, the caller 2743 * is doing the work and needs the pages. More 2744 * importantly, if the block was always converted to 2745 * MIGRATE_UNMOVABLE or another type then the number 2746 * of pageblocks that cannot be completely freed 2747 * may increase. 2748 */ 2749 set_pageblock_migratetype(page, ac->migratetype); 2750 ret = move_freepages_block(zone, page, ac->migratetype, 2751 NULL); 2752 if (ret) { 2753 spin_unlock_irqrestore(&zone->lock, flags); 2754 return ret; 2755 } 2756 } 2757 spin_unlock_irqrestore(&zone->lock, flags); 2758 } 2759 2760 return false; 2761 } 2762 2763 /* 2764 * Try finding a free buddy page on the fallback list and put it on the free 2765 * list of requested migratetype, possibly along with other pages from the same 2766 * block, depending on fragmentation avoidance heuristics. Returns true if 2767 * fallback was found so that __rmqueue_smallest() can grab it. 2768 * 2769 * The use of signed ints for order and current_order is a deliberate 2770 * deviation from the rest of this file, to make the for loop 2771 * condition simpler. 
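 *
 * (With unsigned types, "current_order >= min_order" would always hold for
 * min_order == 0, so the downward scan could not terminate once
 * current_order wrapped below zero.)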
2772 */ 2773 static __always_inline bool 2774 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2775 unsigned int alloc_flags) 2776 { 2777 struct free_area *area; 2778 int current_order; 2779 int min_order = order; 2780 struct page *page; 2781 int fallback_mt; 2782 bool can_steal; 2783 2784 /* 2785 * Do not steal pages from freelists belonging to other pageblocks 2786 * i.e. orders < pageblock_order. If there are no local zones free, 2787 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2788 */ 2789 if (alloc_flags & ALLOC_NOFRAGMENT) 2790 min_order = pageblock_order; 2791 2792 /* 2793 * Find the largest available free page in the other list. This roughly 2794 * approximates finding the pageblock with the most free pages, which 2795 * would be too costly to do exactly. 2796 */ 2797 for (current_order = MAX_ORDER - 1; current_order >= min_order; 2798 --current_order) { 2799 area = &(zone->free_area[current_order]); 2800 fallback_mt = find_suitable_fallback(area, current_order, 2801 start_migratetype, false, &can_steal); 2802 if (fallback_mt == -1) 2803 continue; 2804 2805 /* 2806 * We cannot steal all free pages from the pageblock and the 2807 * requested migratetype is movable. In that case it's better to 2808 * steal and split the smallest available page instead of the 2809 * largest available page, because even if the next movable 2810 * allocation falls back into a different pageblock than this 2811 * one, it won't cause permanent fragmentation. 2812 */ 2813 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2814 && current_order > order) 2815 goto find_smallest; 2816 2817 goto do_steal; 2818 } 2819 2820 return false; 2821 2822 find_smallest: 2823 for (current_order = order; current_order < MAX_ORDER; 2824 current_order++) { 2825 area = &(zone->free_area[current_order]); 2826 fallback_mt = find_suitable_fallback(area, current_order, 2827 start_migratetype, false, &can_steal); 2828 if (fallback_mt != -1) 2829 break; 2830 } 2831 2832 /* 2833 * This should not happen - we already found a suitable fallback 2834 * when looking for the largest page. 2835 */ 2836 VM_BUG_ON(current_order == MAX_ORDER); 2837 2838 do_steal: 2839 page = get_page_from_free_area(area, fallback_mt); 2840 2841 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2842 can_steal); 2843 2844 trace_mm_page_alloc_extfrag(page, order, current_order, 2845 start_migratetype, fallback_mt); 2846 2847 return true; 2848 2849 } 2850 2851 /* 2852 * Do the hard work of removing an element from the buddy allocator. 2853 * Call me with the zone->lock already held. 2854 */ 2855 static __always_inline struct page * 2856 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2857 unsigned int alloc_flags) 2858 { 2859 struct page *page; 2860 2861 #ifdef CONFIG_CMA 2862 /* 2863 * Balance movable allocations between regular and CMA areas by 2864 * allocating from CMA when over half of the zone's free memory 2865 * is in the CMA area. 
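 *
 * (Illustrative numbers: with 1 GiB of free memory in the zone of which
 * 600 MiB is free CMA, a movable request with ALLOC_CMA is served from CMA
 * first; once free CMA drops to half of the zone's free pages or below,
 * the regular freelists are tried first again.)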
2866 */ 2867 if (alloc_flags & ALLOC_CMA && 2868 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2869 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2870 page = __rmqueue_cma_fallback(zone, order); 2871 if (page) 2872 return page; 2873 } 2874 #endif 2875 retry: 2876 page = __rmqueue_smallest(zone, order, migratetype); 2877 if (unlikely(!page)) { 2878 if (alloc_flags & ALLOC_CMA) 2879 page = __rmqueue_cma_fallback(zone, order); 2880 2881 if (!page && __rmqueue_fallback(zone, order, migratetype, 2882 alloc_flags)) 2883 goto retry; 2884 } 2885 2886 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2887 return page; 2888 } 2889 2890 /* 2891 * Obtain a specified number of elements from the buddy allocator, all under 2892 * a single hold of the lock, for efficiency. Add them to the supplied list. 2893 * Returns the number of new pages which were placed at *list. 2894 */ 2895 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2896 unsigned long count, struct list_head *list, 2897 int migratetype, unsigned int alloc_flags) 2898 { 2899 int i, alloced = 0; 2900 2901 spin_lock(&zone->lock); 2902 for (i = 0; i < count; ++i) { 2903 struct page *page = __rmqueue(zone, order, migratetype, 2904 alloc_flags); 2905 if (unlikely(page == NULL)) 2906 break; 2907 2908 if (unlikely(check_pcp_refill(page))) 2909 continue; 2910 2911 /* 2912 * Split buddy pages returned by expand() are received here in 2913 * physical page order. The page is added to the tail of 2914 * caller's list. From the callers perspective, the linked list 2915 * is ordered by page number under some conditions. This is 2916 * useful for IO devices that can forward direction from the 2917 * head, thus also in the physical page order. This is useful 2918 * for IO devices that can merge IO requests if the physical 2919 * pages are ordered properly. 2920 */ 2921 list_add_tail(&page->lru, list); 2922 alloced++; 2923 if (is_migrate_cma(get_pcppage_migratetype(page))) 2924 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2925 -(1 << order)); 2926 } 2927 2928 /* 2929 * i pages were removed from the buddy list even if some leak due 2930 * to check_pcp_refill failing so adjust NR_FREE_PAGES based 2931 * on i. Do not confuse with 'alloced' which is the number of 2932 * pages added to the pcp list. 2933 */ 2934 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2935 spin_unlock(&zone->lock); 2936 return alloced; 2937 } 2938 2939 #ifdef CONFIG_NUMA 2940 /* 2941 * Called from the vmstat counter updater to drain pagesets of this 2942 * currently executing processor on remote nodes after they have 2943 * expired. 2944 * 2945 * Note that this function must be called with the thread pinned to 2946 * a single processor. 2947 */ 2948 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2949 { 2950 unsigned long flags; 2951 int to_drain, batch; 2952 2953 local_irq_save(flags); 2954 batch = READ_ONCE(pcp->batch); 2955 to_drain = min(pcp->count, batch); 2956 if (to_drain > 0) 2957 free_pcppages_bulk(zone, to_drain, pcp); 2958 local_irq_restore(flags); 2959 } 2960 #endif 2961 2962 /* 2963 * Drain pcplists of the indicated processor and zone. 2964 * 2965 * The processor must either be the current processor and the 2966 * thread pinned to the current processor or a processor that 2967 * is not online. 
2968 */ 2969 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2970 { 2971 unsigned long flags; 2972 struct per_cpu_pageset *pset; 2973 struct per_cpu_pages *pcp; 2974 2975 local_irq_save(flags); 2976 pset = per_cpu_ptr(zone->pageset, cpu); 2977 2978 pcp = &pset->pcp; 2979 if (pcp->count) 2980 free_pcppages_bulk(zone, pcp->count, pcp); 2981 local_irq_restore(flags); 2982 } 2983 2984 /* 2985 * Drain pcplists of all zones on the indicated processor. 2986 * 2987 * The processor must either be the current processor and the 2988 * thread pinned to the current processor or a processor that 2989 * is not online. 2990 */ 2991 static void drain_pages(unsigned int cpu) 2992 { 2993 struct zone *zone; 2994 2995 for_each_populated_zone(zone) { 2996 drain_pages_zone(cpu, zone); 2997 } 2998 } 2999 3000 /* 3001 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 3002 * 3003 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 3004 * the single zone's pages. 3005 */ 3006 void drain_local_pages(struct zone *zone) 3007 { 3008 int cpu = smp_processor_id(); 3009 3010 if (zone) 3011 drain_pages_zone(cpu, zone); 3012 else 3013 drain_pages(cpu); 3014 } 3015 3016 static void drain_local_pages_wq(struct work_struct *work) 3017 { 3018 struct pcpu_drain *drain; 3019 3020 drain = container_of(work, struct pcpu_drain, work); 3021 3022 /* 3023 * drain_all_pages doesn't use proper cpu hotplug protection so 3024 * we can race with cpu offline when the WQ can move this from 3025 * a cpu pinned worker to an unbound one. We can operate on a different 3026 * cpu which is allright but we also have to make sure to not move to 3027 * a different one. 3028 */ 3029 preempt_disable(); 3030 drain_local_pages(drain->zone); 3031 preempt_enable(); 3032 } 3033 3034 /* 3035 * The implementation of drain_all_pages(), exposing an extra parameter to 3036 * drain on all cpus. 3037 * 3038 * drain_all_pages() is optimized to only execute on cpus where pcplists are 3039 * not empty. The check for non-emptiness can however race with a free to 3040 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 3041 * that need the guarantee that every CPU has drained can disable the 3042 * optimizing racy check. 3043 */ 3044 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 3045 { 3046 int cpu; 3047 3048 /* 3049 * Allocate in the BSS so we wont require allocation in 3050 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 3051 */ 3052 static cpumask_t cpus_with_pcps; 3053 3054 /* 3055 * Make sure nobody triggers this path before mm_percpu_wq is fully 3056 * initialized. 3057 */ 3058 if (WARN_ON_ONCE(!mm_percpu_wq)) 3059 return; 3060 3061 /* 3062 * Do not drain if one is already in progress unless it's specific to 3063 * a zone. Such callers are primarily CMA and memory hotplug and need 3064 * the drain to be complete when the call returns. 3065 */ 3066 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 3067 if (!zone) 3068 return; 3069 mutex_lock(&pcpu_drain_mutex); 3070 } 3071 3072 /* 3073 * We don't care about racing with CPU hotplug event 3074 * as offline notification will cause the notified 3075 * cpu to drain that CPU pcps and on_each_cpu_mask 3076 * disables preemption as part of its processing 3077 */ 3078 for_each_online_cpu(cpu) { 3079 struct per_cpu_pageset *pcp; 3080 struct zone *z; 3081 bool has_pcps = false; 3082 3083 if (force_all_cpus) { 3084 /* 3085 * The pcp.count check is racy, some callers need a 3086 * guarantee that no cpu is missed. 
3087 */ 3088 has_pcps = true; 3089 } else if (zone) { 3090 pcp = per_cpu_ptr(zone->pageset, cpu); 3091 if (pcp->pcp.count) 3092 has_pcps = true; 3093 } else { 3094 for_each_populated_zone(z) { 3095 pcp = per_cpu_ptr(z->pageset, cpu); 3096 if (pcp->pcp.count) { 3097 has_pcps = true; 3098 break; 3099 } 3100 } 3101 } 3102 3103 if (has_pcps) 3104 cpumask_set_cpu(cpu, &cpus_with_pcps); 3105 else 3106 cpumask_clear_cpu(cpu, &cpus_with_pcps); 3107 } 3108 3109 for_each_cpu(cpu, &cpus_with_pcps) { 3110 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu); 3111 3112 drain->zone = zone; 3113 INIT_WORK(&drain->work, drain_local_pages_wq); 3114 queue_work_on(cpu, mm_percpu_wq, &drain->work); 3115 } 3116 for_each_cpu(cpu, &cpus_with_pcps) 3117 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); 3118 3119 mutex_unlock(&pcpu_drain_mutex); 3120 } 3121 3122 /* 3123 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 3124 * 3125 * When zone parameter is non-NULL, spill just the single zone's pages. 3126 * 3127 * Note that this can be extremely slow as the draining happens in a workqueue. 3128 */ 3129 void drain_all_pages(struct zone *zone) 3130 { 3131 __drain_all_pages(zone, false); 3132 } 3133 3134 #ifdef CONFIG_HIBERNATION 3135 3136 /* 3137 * Touch the watchdog for every WD_PAGE_COUNT pages. 3138 */ 3139 #define WD_PAGE_COUNT (128*1024) 3140 3141 void mark_free_pages(struct zone *zone) 3142 { 3143 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; 3144 unsigned long flags; 3145 unsigned int order, t; 3146 struct page *page; 3147 3148 if (zone_is_empty(zone)) 3149 return; 3150 3151 spin_lock_irqsave(&zone->lock, flags); 3152 3153 max_zone_pfn = zone_end_pfn(zone); 3154 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 3155 if (pfn_valid(pfn)) { 3156 page = pfn_to_page(pfn); 3157 3158 if (!--page_count) { 3159 touch_nmi_watchdog(); 3160 page_count = WD_PAGE_COUNT; 3161 } 3162 3163 if (page_zone(page) != zone) 3164 continue; 3165 3166 if (!swsusp_page_is_forbidden(page)) 3167 swsusp_unset_page_free(page); 3168 } 3169 3170 for_each_migratetype_order(order, t) { 3171 list_for_each_entry(page, 3172 &zone->free_area[order].free_list[t], lru) { 3173 unsigned long i; 3174 3175 pfn = page_to_pfn(page); 3176 for (i = 0; i < (1UL << order); i++) { 3177 if (!--page_count) { 3178 touch_nmi_watchdog(); 3179 page_count = WD_PAGE_COUNT; 3180 } 3181 swsusp_set_page_free(pfn_to_page(pfn + i)); 3182 } 3183 } 3184 } 3185 spin_unlock_irqrestore(&zone->lock, flags); 3186 } 3187 #endif /* CONFIG_PM */ 3188 3189 static bool free_unref_page_prepare(struct page *page, unsigned long pfn) 3190 { 3191 int migratetype; 3192 3193 if (!free_pcp_prepare(page)) 3194 return false; 3195 3196 migratetype = get_pfnblock_migratetype(page, pfn); 3197 set_pcppage_migratetype(page, migratetype); 3198 return true; 3199 } 3200 3201 static void free_unref_page_commit(struct page *page, unsigned long pfn) 3202 { 3203 struct zone *zone = page_zone(page); 3204 struct per_cpu_pages *pcp; 3205 int migratetype; 3206 3207 migratetype = get_pcppage_migratetype(page); 3208 __count_vm_event(PGFREE); 3209 3210 /* 3211 * We only track unmovable, reclaimable and movable on pcp lists. 3212 * Free ISOLATE pages back to the allocator because they are being 3213 * offlined but treat HIGHATOMIC as movable pages so we can get those 3214 * areas back if necessary. 
Otherwise, we may have to free 3215 * excessively into the page allocator 3216 */ 3217 if (migratetype >= MIGRATE_PCPTYPES) { 3218 if (unlikely(is_migrate_isolate(migratetype))) { 3219 free_one_page(zone, page, pfn, 0, migratetype, 3220 FPI_NONE); 3221 return; 3222 } 3223 migratetype = MIGRATE_MOVABLE; 3224 } 3225 3226 pcp = &this_cpu_ptr(zone->pageset)->pcp; 3227 list_add(&page->lru, &pcp->lists[migratetype]); 3228 pcp->count++; 3229 if (pcp->count >= READ_ONCE(pcp->high)) 3230 free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp); 3231 } 3232 3233 /* 3234 * Free a 0-order page 3235 */ 3236 void free_unref_page(struct page *page) 3237 { 3238 unsigned long flags; 3239 unsigned long pfn = page_to_pfn(page); 3240 3241 if (!free_unref_page_prepare(page, pfn)) 3242 return; 3243 3244 local_irq_save(flags); 3245 free_unref_page_commit(page, pfn); 3246 local_irq_restore(flags); 3247 } 3248 3249 /* 3250 * Free a list of 0-order pages 3251 */ 3252 void free_unref_page_list(struct list_head *list) 3253 { 3254 struct page *page, *next; 3255 unsigned long flags, pfn; 3256 int batch_count = 0; 3257 3258 /* Prepare pages for freeing */ 3259 list_for_each_entry_safe(page, next, list, lru) { 3260 pfn = page_to_pfn(page); 3261 if (!free_unref_page_prepare(page, pfn)) 3262 list_del(&page->lru); 3263 set_page_private(page, pfn); 3264 } 3265 3266 local_irq_save(flags); 3267 list_for_each_entry_safe(page, next, list, lru) { 3268 unsigned long pfn = page_private(page); 3269 3270 set_page_private(page, 0); 3271 trace_mm_page_free_batched(page); 3272 free_unref_page_commit(page, pfn); 3273 3274 /* 3275 * Guard against excessive IRQ disabled times when we get 3276 * a large list of pages to free. 3277 */ 3278 if (++batch_count == SWAP_CLUSTER_MAX) { 3279 local_irq_restore(flags); 3280 batch_count = 0; 3281 local_irq_save(flags); 3282 } 3283 } 3284 local_irq_restore(flags); 3285 } 3286 3287 /* 3288 * split_page takes a non-compound higher-order page, and splits it into 3289 * n (1<<order) sub-pages: page[0..n] 3290 * Each sub-page must be freed individually. 3291 * 3292 * Note: this is probably too low level an operation for use in drivers. 3293 * Please consult with lkml before using this in your driver. 3294 */ 3295 void split_page(struct page *page, unsigned int order) 3296 { 3297 int i; 3298 3299 VM_BUG_ON_PAGE(PageCompound(page), page); 3300 VM_BUG_ON_PAGE(!page_count(page), page); 3301 3302 for (i = 1; i < (1 << order); i++) 3303 set_page_refcounted(page + i); 3304 split_page_owner(page, 1 << order); 3305 } 3306 EXPORT_SYMBOL_GPL(split_page); 3307 3308 int __isolate_free_page(struct page *page, unsigned int order) 3309 { 3310 unsigned long watermark; 3311 struct zone *zone; 3312 int mt; 3313 3314 BUG_ON(!PageBuddy(page)); 3315 3316 zone = page_zone(page); 3317 mt = get_pageblock_migratetype(page); 3318 3319 if (!is_migrate_isolate(mt)) { 3320 /* 3321 * Obey watermarks as if the page was being allocated. We can 3322 * emulate a high-order watermark check with a raised order-0 3323 * watermark, because we already know our high-order page 3324 * exists. 
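 *
 * (For example, isolating an order-3 page checks the order-0 watermark
 * against WMARK_MIN + 8 pages, i.e. pulling those 8 pages out must not
 * push the zone below its min watermark.)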
3325 */ 3326 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3327 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3328 return 0; 3329 3330 __mod_zone_freepage_state(zone, -(1UL << order), mt); 3331 } 3332 3333 /* Remove page from free list */ 3334 3335 del_page_from_free_list(page, zone, order); 3336 3337 /* 3338 * Set the pageblock if the isolated page is at least half of a 3339 * pageblock 3340 */ 3341 if (order >= pageblock_order - 1) { 3342 struct page *endpage = page + (1 << order) - 1; 3343 for (; page < endpage; page += pageblock_nr_pages) { 3344 int mt = get_pageblock_migratetype(page); 3345 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) 3346 && !is_migrate_highatomic(mt)) 3347 set_pageblock_migratetype(page, 3348 MIGRATE_MOVABLE); 3349 } 3350 } 3351 3352 3353 return 1UL << order; 3354 } 3355 3356 /** 3357 * __putback_isolated_page - Return a now-isolated page back where we got it 3358 * @page: Page that was isolated 3359 * @order: Order of the isolated page 3360 * @mt: The page's pageblock's migratetype 3361 * 3362 * This function is meant to return a page pulled from the free lists via 3363 * __isolate_free_page back to the free lists they were pulled from. 3364 */ 3365 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3366 { 3367 struct zone *zone = page_zone(page); 3368 3369 /* zone lock should be held when this function is called */ 3370 lockdep_assert_held(&zone->lock); 3371 3372 /* Return isolated page to tail of freelist. */ 3373 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3374 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3375 } 3376 3377 /* 3378 * Update NUMA hit/miss statistics 3379 * 3380 * Must be called with interrupts disabled. 3381 */ 3382 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) 3383 { 3384 #ifdef CONFIG_NUMA 3385 enum numa_stat_item local_stat = NUMA_LOCAL; 3386 3387 /* skip numa counters update if numa stats is disabled */ 3388 if (!static_branch_likely(&vm_numa_stat_key)) 3389 return; 3390 3391 if (zone_to_nid(z) != numa_node_id()) 3392 local_stat = NUMA_OTHER; 3393 3394 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3395 __inc_numa_state(z, NUMA_HIT); 3396 else { 3397 __inc_numa_state(z, NUMA_MISS); 3398 __inc_numa_state(preferred_zone, NUMA_FOREIGN); 3399 } 3400 __inc_numa_state(z, local_stat); 3401 #endif 3402 } 3403 3404 /* Remove page from the per-cpu list, caller must protect the list */ 3405 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, 3406 unsigned int alloc_flags, 3407 struct per_cpu_pages *pcp, 3408 struct list_head *list) 3409 { 3410 struct page *page; 3411 3412 do { 3413 if (list_empty(list)) { 3414 pcp->count += rmqueue_bulk(zone, 0, 3415 READ_ONCE(pcp->batch), list, 3416 migratetype, alloc_flags); 3417 if (unlikely(list_empty(list))) 3418 return NULL; 3419 } 3420 3421 page = list_first_entry(list, struct page, lru); 3422 list_del(&page->lru); 3423 pcp->count--; 3424 } while (check_new_pcp(page)); 3425 3426 return page; 3427 } 3428 3429 /* Lock and remove page from the per-cpu list */ 3430 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3431 struct zone *zone, gfp_t gfp_flags, 3432 int migratetype, unsigned int alloc_flags) 3433 { 3434 struct per_cpu_pages *pcp; 3435 struct list_head *list; 3436 struct page *page; 3437 unsigned long flags; 3438 3439 local_irq_save(flags); 3440 pcp = &this_cpu_ptr(zone->pageset)->pcp; 3441 list = &pcp->lists[migratetype]; 3442 page = __rmqueue_pcplist(zone, migratetype, 
alloc_flags, pcp, list); 3443 if (page) { 3444 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); 3445 zone_statistics(preferred_zone, zone); 3446 } 3447 local_irq_restore(flags); 3448 return page; 3449 } 3450 3451 /* 3452 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 3453 */ 3454 static inline 3455 struct page *rmqueue(struct zone *preferred_zone, 3456 struct zone *zone, unsigned int order, 3457 gfp_t gfp_flags, unsigned int alloc_flags, 3458 int migratetype) 3459 { 3460 unsigned long flags; 3461 struct page *page; 3462 3463 if (likely(order == 0)) { 3464 /* 3465 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and 3466 * we need to skip it when CMA area isn't allowed. 3467 */ 3468 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || 3469 migratetype != MIGRATE_MOVABLE) { 3470 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, 3471 migratetype, alloc_flags); 3472 goto out; 3473 } 3474 } 3475 3476 /* 3477 * We most definitely don't want callers attempting to 3478 * allocate greater than order-1 page units with __GFP_NOFAIL. 3479 */ 3480 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 3481 spin_lock_irqsave(&zone->lock, flags); 3482 3483 do { 3484 page = NULL; 3485 /* 3486 * order-0 request can reach here when the pcplist is skipped 3487 * due to non-CMA allocation context. HIGHATOMIC area is 3488 * reserved for high-order atomic allocation, so order-0 3489 * request should skip it. 3490 */ 3491 if (order > 0 && alloc_flags & ALLOC_HARDER) { 3492 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3493 if (page) 3494 trace_mm_page_alloc_zone_locked(page, order, migratetype); 3495 } 3496 if (!page) 3497 page = __rmqueue(zone, order, migratetype, alloc_flags); 3498 } while (page && check_new_pages(page, order)); 3499 spin_unlock(&zone->lock); 3500 if (!page) 3501 goto failed; 3502 __mod_zone_freepage_state(zone, -(1 << order), 3503 get_pcppage_migratetype(page)); 3504 3505 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3506 zone_statistics(preferred_zone, zone); 3507 local_irq_restore(flags); 3508 3509 out: 3510 /* Separate test+clear to avoid unnecessary atomics */ 3511 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { 3512 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3513 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3514 } 3515 3516 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3517 return page; 3518 3519 failed: 3520 local_irq_restore(flags); 3521 return NULL; 3522 } 3523 3524 #ifdef CONFIG_FAIL_PAGE_ALLOC 3525 3526 static struct { 3527 struct fault_attr attr; 3528 3529 bool ignore_gfp_highmem; 3530 bool ignore_gfp_reclaim; 3531 u32 min_order; 3532 } fail_page_alloc = { 3533 .attr = FAULT_ATTR_INITIALIZER, 3534 .ignore_gfp_reclaim = true, 3535 .ignore_gfp_highmem = true, 3536 .min_order = 1, 3537 }; 3538 3539 static int __init setup_fail_page_alloc(char *str) 3540 { 3541 return setup_fault_attr(&fail_page_alloc.attr, str); 3542 } 3543 __setup("fail_page_alloc=", setup_fail_page_alloc); 3544 3545 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3546 { 3547 if (order < fail_page_alloc.min_order) 3548 return false; 3549 if (gfp_mask & __GFP_NOFAIL) 3550 return false; 3551 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 3552 return false; 3553 if (fail_page_alloc.ignore_gfp_reclaim && 3554 (gfp_mask & __GFP_DIRECT_RECLAIM)) 3555 return false; 3556 3557 return should_fail(&fail_page_alloc.attr, 1 << order); 3558 } 3559 3560 #ifdef 
CONFIG_FAULT_INJECTION_DEBUG_FS 3561 3562 static int __init fail_page_alloc_debugfs(void) 3563 { 3564 umode_t mode = S_IFREG | 0600; 3565 struct dentry *dir; 3566 3567 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 3568 &fail_page_alloc.attr); 3569 3570 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3571 &fail_page_alloc.ignore_gfp_reclaim); 3572 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 3573 &fail_page_alloc.ignore_gfp_highmem); 3574 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); 3575 3576 return 0; 3577 } 3578 3579 late_initcall(fail_page_alloc_debugfs); 3580 3581 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3582 3583 #else /* CONFIG_FAIL_PAGE_ALLOC */ 3584 3585 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3586 { 3587 return false; 3588 } 3589 3590 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 3591 3592 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3593 { 3594 return __should_fail_alloc_page(gfp_mask, order); 3595 } 3596 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 3597 3598 static inline long __zone_watermark_unusable_free(struct zone *z, 3599 unsigned int order, unsigned int alloc_flags) 3600 { 3601 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); 3602 long unusable_free = (1 << order) - 1; 3603 3604 /* 3605 * If the caller does not have rights to ALLOC_HARDER then subtract 3606 * the high-atomic reserves. This will over-estimate the size of the 3607 * atomic reserve but it avoids a search. 3608 */ 3609 if (likely(!alloc_harder)) 3610 unusable_free += z->nr_reserved_highatomic; 3611 3612 #ifdef CONFIG_CMA 3613 /* If allocation can't use CMA areas don't use free CMA pages */ 3614 if (!(alloc_flags & ALLOC_CMA)) 3615 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3616 #endif 3617 3618 return unusable_free; 3619 } 3620 3621 /* 3622 * Return true if free base pages are above 'mark'. For high-order checks it 3623 * will return true of the order-0 watermark is reached and there is at least 3624 * one free page of a suitable size. Checking now avoids taking the zone lock 3625 * to check in the allocation paths if no pages are free. 3626 */ 3627 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3628 int highest_zoneidx, unsigned int alloc_flags, 3629 long free_pages) 3630 { 3631 long min = mark; 3632 int o; 3633 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); 3634 3635 /* free_pages may go negative - that's OK */ 3636 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3637 3638 if (alloc_flags & ALLOC_HIGH) 3639 min -= min / 2; 3640 3641 if (unlikely(alloc_harder)) { 3642 /* 3643 * OOM victims can try even harder than normal ALLOC_HARDER 3644 * users on the grounds that it's definitely going to be in 3645 * the exit path shortly and free memory. Any allocation it 3646 * makes during the free path will be small and short-lived. 3647 */ 3648 if (alloc_flags & ALLOC_OOM) 3649 min -= min / 2; 3650 else 3651 min -= min / 4; 3652 } 3653 3654 /* 3655 * Check watermarks for an order-0 allocation request. If these 3656 * are not met, then a high-order request also cannot go ahead 3657 * even if a suitable page happened to be free. 
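 *
 * (Worked example of the adjustments above: with mark == 1024 pages,
 * ALLOC_HIGH halves the requirement to 512 and an ALLOC_OOM victim halves
 * it again to 256; the request may proceed only if the usable free pages
 * still exceed that figure plus the lowmem reserve for the requesting
 * zone index.)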
3658 */ 3659 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3660 return false; 3661 3662 /* If this is an order-0 request then the watermark is fine */ 3663 if (!order) 3664 return true; 3665 3666 /* For a high-order request, check at least one suitable page is free */ 3667 for (o = order; o < MAX_ORDER; o++) { 3668 struct free_area *area = &z->free_area[o]; 3669 int mt; 3670 3671 if (!area->nr_free) 3672 continue; 3673 3674 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3675 if (!free_area_empty(area, mt)) 3676 return true; 3677 } 3678 3679 #ifdef CONFIG_CMA 3680 if ((alloc_flags & ALLOC_CMA) && 3681 !free_area_empty(area, MIGRATE_CMA)) { 3682 return true; 3683 } 3684 #endif 3685 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC)) 3686 return true; 3687 } 3688 return false; 3689 } 3690 3691 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3692 int highest_zoneidx, unsigned int alloc_flags) 3693 { 3694 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3695 zone_page_state(z, NR_FREE_PAGES)); 3696 } 3697 3698 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3699 unsigned long mark, int highest_zoneidx, 3700 unsigned int alloc_flags, gfp_t gfp_mask) 3701 { 3702 long free_pages; 3703 3704 free_pages = zone_page_state(z, NR_FREE_PAGES); 3705 3706 /* 3707 * Fast check for order-0 only. If this fails then the reserves 3708 * need to be calculated. 3709 */ 3710 if (!order) { 3711 long fast_free; 3712 3713 fast_free = free_pages; 3714 fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags); 3715 if (fast_free > mark + z->lowmem_reserve[highest_zoneidx]) 3716 return true; 3717 } 3718 3719 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3720 free_pages)) 3721 return true; 3722 /* 3723 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations 3724 * when checking the min watermark. The min watermark is the 3725 * point where boosting is ignored so that kswapd is woken up 3726 * when below the low watermark. 3727 */ 3728 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost 3729 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3730 mark = z->_watermark[WMARK_MIN]; 3731 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3732 alloc_flags, free_pages); 3733 } 3734 3735 return false; 3736 } 3737 3738 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3739 unsigned long mark, int highest_zoneidx) 3740 { 3741 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3742 3743 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3744 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3745 3746 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3747 free_pages); 3748 } 3749 3750 #ifdef CONFIG_NUMA 3751 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3752 { 3753 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3754 node_reclaim_distance; 3755 } 3756 #else /* CONFIG_NUMA */ 3757 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3758 { 3759 return true; 3760 } 3761 #endif /* CONFIG_NUMA */ 3762 3763 /* 3764 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3765 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3766 * premature use of a lower zone may cause lowmem pressure problems that 3767 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3768 * probably too small. 
It only makes sense to spread allocations to avoid 3769 * fragmentation between the Normal and DMA32 zones. 3770 */ 3771 static inline unsigned int 3772 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3773 { 3774 unsigned int alloc_flags; 3775 3776 /* 3777 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3778 * to save a branch. 3779 */ 3780 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3781 3782 #ifdef CONFIG_ZONE_DMA32 3783 if (!zone) 3784 return alloc_flags; 3785 3786 if (zone_idx(zone) != ZONE_NORMAL) 3787 return alloc_flags; 3788 3789 /* 3790 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3791 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3792 * on UMA that if Normal is populated then so is DMA32. 3793 */ 3794 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3795 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3796 return alloc_flags; 3797 3798 alloc_flags |= ALLOC_NOFRAGMENT; 3799 #endif /* CONFIG_ZONE_DMA32 */ 3800 return alloc_flags; 3801 } 3802 3803 static inline unsigned int current_alloc_flags(gfp_t gfp_mask, 3804 unsigned int alloc_flags) 3805 { 3806 #ifdef CONFIG_CMA 3807 unsigned int pflags = current->flags; 3808 3809 if (!(pflags & PF_MEMALLOC_NOCMA) && 3810 gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3811 alloc_flags |= ALLOC_CMA; 3812 3813 #endif 3814 return alloc_flags; 3815 } 3816 3817 /* 3818 * get_page_from_freelist goes through the zonelist trying to allocate 3819 * a page. 3820 */ 3821 static struct page * 3822 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3823 const struct alloc_context *ac) 3824 { 3825 struct zoneref *z; 3826 struct zone *zone; 3827 struct pglist_data *last_pgdat_dirty_limit = NULL; 3828 bool no_fallback; 3829 3830 retry: 3831 /* 3832 * Scan zonelist, looking for a zone with enough free. 3833 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 3834 */ 3835 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3836 z = ac->preferred_zoneref; 3837 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3838 ac->nodemask) { 3839 struct page *page; 3840 unsigned long mark; 3841 3842 if (cpusets_enabled() && 3843 (alloc_flags & ALLOC_CPUSET) && 3844 !__cpuset_zone_allowed(zone, gfp_mask)) 3845 continue; 3846 /* 3847 * When allocating a page cache page for writing, we 3848 * want to get it from a node that is within its dirty 3849 * limit, such that no single node holds more than its 3850 * proportional share of globally allowed dirty pages. 3851 * The dirty limits take into account the node's 3852 * lowmem reserves and high watermark so that kswapd 3853 * should be able to balance it without having to 3854 * write pages from its LRU list. 3855 * 3856 * XXX: For now, allow allocations to potentially 3857 * exceed the per-node dirty limit in the slowpath 3858 * (spread_dirty_pages unset) before going into reclaim, 3859 * which is important when on a NUMA setup the allowed 3860 * nodes are together not big enough to reach the 3861 * global limit. The proper fix for these situations 3862 * will require awareness of nodes in the 3863 * dirty-throttling and the flusher threads. 
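 *
 * As a concrete, illustrative example of how a caller gets here with
 * dirty spreading enabled: a page cache write path allocating with
 * __GFP_WRITE has ac->spread_dirty_pages set by prepare_alloc_pages(),
 * so the check below skips any node already over its dirty limit and
 * remembers it in last_pgdat_dirty_limit to avoid re-checking it for
 * later zones of the same node.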
3864 */ 3865 if (ac->spread_dirty_pages) { 3866 if (last_pgdat_dirty_limit == zone->zone_pgdat) 3867 continue; 3868 3869 if (!node_dirty_ok(zone->zone_pgdat)) { 3870 last_pgdat_dirty_limit = zone->zone_pgdat; 3871 continue; 3872 } 3873 } 3874 3875 if (no_fallback && nr_online_nodes > 1 && 3876 zone != ac->preferred_zoneref->zone) { 3877 int local_nid; 3878 3879 /* 3880 * If moving to a remote node, retry but allow 3881 * fragmenting fallbacks. Locality is more important 3882 * than fragmentation avoidance. 3883 */ 3884 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 3885 if (zone_to_nid(zone) != local_nid) { 3886 alloc_flags &= ~ALLOC_NOFRAGMENT; 3887 goto retry; 3888 } 3889 } 3890 3891 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3892 if (!zone_watermark_fast(zone, order, mark, 3893 ac->highest_zoneidx, alloc_flags, 3894 gfp_mask)) { 3895 int ret; 3896 3897 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3898 /* 3899 * Watermark failed for this zone, but see if we can 3900 * grow this zone if it contains deferred pages. 3901 */ 3902 if (static_branch_unlikely(&deferred_pages)) { 3903 if (_deferred_grow_zone(zone, order)) 3904 goto try_this_zone; 3905 } 3906 #endif 3907 /* Checked here to keep the fast path fast */ 3908 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3909 if (alloc_flags & ALLOC_NO_WATERMARKS) 3910 goto try_this_zone; 3911 3912 if (node_reclaim_mode == 0 || 3913 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 3914 continue; 3915 3916 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3917 switch (ret) { 3918 case NODE_RECLAIM_NOSCAN: 3919 /* did not scan */ 3920 continue; 3921 case NODE_RECLAIM_FULL: 3922 /* scanned but unreclaimable */ 3923 continue; 3924 default: 3925 /* did we reclaim enough */ 3926 if (zone_watermark_ok(zone, order, mark, 3927 ac->highest_zoneidx, alloc_flags)) 3928 goto try_this_zone; 3929 3930 continue; 3931 } 3932 } 3933 3934 try_this_zone: 3935 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 3936 gfp_mask, alloc_flags, ac->migratetype); 3937 if (page) { 3938 prep_new_page(page, order, gfp_mask, alloc_flags); 3939 3940 /* 3941 * If this is a high-order atomic allocation then check 3942 * if the pageblock should be reserved for the future 3943 */ 3944 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 3945 reserve_highatomic_pageblock(page, zone, order); 3946 3947 return page; 3948 } else { 3949 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3950 /* Try again if zone has deferred pages */ 3951 if (static_branch_unlikely(&deferred_pages)) { 3952 if (_deferred_grow_zone(zone, order)) 3953 goto try_this_zone; 3954 } 3955 #endif 3956 } 3957 } 3958 3959 /* 3960 * It's possible on a UMA machine to get through all zones that are 3961 * fragmented. If avoiding fragmentation, reset and try again. 3962 */ 3963 if (no_fallback) { 3964 alloc_flags &= ~ALLOC_NOFRAGMENT; 3965 goto retry; 3966 } 3967 3968 return NULL; 3969 } 3970 3971 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3972 { 3973 unsigned int filter = SHOW_MEM_FILTER_NODES; 3974 3975 /* 3976 * This documents exceptions given to allocations in certain 3977 * contexts that are allowed to allocate outside current's set 3978 * of allowed nodes. 
3979 */ 3980 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3981 if (tsk_is_oom_victim(current) || 3982 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3983 filter &= ~SHOW_MEM_FILTER_NODES; 3984 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3985 filter &= ~SHOW_MEM_FILTER_NODES; 3986 3987 show_mem(filter, nodemask); 3988 } 3989 3990 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 3991 { 3992 struct va_format vaf; 3993 va_list args; 3994 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3995 3996 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) 3997 return; 3998 3999 va_start(args, fmt); 4000 vaf.fmt = fmt; 4001 vaf.va = &args; 4002 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 4003 current->comm, &vaf, gfp_mask, &gfp_mask, 4004 nodemask_pr_args(nodemask)); 4005 va_end(args); 4006 4007 cpuset_print_current_mems_allowed(); 4008 pr_cont("\n"); 4009 dump_stack(); 4010 warn_alloc_show_mem(gfp_mask, nodemask); 4011 } 4012 4013 static inline struct page * 4014 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 4015 unsigned int alloc_flags, 4016 const struct alloc_context *ac) 4017 { 4018 struct page *page; 4019 4020 page = get_page_from_freelist(gfp_mask, order, 4021 alloc_flags|ALLOC_CPUSET, ac); 4022 /* 4023 * fallback to ignore cpuset restriction if our nodes 4024 * are depleted 4025 */ 4026 if (!page) 4027 page = get_page_from_freelist(gfp_mask, order, 4028 alloc_flags, ac); 4029 4030 return page; 4031 } 4032 4033 static inline struct page * 4034 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 4035 const struct alloc_context *ac, unsigned long *did_some_progress) 4036 { 4037 struct oom_control oc = { 4038 .zonelist = ac->zonelist, 4039 .nodemask = ac->nodemask, 4040 .memcg = NULL, 4041 .gfp_mask = gfp_mask, 4042 .order = order, 4043 }; 4044 struct page *page; 4045 4046 *did_some_progress = 0; 4047 4048 /* 4049 * Acquire the oom lock. If that fails, somebody else is 4050 * making progress for us. 4051 */ 4052 if (!mutex_trylock(&oom_lock)) { 4053 *did_some_progress = 1; 4054 schedule_timeout_uninterruptible(1); 4055 return NULL; 4056 } 4057 4058 /* 4059 * Go through the zonelist yet one more time, keep very high watermark 4060 * here, this is only to catch a parallel oom killing, we must fail if 4061 * we're still under heavy pressure. But make sure that this reclaim 4062 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 4063 * allocation which will never fail due to oom_lock already held. 4064 */ 4065 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 4066 ~__GFP_DIRECT_RECLAIM, order, 4067 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 4068 if (page) 4069 goto out; 4070 4071 /* Coredumps can quickly deplete all memory reserves */ 4072 if (current->flags & PF_DUMPCORE) 4073 goto out; 4074 /* The OOM killer will not help higher order allocs */ 4075 if (order > PAGE_ALLOC_COSTLY_ORDER) 4076 goto out; 4077 /* 4078 * We have already exhausted all our reclaim opportunities without any 4079 * success so it is time to admit defeat. We will skip the OOM killer 4080 * because it is very likely that the caller has a more reasonable 4081 * fallback than shooting a random task. 4082 * 4083 * The OOM killer may not free memory on a specific node. 
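 *
 * For example (illustrative only), a __GFP_RETRY_MAYFAIL request that
 * reaches this point returns NULL to its caller below rather than
 * invoking out_of_memory().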
4084 */ 4085 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 4086 goto out; 4087 /* The OOM killer does not needlessly kill tasks for lowmem */ 4088 if (ac->highest_zoneidx < ZONE_NORMAL) 4089 goto out; 4090 if (pm_suspended_storage()) 4091 goto out; 4092 /* 4093 * XXX: GFP_NOFS allocations should rather fail than rely on 4094 * other requests to make forward progress. 4095 * We are in an unfortunate situation where out_of_memory cannot 4096 * do much for this context but let's try it to at least get 4097 * access to memory reserved if the current task is killed (see 4098 * out_of_memory). Once filesystems are ready to handle allocation 4099 * failures more gracefully we should just bail out here. 4100 */ 4101 4102 /* Exhausted what can be done so it's blame time */ 4103 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { 4104 *did_some_progress = 1; 4105 4106 /* 4107 * Help non-failing allocations by giving them access to memory 4108 * reserves 4109 */ 4110 if (gfp_mask & __GFP_NOFAIL) 4111 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 4112 ALLOC_NO_WATERMARKS, ac); 4113 } 4114 out: 4115 mutex_unlock(&oom_lock); 4116 return page; 4117 } 4118 4119 /* 4120 * Maximum number of compaction retries with progress before the OOM 4121 * killer is considered the only way to move forward. 4122 */ 4123 #define MAX_COMPACT_RETRIES 16 4124 4125 #ifdef CONFIG_COMPACTION 4126 /* Try memory compaction for high-order allocations before reclaim */ 4127 static struct page * 4128 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4129 unsigned int alloc_flags, const struct alloc_context *ac, 4130 enum compact_priority prio, enum compact_result *compact_result) 4131 { 4132 struct page *page = NULL; 4133 unsigned long pflags; 4134 unsigned int noreclaim_flag; 4135 4136 if (!order) 4137 return NULL; 4138 4139 psi_memstall_enter(&pflags); 4140 noreclaim_flag = memalloc_noreclaim_save(); 4141 4142 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 4143 prio, &page); 4144 4145 memalloc_noreclaim_restore(noreclaim_flag); 4146 psi_memstall_leave(&pflags); 4147 4148 /* 4149 * In at least one zone compaction wasn't deferred or skipped, so let's 4150 * count a compaction stall 4151 */ 4152 count_vm_event(COMPACTSTALL); 4153 4154 /* Prep a captured page if available */ 4155 if (page) 4156 prep_new_page(page, order, gfp_mask, alloc_flags); 4157 4158 /* Try to get a page from the freelist if available */ 4159 if (!page) 4160 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4161 4162 if (page) { 4163 struct zone *zone = page_zone(page); 4164 4165 zone->compact_blockskip_flush = false; 4166 compaction_defer_reset(zone, order, true); 4167 count_vm_event(COMPACTSUCCESS); 4168 return page; 4169 } 4170 4171 /* 4172 * It's bad if a compaction run occurs and fails. The most likely reason 4173 * is that pages exist, but not enough to satisfy watermarks.
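 *
 * The failure is still accounted: COMPACTFAIL is bumped below, so such
 * runs show up in the VM event counters next to the COMPACTSTALL and
 * COMPACTSUCCESS events counted above.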
4174 */ 4175 count_vm_event(COMPACTFAIL); 4176 4177 cond_resched(); 4178 4179 return NULL; 4180 } 4181 4182 static inline bool 4183 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4184 enum compact_result compact_result, 4185 enum compact_priority *compact_priority, 4186 int *compaction_retries) 4187 { 4188 int max_retries = MAX_COMPACT_RETRIES; 4189 int min_priority; 4190 bool ret = false; 4191 int retries = *compaction_retries; 4192 enum compact_priority priority = *compact_priority; 4193 4194 if (!order) 4195 return false; 4196 4197 if (compaction_made_progress(compact_result)) 4198 (*compaction_retries)++; 4199 4200 /* 4201 * compaction considers all the zone as desperately out of memory 4202 * so it doesn't really make much sense to retry except when the 4203 * failure could be caused by insufficient priority 4204 */ 4205 if (compaction_failed(compact_result)) 4206 goto check_priority; 4207 4208 /* 4209 * compaction was skipped because there are not enough order-0 pages 4210 * to work with, so we retry only if it looks like reclaim can help. 4211 */ 4212 if (compaction_needs_reclaim(compact_result)) { 4213 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4214 goto out; 4215 } 4216 4217 /* 4218 * make sure the compaction wasn't deferred or didn't bail out early 4219 * due to locks contention before we declare that we should give up. 4220 * But the next retry should use a higher priority if allowed, so 4221 * we don't just keep bailing out endlessly. 4222 */ 4223 if (compaction_withdrawn(compact_result)) { 4224 goto check_priority; 4225 } 4226 4227 /* 4228 * !costly requests are much more important than __GFP_RETRY_MAYFAIL 4229 * costly ones because they are de facto nofail and invoke OOM 4230 * killer to move on while costly can fail and users are ready 4231 * to cope with that. 1/4 retries is rather arbitrary but we 4232 * would need much more detailed feedback from compaction to 4233 * make a better decision. 4234 */ 4235 if (order > PAGE_ALLOC_COSTLY_ORDER) 4236 max_retries /= 4; 4237 if (*compaction_retries <= max_retries) { 4238 ret = true; 4239 goto out; 4240 } 4241 4242 /* 4243 * Make sure there are attempts at the highest priority if we exhausted 4244 * all retries or failed at the lower priorities. 4245 */ 4246 check_priority: 4247 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 4248 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4249 4250 if (*compact_priority > min_priority) { 4251 (*compact_priority)--; 4252 *compaction_retries = 0; 4253 ret = true; 4254 } 4255 out: 4256 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4257 return ret; 4258 } 4259 #else 4260 static inline struct page * 4261 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4262 unsigned int alloc_flags, const struct alloc_context *ac, 4263 enum compact_priority prio, enum compact_result *compact_result) 4264 { 4265 *compact_result = COMPACT_SKIPPED; 4266 return NULL; 4267 } 4268 4269 static inline bool 4270 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 4271 enum compact_result compact_result, 4272 enum compact_priority *compact_priority, 4273 int *compaction_retries) 4274 { 4275 struct zone *zone; 4276 struct zoneref *z; 4277 4278 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4279 return false; 4280 4281 /* 4282 * There are setups with compaction disabled which would prefer to loop 4283 * inside the allocator rather than hit the oom killer prematurely. 
4284 * Let's give them a good hope and keep retrying while the order-0 4285 * watermarks are OK. 4286 */ 4287 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4288 ac->highest_zoneidx, ac->nodemask) { 4289 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4290 ac->highest_zoneidx, alloc_flags)) 4291 return true; 4292 } 4293 return false; 4294 } 4295 #endif /* CONFIG_COMPACTION */ 4296 4297 #ifdef CONFIG_LOCKDEP 4298 static struct lockdep_map __fs_reclaim_map = 4299 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4300 4301 static bool __need_reclaim(gfp_t gfp_mask) 4302 { 4303 /* no reclaim without waiting on it */ 4304 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4305 return false; 4306 4307 /* this guy won't enter reclaim */ 4308 if (current->flags & PF_MEMALLOC) 4309 return false; 4310 4311 if (gfp_mask & __GFP_NOLOCKDEP) 4312 return false; 4313 4314 return true; 4315 } 4316 4317 void __fs_reclaim_acquire(void) 4318 { 4319 lock_map_acquire(&__fs_reclaim_map); 4320 } 4321 4322 void __fs_reclaim_release(void) 4323 { 4324 lock_map_release(&__fs_reclaim_map); 4325 } 4326 4327 void fs_reclaim_acquire(gfp_t gfp_mask) 4328 { 4329 gfp_mask = current_gfp_context(gfp_mask); 4330 4331 if (__need_reclaim(gfp_mask)) { 4332 if (gfp_mask & __GFP_FS) 4333 __fs_reclaim_acquire(); 4334 4335 #ifdef CONFIG_MMU_NOTIFIER 4336 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4337 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4338 #endif 4339 4340 } 4341 } 4342 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4343 4344 void fs_reclaim_release(gfp_t gfp_mask) 4345 { 4346 gfp_mask = current_gfp_context(gfp_mask); 4347 4348 if (__need_reclaim(gfp_mask)) { 4349 if (gfp_mask & __GFP_FS) 4350 __fs_reclaim_release(); 4351 } 4352 } 4353 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4354 #endif 4355 4356 /* Perform direct synchronous page reclaim */ 4357 static unsigned long 4358 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4359 const struct alloc_context *ac) 4360 { 4361 unsigned int noreclaim_flag; 4362 unsigned long pflags, progress; 4363 4364 cond_resched(); 4365 4366 /* We now go into synchronous reclaim */ 4367 cpuset_memory_pressure_bump(); 4368 psi_memstall_enter(&pflags); 4369 fs_reclaim_acquire(gfp_mask); 4370 noreclaim_flag = memalloc_noreclaim_save(); 4371 4372 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4373 ac->nodemask); 4374 4375 memalloc_noreclaim_restore(noreclaim_flag); 4376 fs_reclaim_release(gfp_mask); 4377 psi_memstall_leave(&pflags); 4378 4379 cond_resched(); 4380 4381 return progress; 4382 } 4383 4384 /* The really slow allocator path where we enter direct reclaim */ 4385 static inline struct page * 4386 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4387 unsigned int alloc_flags, const struct alloc_context *ac, 4388 unsigned long *did_some_progress) 4389 { 4390 struct page *page = NULL; 4391 bool drained = false; 4392 4393 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4394 if (unlikely(!(*did_some_progress))) 4395 return NULL; 4396 4397 retry: 4398 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4399 4400 /* 4401 * If an allocation failed after direct reclaim, it could be because 4402 * pages are pinned on the per-cpu lists or in high alloc reserves. 
4403 * Shrink them and try again 4404 */ 4405 if (!page && !drained) { 4406 unreserve_highatomic_pageblock(ac, false); 4407 drain_all_pages(NULL); 4408 drained = true; 4409 goto retry; 4410 } 4411 4412 return page; 4413 } 4414 4415 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4416 const struct alloc_context *ac) 4417 { 4418 struct zoneref *z; 4419 struct zone *zone; 4420 pg_data_t *last_pgdat = NULL; 4421 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4422 4423 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4424 ac->nodemask) { 4425 if (last_pgdat != zone->zone_pgdat) 4426 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 4427 last_pgdat = zone->zone_pgdat; 4428 } 4429 } 4430 4431 static inline unsigned int 4432 gfp_to_alloc_flags(gfp_t gfp_mask) 4433 { 4434 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4435 4436 /* 4437 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH 4438 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4439 * to save two branches. 4440 */ 4441 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 4442 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4443 4444 /* 4445 * The caller may dip into page reserves a bit more if the caller 4446 * cannot run direct reclaim, or if the caller has realtime scheduling 4447 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4448 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 4449 */ 4450 alloc_flags |= (__force int) 4451 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4452 4453 if (gfp_mask & __GFP_ATOMIC) { 4454 /* 4455 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4456 * if it can't schedule. 4457 */ 4458 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4459 alloc_flags |= ALLOC_HARDER; 4460 /* 4461 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 4462 * comment for __cpuset_node_allowed(). 4463 */ 4464 alloc_flags &= ~ALLOC_CPUSET; 4465 } else if (unlikely(rt_task(current)) && !in_interrupt()) 4466 alloc_flags |= ALLOC_HARDER; 4467 4468 alloc_flags = current_alloc_flags(gfp_mask, alloc_flags); 4469 4470 return alloc_flags; 4471 } 4472 4473 static bool oom_reserves_allowed(struct task_struct *tsk) 4474 { 4475 if (!tsk_is_oom_victim(tsk)) 4476 return false; 4477 4478 /* 4479 * !MMU doesn't have oom reaper so give access to memory reserves 4480 * only to the thread with TIF_MEMDIE set 4481 */ 4482 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4483 return false; 4484 4485 return true; 4486 } 4487 4488 /* 4489 * Distinguish requests which really need access to full memory 4490 * reserves from oom victims which can live with a portion of it 4491 */ 4492 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4493 { 4494 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4495 return 0; 4496 if (gfp_mask & __GFP_MEMALLOC) 4497 return ALLOC_NO_WATERMARKS; 4498 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4499 return ALLOC_NO_WATERMARKS; 4500 if (!in_interrupt()) { 4501 if (current->flags & PF_MEMALLOC) 4502 return ALLOC_NO_WATERMARKS; 4503 else if (oom_reserves_allowed(current)) 4504 return ALLOC_OOM; 4505 } 4506 4507 return 0; 4508 } 4509 4510 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4511 { 4512 return !!__gfp_pfmemalloc_flags(gfp_mask); 4513 } 4514 4515 /* 4516 * Checks whether it makes sense to retry the reclaim to make a forward progress 4517 * for the given allocation request. 
4518 * 4519 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4520 * without success, or when we couldn't even meet the watermark if we 4521 * reclaimed all remaining pages on the LRU lists. 4522 * 4523 * Returns true if a retry is viable or false to enter the oom path. 4524 */ 4525 static inline bool 4526 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4527 struct alloc_context *ac, int alloc_flags, 4528 bool did_some_progress, int *no_progress_loops) 4529 { 4530 struct zone *zone; 4531 struct zoneref *z; 4532 bool ret = false; 4533 4534 /* 4535 * Costly allocations might have made progress but this doesn't mean 4536 * their order will become available due to high fragmentation so 4537 * always increment the no progress counter for them 4538 */ 4539 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4540 *no_progress_loops = 0; 4541 else 4542 (*no_progress_loops)++; 4543 4544 /* 4545 * Make sure we converge to OOM if we cannot make any progress 4546 * several times in a row. 4547 */ 4548 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 4549 /* Before OOM, exhaust highatomic_reserve */ 4550 return unreserve_highatomic_pageblock(ac, true); 4551 } 4552 4553 /* 4554 * Keep reclaiming pages while there is a chance this will lead 4555 * somewhere. If none of the target zones can satisfy our allocation 4556 * request even if all reclaimable pages are considered then we are 4557 * screwed and have to go OOM. 4558 */ 4559 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4560 ac->highest_zoneidx, ac->nodemask) { 4561 unsigned long available; 4562 unsigned long reclaimable; 4563 unsigned long min_wmark = min_wmark_pages(zone); 4564 bool wmark; 4565 4566 available = reclaimable = zone_reclaimable_pages(zone); 4567 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4568 4569 /* 4570 * Would the allocation succeed if we reclaimed all 4571 * reclaimable pages? 4572 */ 4573 wmark = __zone_watermark_ok(zone, order, min_wmark, 4574 ac->highest_zoneidx, alloc_flags, available); 4575 trace_reclaim_retry_zone(z, order, reclaimable, 4576 available, min_wmark, *no_progress_loops, wmark); 4577 if (wmark) { 4578 /* 4579 * If we didn't make any progress and have a lot of 4580 * dirty + writeback pages then we should wait for 4581 * an IO to complete to slow down the reclaim and 4582 * prevent premature OOM 4583 */ 4584 if (!did_some_progress) { 4585 unsigned long write_pending; 4586 4587 write_pending = zone_page_state_snapshot(zone, 4588 NR_ZONE_WRITE_PENDING); 4589 4590 if (2 * write_pending > reclaimable) { 4591 congestion_wait(BLK_RW_ASYNC, HZ/10); 4592 return true; 4593 } 4594 } 4595 4596 ret = true; 4597 goto out; 4598 } 4599 } 4600 4601 out: 4602 /* 4603 * Memory allocation/reclaim might be called from a WQ context and the 4604 * current implementation of the WQ concurrency control doesn't 4605 * recognize that a particular WQ is congested if the worker thread is 4606 * looping without ever sleeping. Therefore we have to do a short sleep 4607 * here rather than calling cond_resched(). 4608 */ 4609 if (current->flags & PF_WQ_WORKER) 4610 schedule_timeout_uninterruptible(1); 4611 else 4612 cond_resched(); 4613 return ret; 4614 } 4615 4616 static inline bool 4617 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4618 { 4619 /* 4620 * It's possible that cpuset's mems_allowed and the nodemask from 4621 * mempolicy don't intersect.
This should be normally dealt with by 4622 * policy_nodemask(), but it's possible to race with cpuset update in 4623 * such a way the check therein was true, and then it became false 4624 * before we got our cpuset_mems_cookie here. 4625 * This assumes that for all allocations, ac->nodemask can come only 4626 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4627 * when it does not intersect with the cpuset restrictions) or the 4628 * caller can deal with a violated nodemask. 4629 */ 4630 if (cpusets_enabled() && ac->nodemask && 4631 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4632 ac->nodemask = NULL; 4633 return true; 4634 } 4635 4636 /* 4637 * When updating a task's mems_allowed or mempolicy nodemask, it is 4638 * possible to race with parallel threads in such a way that our 4639 * allocation can fail while the mask is being updated. If we are about 4640 * to fail, check if the cpuset changed during allocation and if so, 4641 * retry. 4642 */ 4643 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4644 return true; 4645 4646 return false; 4647 } 4648 4649 static inline struct page * 4650 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4651 struct alloc_context *ac) 4652 { 4653 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4654 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4655 struct page *page = NULL; 4656 unsigned int alloc_flags; 4657 unsigned long did_some_progress; 4658 enum compact_priority compact_priority; 4659 enum compact_result compact_result; 4660 int compaction_retries; 4661 int no_progress_loops; 4662 unsigned int cpuset_mems_cookie; 4663 int reserve_flags; 4664 4665 /* 4666 * We also sanity check to catch abuse of atomic reserves being used by 4667 * callers that are not in atomic context. 4668 */ 4669 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 4670 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 4671 gfp_mask &= ~__GFP_ATOMIC; 4672 4673 retry_cpuset: 4674 compaction_retries = 0; 4675 no_progress_loops = 0; 4676 compact_priority = DEF_COMPACT_PRIORITY; 4677 cpuset_mems_cookie = read_mems_allowed_begin(); 4678 4679 /* 4680 * The fast path uses conservative alloc_flags to succeed only until 4681 * kswapd needs to be woken up, and to avoid the cost of setting up 4682 * alloc_flags precisely. So we do that now. 4683 */ 4684 alloc_flags = gfp_to_alloc_flags(gfp_mask); 4685 4686 /* 4687 * We need to recalculate the starting point for the zonelist iterator 4688 * because we might have used different nodemask in the fast path, or 4689 * there was a cpuset modification and we are retrying - otherwise we 4690 * could end up iterating over non-eligible zones endlessly. 4691 */ 4692 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4693 ac->highest_zoneidx, ac->nodemask); 4694 if (!ac->preferred_zoneref->zone) 4695 goto nopage; 4696 4697 if (alloc_flags & ALLOC_KSWAPD) 4698 wake_all_kswapds(order, gfp_mask, ac); 4699 4700 /* 4701 * The adjusted alloc_flags might result in immediate success, so try 4702 * that first 4703 */ 4704 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4705 if (page) 4706 goto got_pg; 4707 4708 /* 4709 * For costly allocations, try direct compaction first, as it's likely 4710 * that we have enough base pages and don't need to reclaim. For non- 4711 * movable high-order allocations, do that as well, as compaction will 4712 * try prevent permanent fragmentation by migrating from blocks of the 4713 * same migratetype. 
4714 * Don't try this for allocations that are allowed to ignore 4715 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4716 */ 4717 if (can_direct_reclaim && 4718 (costly_order || 4719 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4720 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4721 page = __alloc_pages_direct_compact(gfp_mask, order, 4722 alloc_flags, ac, 4723 INIT_COMPACT_PRIORITY, 4724 &compact_result); 4725 if (page) 4726 goto got_pg; 4727 4728 /* 4729 * Checks for costly allocations with __GFP_NORETRY, which 4730 * includes some THP page fault allocations 4731 */ 4732 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4733 /* 4734 * If allocating entire pageblock(s) and compaction 4735 * failed because all zones are below low watermarks 4736 * or is prohibited because it recently failed at this 4737 * order, fail immediately unless the allocator has 4738 * requested compaction and reclaim retry. 4739 * 4740 * Reclaim is 4741 * - potentially very expensive because zones are far 4742 * below their low watermarks or this is part of very 4743 * bursty high order allocations, 4744 * - not guaranteed to help because isolate_freepages() 4745 * may not iterate over freed pages as part of its 4746 * linear scan, and 4747 * - unlikely to make entire pageblocks free on its 4748 * own. 4749 */ 4750 if (compact_result == COMPACT_SKIPPED || 4751 compact_result == COMPACT_DEFERRED) 4752 goto nopage; 4753 4754 /* 4755 * Looks like reclaim/compaction is worth trying, but 4756 * sync compaction could be very expensive, so keep 4757 * using async compaction. 4758 */ 4759 compact_priority = INIT_COMPACT_PRIORITY; 4760 } 4761 } 4762 4763 retry: 4764 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4765 if (alloc_flags & ALLOC_KSWAPD) 4766 wake_all_kswapds(order, gfp_mask, ac); 4767 4768 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4769 if (reserve_flags) 4770 alloc_flags = current_alloc_flags(gfp_mask, reserve_flags); 4771 4772 /* 4773 * Reset the nodemask and zonelist iterators if memory policies can be 4774 * ignored. These allocations are high priority and system rather than 4775 * user oriented. 
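 *
 * For instance (illustrative), a __GFP_MEMALLOC request (reserve_flags
 * == ALLOC_NO_WATERMARKS), an OOM victim (ALLOC_OOM) or a GFP_ATOMIC
 * request whose ALLOC_CPUSET was dropped in gfp_to_alloc_flags() all
 * take this path and restart the zonelist walk against every node.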
4776 */ 4777 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4778 ac->nodemask = NULL; 4779 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4780 ac->highest_zoneidx, ac->nodemask); 4781 } 4782 4783 /* Attempt with potentially adjusted zonelist and alloc_flags */ 4784 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4785 if (page) 4786 goto got_pg; 4787 4788 /* Caller is not willing to reclaim, we can't balance anything */ 4789 if (!can_direct_reclaim) 4790 goto nopage; 4791 4792 /* Avoid recursion of direct reclaim */ 4793 if (current->flags & PF_MEMALLOC) 4794 goto nopage; 4795 4796 /* Try direct reclaim and then allocating */ 4797 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 4798 &did_some_progress); 4799 if (page) 4800 goto got_pg; 4801 4802 /* Try direct compaction and then allocating */ 4803 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4804 compact_priority, &compact_result); 4805 if (page) 4806 goto got_pg; 4807 4808 /* Do not loop if specifically requested */ 4809 if (gfp_mask & __GFP_NORETRY) 4810 goto nopage; 4811 4812 /* 4813 * Do not retry costly high order allocations unless they are 4814 * __GFP_RETRY_MAYFAIL 4815 */ 4816 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) 4817 goto nopage; 4818 4819 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4820 did_some_progress > 0, &no_progress_loops)) 4821 goto retry; 4822 4823 /* 4824 * It doesn't make any sense to retry for the compaction if the order-0 4825 * reclaim is not able to make any progress because the current 4826 * implementation of the compaction depends on the sufficient amount 4827 * of free memory (see __compaction_suitable) 4828 */ 4829 if (did_some_progress > 0 && 4830 should_compact_retry(ac, order, alloc_flags, 4831 compact_result, &compact_priority, 4832 &compaction_retries)) 4833 goto retry; 4834 4835 4836 /* Deal with possible cpuset update races before we start OOM killing */ 4837 if (check_retry_cpuset(cpuset_mems_cookie, ac)) 4838 goto retry_cpuset; 4839 4840 /* Reclaim has failed us, start killing things */ 4841 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4842 if (page) 4843 goto got_pg; 4844 4845 /* Avoid allocations with no watermarks from looping endlessly */ 4846 if (tsk_is_oom_victim(current) && 4847 (alloc_flags & ALLOC_OOM || 4848 (gfp_mask & __GFP_NOMEMALLOC))) 4849 goto nopage; 4850 4851 /* Retry as long as the OOM killer is making progress */ 4852 if (did_some_progress) { 4853 no_progress_loops = 0; 4854 goto retry; 4855 } 4856 4857 nopage: 4858 /* Deal with possible cpuset update races before we fail */ 4859 if (check_retry_cpuset(cpuset_mems_cookie, ac)) 4860 goto retry_cpuset; 4861 4862 /* 4863 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 4864 * we always retry 4865 */ 4866 if (gfp_mask & __GFP_NOFAIL) { 4867 /* 4868 * All existing users of the __GFP_NOFAIL are blockable, so warn 4869 * of any new users that actually require GFP_NOWAIT 4870 */ 4871 if (WARN_ON_ONCE(!can_direct_reclaim)) 4872 goto fail; 4873 4874 /* 4875 * PF_MEMALLOC request from this context is rather bizarre 4876 * because we cannot reclaim anything and only can loop waiting 4877 * for somebody to do a work for us 4878 */ 4879 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 4880 4881 /* 4882 * non failing costly orders are a hard requirement which we 4883 * are not prepared for much so let's warn about these users 4884 * so that we can identify them and convert them to something 
4885 * else. 4886 */ 4887 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); 4888 4889 /* 4890 * Help non-failing allocations by giving them access to memory 4891 * reserves but do not use ALLOC_NO_WATERMARKS because this 4892 * could deplete whole memory reserves which would just make 4893 * the situation worse 4894 */ 4895 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); 4896 if (page) 4897 goto got_pg; 4898 4899 cond_resched(); 4900 goto retry; 4901 } 4902 fail: 4903 warn_alloc(gfp_mask, ac->nodemask, 4904 "page allocation failure: order:%u", order); 4905 got_pg: 4906 return page; 4907 } 4908 4909 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4910 int preferred_nid, nodemask_t *nodemask, 4911 struct alloc_context *ac, gfp_t *alloc_mask, 4912 unsigned int *alloc_flags) 4913 { 4914 ac->highest_zoneidx = gfp_zone(gfp_mask); 4915 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4916 ac->nodemask = nodemask; 4917 ac->migratetype = gfp_migratetype(gfp_mask); 4918 4919 if (cpusets_enabled()) { 4920 *alloc_mask |= __GFP_HARDWALL; 4921 /* 4922 * When we are in interrupt context, the current task's context 4923 * is irrelevant. It means that any node is ok. 4924 */ 4925 if (!in_interrupt() && !ac->nodemask) 4926 ac->nodemask = &cpuset_current_mems_allowed; 4927 else 4928 *alloc_flags |= ALLOC_CPUSET; 4929 } 4930 4931 fs_reclaim_acquire(gfp_mask); 4932 fs_reclaim_release(gfp_mask); 4933 4934 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 4935 4936 if (should_fail_alloc_page(gfp_mask, order)) 4937 return false; 4938 4939 *alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags); 4940 4941 /* Dirty zone balancing only done in the fast path */ 4942 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 4943 4944 /* 4945 * The preferred zone is used for statistics but crucially it is 4946 * also used as the starting point for the zonelist iterator. It 4947 * may get reset for allocations that ignore memory policies. 4948 */ 4949 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4950 ac->highest_zoneidx, ac->nodemask); 4951 4952 return true; 4953 } 4954 4955 /* 4956 * This is the 'heart' of the zoned buddy allocator. 4957 */ 4958 struct page * 4959 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, 4960 nodemask_t *nodemask) 4961 { 4962 struct page *page; 4963 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4964 gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */ 4965 struct alloc_context ac = { }; 4966 4967 /* 4968 * There are several places where we assume that the order value is sane 4969 * so bail out early if the request is out of bounds. 4970 */ 4971 if (unlikely(order >= MAX_ORDER)) { 4972 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 4973 return NULL; 4974 } 4975 4976 gfp_mask &= gfp_allowed_mask; 4977 alloc_mask = gfp_mask; 4978 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) 4979 return NULL; 4980 4981 /* 4982 * Forbid the first pass from falling back to types that fragment 4983 * memory until all local zones are considered. 4984 */ 4985 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask); 4986 4987 /* First allocation attempt */ 4988 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); 4989 if (likely(page)) 4990 goto out; 4991 4992 /* 4993 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4994 * resp.
GFP_NOIO which has to be inherited for all allocation requests 4995 * from a particular context which has been marked by 4996 * memalloc_no{fs,io}_{save,restore}. 4997 */ 4998 alloc_mask = current_gfp_context(gfp_mask); 4999 ac.spread_dirty_pages = false; 5000 5001 /* 5002 * Restore the original nodemask if it was potentially replaced with 5003 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 5004 */ 5005 ac.nodemask = nodemask; 5006 5007 page = __alloc_pages_slowpath(alloc_mask, order, &ac); 5008 5009 out: 5010 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && 5011 unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) { 5012 __free_pages(page, order); 5013 page = NULL; 5014 } 5015 5016 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); 5017 5018 return page; 5019 } 5020 EXPORT_SYMBOL(__alloc_pages_nodemask); 5021 5022 /* 5023 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5024 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5025 * you need to access high mem. 5026 */ 5027 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 5028 { 5029 struct page *page; 5030 5031 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 5032 if (!page) 5033 return 0; 5034 return (unsigned long) page_address(page); 5035 } 5036 EXPORT_SYMBOL(__get_free_pages); 5037 5038 unsigned long get_zeroed_page(gfp_t gfp_mask) 5039 { 5040 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 5041 } 5042 EXPORT_SYMBOL(get_zeroed_page); 5043 5044 static inline void free_the_page(struct page *page, unsigned int order) 5045 { 5046 if (order == 0) /* Via pcp? */ 5047 free_unref_page(page); 5048 else 5049 __free_pages_ok(page, order, FPI_NONE); 5050 } 5051 5052 /** 5053 * __free_pages - Free pages allocated with alloc_pages(). 5054 * @page: The page pointer returned from alloc_pages(). 5055 * @order: The order of the allocation. 5056 * 5057 * This function can free multi-page allocations that are not compound 5058 * pages. It does not check that the @order passed in matches that of 5059 * the allocation, so it is easy to leak memory. Freeing more memory 5060 * than was allocated will probably emit a warning. 5061 * 5062 * If the last reference to this page is speculative, it will be released 5063 * by put_page() which only frees the first page of a non-compound 5064 * allocation. To prevent the remaining pages from being leaked, we free 5065 * the subsequent pages here. If you want to use the page's reference 5066 * count to decide when to free the allocation, you should allocate a 5067 * compound page, and use put_page() instead of __free_pages(). 5068 * 5069 * Context: May be called in interrupt context or while holding a normal 5070 * spinlock, but not in NMI context or while holding a raw spinlock. 5071 */ 5072 void __free_pages(struct page *page, unsigned int order) 5073 { 5074 if (put_page_testzero(page)) 5075 free_the_page(page, order); 5076 else if (!PageHead(page)) 5077 while (order-- > 0) 5078 free_the_page(page + (1 << order), order); 5079 } 5080 EXPORT_SYMBOL(__free_pages); 5081 5082 void free_pages(unsigned long addr, unsigned int order) 5083 { 5084 if (addr != 0) { 5085 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5086 __free_pages(virt_to_page((void *)addr), order); 5087 } 5088 } 5089 5090 EXPORT_SYMBOL(free_pages); 5091 5092 /* 5093 * Page Fragment: 5094 * An arbitrary-length arbitrary-offset area of memory which resides 5095 * within a 0 or higher order page. 
Multiple fragments within that page 5096 * are individually refcounted, in the page's reference counter. 5097 * 5098 * The page_frag functions below provide a simple allocation framework for 5099 * page fragments. This is used by the network stack and network device 5100 * drivers to provide a backing region of memory for use as either an 5101 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 5102 */ 5103 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 5104 gfp_t gfp_mask) 5105 { 5106 struct page *page = NULL; 5107 gfp_t gfp = gfp_mask; 5108 5109 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5110 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 5111 __GFP_NOMEMALLOC; 5112 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 5113 PAGE_FRAG_CACHE_MAX_ORDER); 5114 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 5115 #endif 5116 if (unlikely(!page)) 5117 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 5118 5119 nc->va = page ? page_address(page) : NULL; 5120 5121 return page; 5122 } 5123 5124 void __page_frag_cache_drain(struct page *page, unsigned int count) 5125 { 5126 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 5127 5128 if (page_ref_sub_and_test(page, count)) 5129 free_the_page(page, compound_order(page)); 5130 } 5131 EXPORT_SYMBOL(__page_frag_cache_drain); 5132 5133 void *page_frag_alloc(struct page_frag_cache *nc, 5134 unsigned int fragsz, gfp_t gfp_mask) 5135 { 5136 unsigned int size = PAGE_SIZE; 5137 struct page *page; 5138 int offset; 5139 5140 if (unlikely(!nc->va)) { 5141 refill: 5142 page = __page_frag_cache_refill(nc, gfp_mask); 5143 if (!page) 5144 return NULL; 5145 5146 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5147 /* if size can vary use size else just use PAGE_SIZE */ 5148 size = nc->size; 5149 #endif 5150 /* Even if we own the page, we do not use atomic_set(). 5151 * This would break get_page_unless_zero() users. 5152 */ 5153 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 5154 5155 /* reset page count bias and offset to start of new frag */ 5156 nc->pfmemalloc = page_is_pfmemalloc(page); 5157 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5158 nc->offset = size; 5159 } 5160 5161 offset = nc->offset - fragsz; 5162 if (unlikely(offset < 0)) { 5163 page = virt_to_page(nc->va); 5164 5165 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 5166 goto refill; 5167 5168 if (unlikely(nc->pfmemalloc)) { 5169 free_the_page(page, compound_order(page)); 5170 goto refill; 5171 } 5172 5173 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5174 /* if size can vary use size else just use PAGE_SIZE */ 5175 size = nc->size; 5176 #endif 5177 /* OK, page count is 0, we can safely set it */ 5178 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 5179 5180 /* reset page count bias and offset to start of new frag */ 5181 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5182 offset = size - fragsz; 5183 } 5184 5185 nc->pagecnt_bias--; 5186 nc->offset = offset; 5187 5188 return nc->va + offset; 5189 } 5190 EXPORT_SYMBOL(page_frag_alloc); 5191 5192 /* 5193 * Frees a page fragment allocated out of either a compound or order 0 page. 
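 *
 * Minimal usage sketch of the page_frag API (illustrative only; the
 * cache below is a made-up example and must be serialized by its owner,
 * e.g. by being per-CPU or protected by a lock):
 *
 *	static struct page_frag_cache example_frag_cache;
 *
 *	void *buf = page_frag_alloc(&example_frag_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		... fill the 256-byte fragment ...
 *		page_frag_free(buf);
 *	}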
5194 */ 5195 void page_frag_free(void *addr) 5196 { 5197 struct page *page = virt_to_head_page(addr); 5198 5199 if (unlikely(put_page_testzero(page))) 5200 free_the_page(page, compound_order(page)); 5201 } 5202 EXPORT_SYMBOL(page_frag_free); 5203 5204 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5205 size_t size) 5206 { 5207 if (addr) { 5208 unsigned long alloc_end = addr + (PAGE_SIZE << order); 5209 unsigned long used = addr + PAGE_ALIGN(size); 5210 5211 split_page(virt_to_page((void *)addr), order); 5212 while (used < alloc_end) { 5213 free_page(used); 5214 used += PAGE_SIZE; 5215 } 5216 } 5217 return (void *)addr; 5218 } 5219 5220 /** 5221 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5222 * @size: the number of bytes to allocate 5223 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5224 * 5225 * This function is similar to alloc_pages(), except that it allocates the 5226 * minimum number of pages to satisfy the request. alloc_pages() can only 5227 * allocate memory in power-of-two pages. 5228 * 5229 * This function is also limited by MAX_ORDER. 5230 * 5231 * Memory allocated by this function must be released by free_pages_exact(). 5232 * 5233 * Return: pointer to the allocated area or %NULL in case of error. 5234 */ 5235 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 5236 { 5237 unsigned int order = get_order(size); 5238 unsigned long addr; 5239 5240 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP)) 5241 gfp_mask &= ~__GFP_COMP; 5242 5243 addr = __get_free_pages(gfp_mask, order); 5244 return make_alloc_exact(addr, order, size); 5245 } 5246 EXPORT_SYMBOL(alloc_pages_exact); 5247 5248 /** 5249 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5250 * pages on a node. 5251 * @nid: the preferred node ID where memory should be allocated 5252 * @size: the number of bytes to allocate 5253 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5254 * 5255 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5256 * back. 5257 * 5258 * Return: pointer to the allocated area or %NULL in case of error. 5259 */ 5260 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 5261 { 5262 unsigned int order = get_order(size); 5263 struct page *p; 5264 5265 if (WARN_ON_ONCE(gfp_mask & __GFP_COMP)) 5266 gfp_mask &= ~__GFP_COMP; 5267 5268 p = alloc_pages_node(nid, gfp_mask, order); 5269 if (!p) 5270 return NULL; 5271 return make_alloc_exact((unsigned long)page_address(p), order, size); 5272 } 5273 5274 /** 5275 * free_pages_exact - release memory allocated via alloc_pages_exact() 5276 * @virt: the value returned by alloc_pages_exact. 5277 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5278 * 5279 * Release the memory allocated by a previous call to alloc_pages_exact. 5280 */ 5281 void free_pages_exact(void *virt, size_t size) 5282 { 5283 unsigned long addr = (unsigned long)virt; 5284 unsigned long end = addr + PAGE_ALIGN(size); 5285 5286 while (addr < end) { 5287 free_page(addr); 5288 addr += PAGE_SIZE; 5289 } 5290 } 5291 EXPORT_SYMBOL(free_pages_exact); 5292 5293 /** 5294 * nr_free_zone_pages - count number of pages beyond high watermark 5295 * @offset: The zone index of the highest zone 5296 * 5297 * nr_free_zone_pages() counts the number of pages which are beyond the 5298 * high watermark within all zones at or below a given zone index. 
For each 5299 * zone, the number of pages is calculated as: 5300 * 5301 * nr_free_zone_pages = managed_pages - high_pages 5302 * 5303 * Return: number of pages beyond high watermark. 5304 */ 5305 static unsigned long nr_free_zone_pages(int offset) 5306 { 5307 struct zoneref *z; 5308 struct zone *zone; 5309 5310 /* Just pick one node, since fallback list is circular */ 5311 unsigned long sum = 0; 5312 5313 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5314 5315 for_each_zone_zonelist(zone, z, zonelist, offset) { 5316 unsigned long size = zone_managed_pages(zone); 5317 unsigned long high = high_wmark_pages(zone); 5318 if (size > high) 5319 sum += size - high; 5320 } 5321 5322 return sum; 5323 } 5324 5325 /** 5326 * nr_free_buffer_pages - count number of pages beyond high watermark 5327 * 5328 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5329 * watermark within ZONE_DMA and ZONE_NORMAL. 5330 * 5331 * Return: number of pages beyond high watermark within ZONE_DMA and 5332 * ZONE_NORMAL. 5333 */ 5334 unsigned long nr_free_buffer_pages(void) 5335 { 5336 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5337 } 5338 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5339 5340 static inline void show_node(struct zone *zone) 5341 { 5342 if (IS_ENABLED(CONFIG_NUMA)) 5343 printk("Node %d ", zone_to_nid(zone)); 5344 } 5345 5346 long si_mem_available(void) 5347 { 5348 long available; 5349 unsigned long pagecache; 5350 unsigned long wmark_low = 0; 5351 unsigned long pages[NR_LRU_LISTS]; 5352 unsigned long reclaimable; 5353 struct zone *zone; 5354 int lru; 5355 5356 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 5357 pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 5358 5359 for_each_zone(zone) 5360 wmark_low += low_wmark_pages(zone); 5361 5362 /* 5363 * Estimate the amount of memory available for userspace allocations, 5364 * without causing swapping. 5365 */ 5366 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; 5367 5368 /* 5369 * Not all the page cache can be freed, otherwise the system will 5370 * start swapping. Assume at least half of the page cache, or the 5371 * low watermark worth of cache, needs to stay. 5372 */ 5373 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 5374 pagecache -= min(pagecache / 2, wmark_low); 5375 available += pagecache; 5376 5377 /* 5378 * Part of the reclaimable slab and other kernel memory consists of 5379 * items that are in use, and cannot be freed. Cap this estimate at the 5380 * low watermark. 
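 *
 * Illustrative arithmetic for the whole estimate (numbers invented):
 * free = 10000, totalreserve_pages = 2000, file LRU pages = 6000,
 * wmark_low = 1000, reclaimable = 4000. Then available starts at 8000,
 * the page cache contributes 6000 - min(3000, 1000) = 5000 and the
 * reclaimable kernel memory contributes 4000 - min(2000, 1000) = 3000,
 * for an estimate of 16000 pages.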
5381 */ 5382 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + 5383 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); 5384 available += reclaimable - min(reclaimable / 2, wmark_low); 5385 5386 if (available < 0) 5387 available = 0; 5388 return available; 5389 } 5390 EXPORT_SYMBOL_GPL(si_mem_available); 5391 5392 void si_meminfo(struct sysinfo *val) 5393 { 5394 val->totalram = totalram_pages(); 5395 val->sharedram = global_node_page_state(NR_SHMEM); 5396 val->freeram = global_zone_page_state(NR_FREE_PAGES); 5397 val->bufferram = nr_blockdev_pages(); 5398 val->totalhigh = totalhigh_pages(); 5399 val->freehigh = nr_free_highpages(); 5400 val->mem_unit = PAGE_SIZE; 5401 } 5402 5403 EXPORT_SYMBOL(si_meminfo); 5404 5405 #ifdef CONFIG_NUMA 5406 void si_meminfo_node(struct sysinfo *val, int nid) 5407 { 5408 int zone_type; /* needs to be signed */ 5409 unsigned long managed_pages = 0; 5410 unsigned long managed_highpages = 0; 5411 unsigned long free_highpages = 0; 5412 pg_data_t *pgdat = NODE_DATA(nid); 5413 5414 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 5415 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); 5416 val->totalram = managed_pages; 5417 val->sharedram = node_page_state(pgdat, NR_SHMEM); 5418 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 5419 #ifdef CONFIG_HIGHMEM 5420 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 5421 struct zone *zone = &pgdat->node_zones[zone_type]; 5422 5423 if (is_highmem(zone)) { 5424 managed_highpages += zone_managed_pages(zone); 5425 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 5426 } 5427 } 5428 val->totalhigh = managed_highpages; 5429 val->freehigh = free_highpages; 5430 #else 5431 val->totalhigh = managed_highpages; 5432 val->freehigh = free_highpages; 5433 #endif 5434 val->mem_unit = PAGE_SIZE; 5435 } 5436 #endif 5437 5438 /* 5439 * Determine whether the node should be displayed or not, depending on whether 5440 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 5441 */ 5442 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 5443 { 5444 if (!(flags & SHOW_MEM_FILTER_NODES)) 5445 return false; 5446 5447 /* 5448 * no node mask - aka implicit memory numa policy. Do not bother with 5449 * the synchronization - read_mems_allowed_begin - because we do not 5450 * have to be precise here. 5451 */ 5452 if (!nodemask) 5453 nodemask = &cpuset_current_mems_allowed; 5454 5455 return !node_isset(nid, *nodemask); 5456 } 5457 5458 #define K(x) ((x) << (PAGE_SHIFT-10)) 5459 5460 static void show_migration_types(unsigned char type) 5461 { 5462 static const char types[MIGRATE_TYPES] = { 5463 [MIGRATE_UNMOVABLE] = 'U', 5464 [MIGRATE_MOVABLE] = 'M', 5465 [MIGRATE_RECLAIMABLE] = 'E', 5466 [MIGRATE_HIGHATOMIC] = 'H', 5467 #ifdef CONFIG_CMA 5468 [MIGRATE_CMA] = 'C', 5469 #endif 5470 #ifdef CONFIG_MEMORY_ISOLATION 5471 [MIGRATE_ISOLATE] = 'I', 5472 #endif 5473 }; 5474 char tmp[MIGRATE_TYPES + 1]; 5475 char *p = tmp; 5476 int i; 5477 5478 for (i = 0; i < MIGRATE_TYPES; i++) { 5479 if (type & (1 << i)) 5480 *p++ = types[i]; 5481 } 5482 5483 *p = '\0'; 5484 printk(KERN_CONT "(%s) ", tmp); 5485 } 5486 5487 /* 5488 * Show free area list (used inside shift_scroll-lock stuff) 5489 * We also calculate the percentage fragmentation. We do this by counting the 5490 * memory on each free list with the exception of the first item on the list. 5491 * 5492 * Bits in @filter: 5493 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 5494 * cpuset. 
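 *
 * For example (illustrative), show_free_areas(SHOW_MEM_FILTER_NODES,
 * NULL) limits the per-node and per-zone output to nodes in
 * cpuset_current_mems_allowed via show_mem_node_skip() below, while a
 * filter of 0 prints every populated node and zone.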
5495 */ 5496 void show_free_areas(unsigned int filter, nodemask_t *nodemask) 5497 { 5498 unsigned long free_pcp = 0; 5499 int cpu; 5500 struct zone *zone; 5501 pg_data_t *pgdat; 5502 5503 for_each_populated_zone(zone) { 5504 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5505 continue; 5506 5507 for_each_online_cpu(cpu) 5508 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 5509 } 5510 5511 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 5512 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 5513 " unevictable:%lu dirty:%lu writeback:%lu\n" 5514 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 5515 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 5516 " free:%lu free_pcp:%lu free_cma:%lu\n", 5517 global_node_page_state(NR_ACTIVE_ANON), 5518 global_node_page_state(NR_INACTIVE_ANON), 5519 global_node_page_state(NR_ISOLATED_ANON), 5520 global_node_page_state(NR_ACTIVE_FILE), 5521 global_node_page_state(NR_INACTIVE_FILE), 5522 global_node_page_state(NR_ISOLATED_FILE), 5523 global_node_page_state(NR_UNEVICTABLE), 5524 global_node_page_state(NR_FILE_DIRTY), 5525 global_node_page_state(NR_WRITEBACK), 5526 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), 5527 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), 5528 global_node_page_state(NR_FILE_MAPPED), 5529 global_node_page_state(NR_SHMEM), 5530 global_node_page_state(NR_PAGETABLE), 5531 global_zone_page_state(NR_BOUNCE), 5532 global_zone_page_state(NR_FREE_PAGES), 5533 free_pcp, 5534 global_zone_page_state(NR_FREE_CMA_PAGES)); 5535 5536 for_each_online_pgdat(pgdat) { 5537 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 5538 continue; 5539 5540 printk("Node %d" 5541 " active_anon:%lukB" 5542 " inactive_anon:%lukB" 5543 " active_file:%lukB" 5544 " inactive_file:%lukB" 5545 " unevictable:%lukB" 5546 " isolated(anon):%lukB" 5547 " isolated(file):%lukB" 5548 " mapped:%lukB" 5549 " dirty:%lukB" 5550 " writeback:%lukB" 5551 " shmem:%lukB" 5552 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5553 " shmem_thp: %lukB" 5554 " shmem_pmdmapped: %lukB" 5555 " anon_thp: %lukB" 5556 #endif 5557 " writeback_tmp:%lukB" 5558 " kernel_stack:%lukB" 5559 #ifdef CONFIG_SHADOW_CALL_STACK 5560 " shadow_call_stack:%lukB" 5561 #endif 5562 " pagetables:%lukB" 5563 " all_unreclaimable? %s" 5564 "\n", 5565 pgdat->node_id, 5566 K(node_page_state(pgdat, NR_ACTIVE_ANON)), 5567 K(node_page_state(pgdat, NR_INACTIVE_ANON)), 5568 K(node_page_state(pgdat, NR_ACTIVE_FILE)), 5569 K(node_page_state(pgdat, NR_INACTIVE_FILE)), 5570 K(node_page_state(pgdat, NR_UNEVICTABLE)), 5571 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 5572 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 5573 K(node_page_state(pgdat, NR_FILE_MAPPED)), 5574 K(node_page_state(pgdat, NR_FILE_DIRTY)), 5575 K(node_page_state(pgdat, NR_WRITEBACK)), 5576 K(node_page_state(pgdat, NR_SHMEM)), 5577 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5578 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), 5579 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) 5580 * HPAGE_PMD_NR), 5581 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), 5582 #endif 5583 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 5584 node_page_state(pgdat, NR_KERNEL_STACK_KB), 5585 #ifdef CONFIG_SHADOW_CALL_STACK 5586 node_page_state(pgdat, NR_KERNEL_SCS_KB), 5587 #endif 5588 K(node_page_state(pgdat, NR_PAGETABLE)), 5589 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
5590 "yes" : "no"); 5591 } 5592 5593 for_each_populated_zone(zone) { 5594 int i; 5595 5596 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5597 continue; 5598 5599 free_pcp = 0; 5600 for_each_online_cpu(cpu) 5601 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 5602 5603 show_node(zone); 5604 printk(KERN_CONT 5605 "%s" 5606 " free:%lukB" 5607 " min:%lukB" 5608 " low:%lukB" 5609 " high:%lukB" 5610 " reserved_highatomic:%luKB" 5611 " active_anon:%lukB" 5612 " inactive_anon:%lukB" 5613 " active_file:%lukB" 5614 " inactive_file:%lukB" 5615 " unevictable:%lukB" 5616 " writepending:%lukB" 5617 " present:%lukB" 5618 " managed:%lukB" 5619 " mlocked:%lukB" 5620 " bounce:%lukB" 5621 " free_pcp:%lukB" 5622 " local_pcp:%ukB" 5623 " free_cma:%lukB" 5624 "\n", 5625 zone->name, 5626 K(zone_page_state(zone, NR_FREE_PAGES)), 5627 K(min_wmark_pages(zone)), 5628 K(low_wmark_pages(zone)), 5629 K(high_wmark_pages(zone)), 5630 K(zone->nr_reserved_highatomic), 5631 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 5632 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 5633 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 5634 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 5635 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 5636 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 5637 K(zone->present_pages), 5638 K(zone_managed_pages(zone)), 5639 K(zone_page_state(zone, NR_MLOCK)), 5640 K(zone_page_state(zone, NR_BOUNCE)), 5641 K(free_pcp), 5642 K(this_cpu_read(zone->pageset->pcp.count)), 5643 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 5644 printk("lowmem_reserve[]:"); 5645 for (i = 0; i < MAX_NR_ZONES; i++) 5646 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 5647 printk(KERN_CONT "\n"); 5648 } 5649 5650 for_each_populated_zone(zone) { 5651 unsigned int order; 5652 unsigned long nr[MAX_ORDER], flags, total = 0; 5653 unsigned char types[MAX_ORDER]; 5654 5655 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5656 continue; 5657 show_node(zone); 5658 printk(KERN_CONT "%s: ", zone->name); 5659 5660 spin_lock_irqsave(&zone->lock, flags); 5661 for (order = 0; order < MAX_ORDER; order++) { 5662 struct free_area *area = &zone->free_area[order]; 5663 int type; 5664 5665 nr[order] = area->nr_free; 5666 total += nr[order] << order; 5667 5668 types[order] = 0; 5669 for (type = 0; type < MIGRATE_TYPES; type++) { 5670 if (!free_area_empty(area, type)) 5671 types[order] |= 1 << type; 5672 } 5673 } 5674 spin_unlock_irqrestore(&zone->lock, flags); 5675 for (order = 0; order < MAX_ORDER; order++) { 5676 printk(KERN_CONT "%lu*%lukB ", 5677 nr[order], K(1UL) << order); 5678 if (nr[order]) 5679 show_migration_types(types[order]); 5680 } 5681 printk(KERN_CONT "= %lukB\n", K(total)); 5682 } 5683 5684 hugetlb_show_meminfo(); 5685 5686 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 5687 5688 show_swap_cache_info(); 5689 } 5690 5691 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5692 { 5693 zoneref->zone = zone; 5694 zoneref->zone_idx = zone_idx(zone); 5695 } 5696 5697 /* 5698 * Builds allocation fallback zone lists. 5699 * 5700 * Add all populated zones of a node to the zonelist. 
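 * Zones are added from the highest zone type down to the lowest (zones with no managed pages are skipped), so allocations fall back from e.g. ZONE_NORMAL towards ZONE_DMA as the higher zones run out.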
5701 */ 5702 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 5703 { 5704 struct zone *zone; 5705 enum zone_type zone_type = MAX_NR_ZONES; 5706 int nr_zones = 0; 5707 5708 do { 5709 zone_type--; 5710 zone = pgdat->node_zones + zone_type; 5711 if (managed_zone(zone)) { 5712 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 5713 check_highest_zone(zone_type); 5714 } 5715 } while (zone_type); 5716 5717 return nr_zones; 5718 } 5719 5720 #ifdef CONFIG_NUMA 5721 5722 static int __parse_numa_zonelist_order(char *s) 5723 { 5724 /* 5725 * We used to support different zonelist ordering modes, but they turned 5726 * out to be just not useful. Let's keep the warning in place 5727 * if somebody still uses the command line parameter so that we do 5728 * not fail it silently. 5729 */ 5730 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 5731 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 5732 return -EINVAL; 5733 } 5734 return 0; 5735 } 5736 5737 char numa_zonelist_order[] = "Node"; 5738 5739 /* 5740 * sysctl handler for numa_zonelist_order 5741 */ 5742 int numa_zonelist_order_handler(struct ctl_table *table, int write, 5743 void *buffer, size_t *length, loff_t *ppos) 5744 { 5745 if (write) 5746 return __parse_numa_zonelist_order(buffer); 5747 return proc_dostring(table, write, buffer, length, ppos); 5748 } 5749 5750 5751 #define MAX_NODE_LOAD (nr_online_nodes) 5752 static int node_load[MAX_NUMNODES]; 5753 5754 /** 5755 * find_next_best_node - find the next node that should appear in a given node's fallback list 5756 * @node: node whose fallback list we're appending 5757 * @used_node_mask: nodemask_t of already used nodes 5758 * 5759 * We use a number of factors to determine which is the next node that should 5760 * appear on a given node's fallback list. The node should not have appeared 5761 * already in @node's fallback list, and it should be the next closest node 5762 * according to the distance array (which contains arbitrary distance values 5763 * from each node to each node in the system), and should also prefer nodes 5764 * with no CPUs, since presumably they'll have very little allocation pressure 5765 * on them otherwise. 5766 * 5767 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 5768 */ 5769 static int find_next_best_node(int node, nodemask_t *used_node_mask) 5770 { 5771 int n, val; 5772 int min_val = INT_MAX; 5773 int best_node = NUMA_NO_NODE; 5774 5775 /* Use the local node if we haven't already */ 5776 if (!node_isset(node, *used_node_mask)) { 5777 node_set(node, *used_node_mask); 5778 return node; 5779 } 5780 5781 for_each_node_state(n, N_MEMORY) { 5782 5783 /* Don't want a node to appear more than once */ 5784 if (node_isset(n, *used_node_mask)) 5785 continue; 5786 5787 /* Use the distance array to find the distance */ 5788 val = node_distance(node, n); 5789 5790 /* Penalize nodes under us ("prefer the next node") */ 5791 val += (n < node); 5792 5793 /* Give preference to headless and unused nodes */ 5794 if (!cpumask_empty(cpumask_of_node(n))) 5795 val += PENALTY_FOR_NODE_WITH_CPUS; 5796 5797 /* Slight preference for less loaded node */ 5798 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 5799 val += node_load[n]; 5800 5801 if (val < min_val) { 5802 min_val = val; 5803 best_node = n; 5804 } 5805 } 5806 5807 if (best_node >= 0) 5808 node_set(best_node, *used_node_mask); 5809 5810 return best_node; 5811 } 5812 5813 5814 /* 5815 * Build zonelists ordered by node and zones within node.
5816 * This results in maximum locality--normal zone overflows into local 5817 * DMA zone, if any--but risks exhausting DMA zone. 5818 */ 5819 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 5820 unsigned nr_nodes) 5821 { 5822 struct zoneref *zonerefs; 5823 int i; 5824 5825 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5826 5827 for (i = 0; i < nr_nodes; i++) { 5828 int nr_zones; 5829 5830 pg_data_t *node = NODE_DATA(node_order[i]); 5831 5832 nr_zones = build_zonerefs_node(node, zonerefs); 5833 zonerefs += nr_zones; 5834 } 5835 zonerefs->zone = NULL; 5836 zonerefs->zone_idx = 0; 5837 } 5838 5839 /* 5840 * Build gfp_thisnode zonelists 5841 */ 5842 static void build_thisnode_zonelists(pg_data_t *pgdat) 5843 { 5844 struct zoneref *zonerefs; 5845 int nr_zones; 5846 5847 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 5848 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5849 zonerefs += nr_zones; 5850 zonerefs->zone = NULL; 5851 zonerefs->zone_idx = 0; 5852 } 5853 5854 /* 5855 * Build zonelists ordered by zone and nodes within zones. 5856 * This results in conserving DMA zone[s] until all Normal memory is 5857 * exhausted, but results in overflowing to remote node while memory 5858 * may still exist in local DMA zone. 5859 */ 5860 5861 static void build_zonelists(pg_data_t *pgdat) 5862 { 5863 static int node_order[MAX_NUMNODES]; 5864 int node, load, nr_nodes = 0; 5865 nodemask_t used_mask = NODE_MASK_NONE; 5866 int local_node, prev_node; 5867 5868 /* NUMA-aware ordering of nodes */ 5869 local_node = pgdat->node_id; 5870 load = nr_online_nodes; 5871 prev_node = local_node; 5872 5873 memset(node_order, 0, sizeof(node_order)); 5874 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5875 /* 5876 * We don't want to pressure a particular node. 5877 * So adding penalty to the first node in same 5878 * distance group to make it round-robin. 5879 */ 5880 if (node_distance(local_node, node) != 5881 node_distance(local_node, prev_node)) 5882 node_load[node] = load; 5883 5884 node_order[nr_nodes++] = node; 5885 prev_node = node; 5886 load--; 5887 } 5888 5889 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5890 build_thisnode_zonelists(pgdat); 5891 } 5892 5893 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5894 /* 5895 * Return node id of node used for "local" allocations. 5896 * I.e., first node id of first zone in arg node's generic zonelist. 5897 * Used for initializing percpu 'numa_mem', which is used primarily 5898 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5899 */ 5900 int local_memory_node(int node) 5901 { 5902 struct zoneref *z; 5903 5904 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5905 gfp_zone(GFP_KERNEL), 5906 NULL); 5907 return zone_to_nid(z->zone); 5908 } 5909 #endif 5910 5911 static void setup_min_unmapped_ratio(void); 5912 static void setup_min_slab_ratio(void); 5913 #else /* CONFIG_NUMA */ 5914 5915 static void build_zonelists(pg_data_t *pgdat) 5916 { 5917 int node, local_node; 5918 struct zoneref *zonerefs; 5919 int nr_zones; 5920 5921 local_node = pgdat->node_id; 5922 5923 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5924 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5925 zonerefs += nr_zones; 5926 5927 /* 5928 * Now we build the zonelist so that it contains the zones 5929 * of all the other nodes. 
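 * For example, with four online nodes and local node 2, zones are added for nodes 2, 3, 0 and 1, in that order.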
5930 * We don't want to pressure a particular node, so when 5931 * building the zones for node N, we make sure that the 5932 * zones coming right after the local ones are those from 5933 * node N+1 (modulo N) 5934 */ 5935 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 5936 if (!node_online(node)) 5937 continue; 5938 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5939 zonerefs += nr_zones; 5940 } 5941 for (node = 0; node < local_node; node++) { 5942 if (!node_online(node)) 5943 continue; 5944 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5945 zonerefs += nr_zones; 5946 } 5947 5948 zonerefs->zone = NULL; 5949 zonerefs->zone_idx = 0; 5950 } 5951 5952 #endif /* CONFIG_NUMA */ 5953 5954 /* 5955 * Boot pageset table. One per cpu which is going to be used for all 5956 * zones and all nodes. The parameters will be set in such a way 5957 * that an item put on a list will immediately be handed over to 5958 * the buddy list. This is safe since pageset manipulation is done 5959 * with interrupts disabled. 5960 * 5961 * The boot_pagesets must be kept even after bootup is complete for 5962 * unused processors and/or zones. They do play a role for bootstrapping 5963 * hotplugged processors. 5964 * 5965 * zoneinfo_show() and maybe other functions do 5966 * not check if the processor is online before following the pageset pointer. 5967 * Other parts of the kernel may not check if the zone is available. 5968 */ 5969 static void pageset_init(struct per_cpu_pageset *p); 5970 /* These effectively disable the pcplists in the boot pageset completely */ 5971 #define BOOT_PAGESET_HIGH 0 5972 #define BOOT_PAGESET_BATCH 1 5973 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 5974 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 5975 5976 static void __build_all_zonelists(void *data) 5977 { 5978 int nid; 5979 int __maybe_unused cpu; 5980 pg_data_t *self = data; 5981 static DEFINE_SPINLOCK(lock); 5982 5983 spin_lock(&lock); 5984 5985 #ifdef CONFIG_NUMA 5986 memset(node_load, 0, sizeof(node_load)); 5987 #endif 5988 5989 /* 5990 * This node is hotadded and no memory is yet present. So just 5991 * building zonelists is fine - no need to touch other nodes. 5992 */ 5993 if (self && !node_online(self->node_id)) { 5994 build_zonelists(self); 5995 } else { 5996 for_each_online_node(nid) { 5997 pg_data_t *pgdat = NODE_DATA(nid); 5998 5999 build_zonelists(pgdat); 6000 } 6001 6002 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6003 /* 6004 * We now know the "local memory node" for each node-- 6005 * i.e., the node of the first zone in the generic zonelist. 6006 * Set up numa_mem percpu variable for on-line cpus. During 6007 * boot, only the boot cpu should be on-line; we'll init the 6008 * secondary cpus' numa_mem as they come on-line. During 6009 * node/memory hotplug, we'll fixup all on-line cpus. 6010 */ 6011 for_each_online_cpu(cpu) 6012 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 6013 #endif 6014 } 6015 6016 spin_unlock(&lock); 6017 } 6018 6019 static noinline void __init 6020 build_all_zonelists_init(void) 6021 { 6022 int cpu; 6023 6024 __build_all_zonelists(NULL); 6025 6026 /* 6027 * Initialize the boot_pagesets that are going to be used 6028 * for bootstrapping processors. The real pagesets for 6029 * each zone will be allocated later when the per cpu 6030 * allocator is available. 
6031 * 6032 * boot_pagesets are used also for bootstrapping offline 6033 * cpus if the system is already booted because the pagesets 6034 * are needed to initialize allocators on a specific cpu too. 6035 * F.e. the percpu allocator needs the page allocator which 6036 * needs the percpu allocator in order to allocate its pagesets 6037 * (a chicken-egg dilemma). 6038 */ 6039 for_each_possible_cpu(cpu) 6040 pageset_init(&per_cpu(boot_pageset, cpu)); 6041 6042 mminit_verify_zonelist(); 6043 cpuset_init_current_mems_allowed(); 6044 } 6045 6046 /* 6047 * build_all_zonelists_init() is __init and may only be called while 6048 * system_state == SYSTEM_BOOTING. 6049 * __ref due to call of __init annotated helper build_all_zonelists_init 6050 * [protected by SYSTEM_BOOTING]. 6051 */ 6052 void __ref build_all_zonelists(pg_data_t *pgdat) 6053 { 6054 unsigned long vm_total_pages; 6055 6056 if (system_state == SYSTEM_BOOTING) { 6057 build_all_zonelists_init(); 6058 } else { 6059 __build_all_zonelists(pgdat); 6060 /* cpuset refresh routine should be here */ 6061 } 6062 /* Get the number of free pages beyond high watermark in all zones. */ 6063 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 6064 /* 6065 * Disable grouping by mobility if the number of pages in the 6066 * system is too low to allow the mechanism to work. It would be 6067 * more accurate, but expensive to check per-zone. This check is 6068 * made on memory-hotadd so a system can start with mobility 6069 * disabled and enable it later 6070 */ 6071 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 6072 page_group_by_mobility_disabled = 1; 6073 else 6074 page_group_by_mobility_disabled = 0; 6075 6076 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 6077 nr_online_nodes, 6078 page_group_by_mobility_disabled ? "off" : "on", 6079 vm_total_pages); 6080 #ifdef CONFIG_NUMA 6081 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 6082 #endif 6083 } 6084 6085 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 6086 static bool __meminit 6087 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 6088 { 6089 static struct memblock_region *r; 6090 6091 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 6092 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 6093 for_each_mem_region(r) { 6094 if (*pfn < memblock_region_memory_end_pfn(r)) 6095 break; 6096 } 6097 } 6098 if (*pfn >= memblock_region_memory_base_pfn(r) && 6099 memblock_is_mirror(r)) { 6100 *pfn = memblock_region_memory_end_pfn(r); 6101 return true; 6102 } 6103 } 6104 return false; 6105 } 6106 6107 /* 6108 * Initially all pages are reserved - free ones are freed 6109 * up by memblock_free_all() once the early boot process is 6110 * done. Non-atomic initialization, single-pass. 6111 * 6112 * All aligned pageblocks are initialized to the specified migratetype 6113 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 6114 * zone stats (e.g., nr_isolate_pageblock) are touched. 6115 */ 6116 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 6117 unsigned long start_pfn, 6118 enum meminit_context context, 6119 struct vmem_altmap *altmap, int migratetype) 6120 { 6121 unsigned long pfn, end_pfn = start_pfn + size; 6122 struct page *page; 6123 6124 if (highest_memmap_pfn < end_pfn - 1) 6125 highest_memmap_pfn = end_pfn - 1; 6126 6127 #ifdef CONFIG_ZONE_DEVICE 6128 /* 6129 * Honor reservation requested by the driver for this ZONE_DEVICE 6130 * memory.
We limit the total number of pages to initialize to just 6131 * those that might contain the memory mapping. We will defer the 6132 * ZONE_DEVICE page initialization until after we have released 6133 * the hotplug lock. 6134 */ 6135 if (zone == ZONE_DEVICE) { 6136 if (!altmap) 6137 return; 6138 6139 if (start_pfn == altmap->base_pfn) 6140 start_pfn += altmap->reserve; 6141 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6142 } 6143 #endif 6144 6145 for (pfn = start_pfn; pfn < end_pfn; ) { 6146 /* 6147 * There can be holes in boot-time mem_map[]s handed to this 6148 * function. They do not exist on hotplugged memory. 6149 */ 6150 if (context == MEMINIT_EARLY) { 6151 if (overlap_memmap_init(zone, &pfn)) 6152 continue; 6153 if (defer_init(nid, pfn, end_pfn)) 6154 break; 6155 } 6156 6157 page = pfn_to_page(pfn); 6158 __init_single_page(page, pfn, zone, nid); 6159 if (context == MEMINIT_HOTPLUG) 6160 __SetPageReserved(page); 6161 6162 /* 6163 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 6164 * such that unmovable allocations won't be scattered all 6165 * over the place during system boot. 6166 */ 6167 if (IS_ALIGNED(pfn, pageblock_nr_pages)) { 6168 set_pageblock_migratetype(page, migratetype); 6169 cond_resched(); 6170 } 6171 pfn++; 6172 } 6173 } 6174 6175 #ifdef CONFIG_ZONE_DEVICE 6176 void __ref memmap_init_zone_device(struct zone *zone, 6177 unsigned long start_pfn, 6178 unsigned long nr_pages, 6179 struct dev_pagemap *pgmap) 6180 { 6181 unsigned long pfn, end_pfn = start_pfn + nr_pages; 6182 struct pglist_data *pgdat = zone->zone_pgdat; 6183 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 6184 unsigned long zone_idx = zone_idx(zone); 6185 unsigned long start = jiffies; 6186 int nid = pgdat->node_id; 6187 6188 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) 6189 return; 6190 6191 /* 6192 * The call to memmap_init_zone should have already taken care 6193 * of the pages reserved for the memmap, so we can just jump to 6194 * the end of that region and start processing the device pages. 6195 */ 6196 if (altmap) { 6197 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6198 nr_pages = end_pfn - start_pfn; 6199 } 6200 6201 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 6202 struct page *page = pfn_to_page(pfn); 6203 6204 __init_single_page(page, pfn, zone_idx, nid); 6205 6206 /* 6207 * Mark page reserved as it will need to wait for onlining 6208 * phase for it to be fully associated with a zone. 6209 * 6210 * We can use the non-atomic __set_bit operation for setting 6211 * the flag as we are still initializing the pages. 6212 */ 6213 __SetPageReserved(page); 6214 6215 /* 6216 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 6217 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 6218 * ever freed or placed on a driver-private list. 6219 */ 6220 page->pgmap = pgmap; 6221 page->zone_device_data = NULL; 6222 6223 /* 6224 * Mark the block movable so that blocks are reserved for 6225 * movable at startup. This will force kernel allocations 6226 * to reserve their blocks rather than leaking throughout 6227 * the address space during boot when many long-lived 6228 * kernel allocations are made. 
6229 * 6230 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap 6231 * because this is done early in section_activate() 6232 */ 6233 if (IS_ALIGNED(pfn, pageblock_nr_pages)) { 6234 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 6235 cond_resched(); 6236 } 6237 } 6238 6239 pr_info("%s initialised %lu pages in %ums\n", __func__, 6240 nr_pages, jiffies_to_msecs(jiffies - start)); 6241 } 6242 6243 #endif 6244 static void __meminit zone_init_free_lists(struct zone *zone) 6245 { 6246 unsigned int order, t; 6247 for_each_migratetype_order(order, t) { 6248 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 6249 zone->free_area[order].nr_free = 0; 6250 } 6251 } 6252 6253 void __meminit __weak memmap_init(unsigned long size, int nid, 6254 unsigned long zone, 6255 unsigned long range_start_pfn) 6256 { 6257 unsigned long start_pfn, end_pfn; 6258 unsigned long range_end_pfn = range_start_pfn + size; 6259 int i; 6260 6261 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 6262 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 6263 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 6264 6265 if (end_pfn > start_pfn) { 6266 size = end_pfn - start_pfn; 6267 memmap_init_zone(size, nid, zone, start_pfn, 6268 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); 6269 } 6270 } 6271 } 6272 6273 static int zone_batchsize(struct zone *zone) 6274 { 6275 #ifdef CONFIG_MMU 6276 int batch; 6277 6278 /* 6279 * The per-cpu-pages pools are set to around 1000th of the 6280 * size of the zone. 6281 */ 6282 batch = zone_managed_pages(zone) / 1024; 6283 /* But no more than a meg. */ 6284 if (batch * PAGE_SIZE > 1024 * 1024) 6285 batch = (1024 * 1024) / PAGE_SIZE; 6286 batch /= 4; /* We effectively *= 4 below */ 6287 if (batch < 1) 6288 batch = 1; 6289 6290 /* 6291 * Clamp the batch to a 2^n - 1 value. Having a power 6292 * of 2 value was found to be more likely to have 6293 * suboptimal cache aliasing properties in some cases. 6294 * 6295 * For example if 2 tasks are alternately allocating 6296 * batches of pages, one task can end up with a lot 6297 * of pages of one half of the possible page colors 6298 * and the other with pages of the other colors. 6299 */ 6300 batch = rounddown_pow_of_two(batch + batch/2) - 1; 6301 6302 return batch; 6303 6304 #else 6305 /* The deferral and batching of frees should be suppressed under NOMMU 6306 * conditions. 6307 * 6308 * The problem is that NOMMU needs to be able to allocate large chunks 6309 * of contiguous memory as there's no hardware page translation to 6310 * assemble apparent contiguous memory from discontiguous pages. 6311 * 6312 * Queueing large contiguous runs of pages for batching, however, 6313 * causes the pages to actually be freed in smaller chunks. As there 6314 * can be a significant delay between the individual batches being 6315 * recycled, this leads to the once large chunks of space being 6316 * fragmented and becoming unavailable for high-order allocations. 6317 */ 6318 return 0; 6319 #endif 6320 } 6321 6322 /* 6323 * pcp->high and pcp->batch values are related and generally batch is lower 6324 * than high. They are also related to pcp->count such that count is lower 6325 * than high, and as soon as it reaches high, the pcplist is flushed. 6326 * 6327 * However, guaranteeing these relations at all times would require e.g. write 6328 * barriers here but also careful usage of read barriers at the read side, and 6329 * thus be prone to error and bad for performance. Thus the update only prevents 6330 * store tearing. 
Any new users of pcp->batch and pcp->high should ensure they 6331 * can cope with those fields changing asynchronously, and fully trust only the 6332 * pcp->count field on the local CPU with interrupts disabled. 6333 * 6334 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 6335 * outside of boot time (or some other assurance that no concurrent updaters 6336 * exist). 6337 */ 6338 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 6339 unsigned long batch) 6340 { 6341 WRITE_ONCE(pcp->batch, batch); 6342 WRITE_ONCE(pcp->high, high); 6343 } 6344 6345 static void pageset_init(struct per_cpu_pageset *p) 6346 { 6347 struct per_cpu_pages *pcp; 6348 int migratetype; 6349 6350 memset(p, 0, sizeof(*p)); 6351 6352 pcp = &p->pcp; 6353 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 6354 INIT_LIST_HEAD(&pcp->lists[migratetype]); 6355 6356 /* 6357 * Set batch and high values safe for a boot pageset. A true percpu 6358 * pageset's initialization will update them subsequently. Here we don't 6359 * need to be as careful as pageset_update() as nobody can access the 6360 * pageset yet. 6361 */ 6362 pcp->high = BOOT_PAGESET_HIGH; 6363 pcp->batch = BOOT_PAGESET_BATCH; 6364 } 6365 6366 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 6367 unsigned long batch) 6368 { 6369 struct per_cpu_pageset *p; 6370 int cpu; 6371 6372 for_each_possible_cpu(cpu) { 6373 p = per_cpu_ptr(zone->pageset, cpu); 6374 pageset_update(&p->pcp, high, batch); 6375 } 6376 } 6377 6378 /* 6379 * Calculate and set new high and batch values for all per-cpu pagesets of a 6380 * zone, based on the zone's size and the percpu_pagelist_fraction sysctl. 6381 */ 6382 static void zone_set_pageset_high_and_batch(struct zone *zone) 6383 { 6384 unsigned long new_high, new_batch; 6385 6386 if (percpu_pagelist_fraction) { 6387 new_high = zone_managed_pages(zone) / percpu_pagelist_fraction; 6388 new_batch = max(1UL, new_high / 4); 6389 if ((new_high / 4) > (PAGE_SHIFT * 8)) 6390 new_batch = PAGE_SHIFT * 8; 6391 } else { 6392 new_batch = zone_batchsize(zone); 6393 new_high = 6 * new_batch; 6394 new_batch = max(1UL, 1 * new_batch); 6395 } 6396 6397 if (zone->pageset_high == new_high && 6398 zone->pageset_batch == new_batch) 6399 return; 6400 6401 zone->pageset_high = new_high; 6402 zone->pageset_batch = new_batch; 6403 6404 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 6405 } 6406 6407 void __meminit setup_zone_pageset(struct zone *zone) 6408 { 6409 struct per_cpu_pageset *p; 6410 int cpu; 6411 6412 zone->pageset = alloc_percpu(struct per_cpu_pageset); 6413 for_each_possible_cpu(cpu) { 6414 p = per_cpu_ptr(zone->pageset, cpu); 6415 pageset_init(p); 6416 } 6417 6418 zone_set_pageset_high_and_batch(zone); 6419 } 6420 6421 /* 6422 * Allocate per cpu pagesets and initialize them. 6423 * Before this call only boot pagesets were available. 6424 */ 6425 void __init setup_per_cpu_pageset(void) 6426 { 6427 struct pglist_data *pgdat; 6428 struct zone *zone; 6429 int __maybe_unused cpu; 6430 6431 for_each_populated_zone(zone) 6432 setup_zone_pageset(zone); 6433 6434 #ifdef CONFIG_NUMA 6435 /* 6436 * Unpopulated zones continue using the boot pagesets. 6437 * The numa stats for these pagesets need to be reset. 6438 * Otherwise, they will end up skewing the stats of 6439 * the nodes these zones are associated with. 
6440 */ 6441 for_each_possible_cpu(cpu) { 6442 struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu); 6443 memset(pcp->vm_numa_stat_diff, 0, 6444 sizeof(pcp->vm_numa_stat_diff)); 6445 } 6446 #endif 6447 6448 for_each_online_pgdat(pgdat) 6449 pgdat->per_cpu_nodestats = 6450 alloc_percpu(struct per_cpu_nodestat); 6451 } 6452 6453 static __meminit void zone_pcp_init(struct zone *zone) 6454 { 6455 /* 6456 * per cpu subsystem is not up at this point. The following code 6457 * relies on the ability of the linker to provide the 6458 * offset of a (static) per cpu variable into the per cpu area. 6459 */ 6460 zone->pageset = &boot_pageset; 6461 zone->pageset_high = BOOT_PAGESET_HIGH; 6462 zone->pageset_batch = BOOT_PAGESET_BATCH; 6463 6464 if (populated_zone(zone)) 6465 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 6466 zone->name, zone->present_pages, 6467 zone_batchsize(zone)); 6468 } 6469 6470 void __meminit init_currently_empty_zone(struct zone *zone, 6471 unsigned long zone_start_pfn, 6472 unsigned long size) 6473 { 6474 struct pglist_data *pgdat = zone->zone_pgdat; 6475 int zone_idx = zone_idx(zone) + 1; 6476 6477 if (zone_idx > pgdat->nr_zones) 6478 pgdat->nr_zones = zone_idx; 6479 6480 zone->zone_start_pfn = zone_start_pfn; 6481 6482 mminit_dprintk(MMINIT_TRACE, "memmap_init", 6483 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 6484 pgdat->node_id, 6485 (unsigned long)zone_idx(zone), 6486 zone_start_pfn, (zone_start_pfn + size)); 6487 6488 zone_init_free_lists(zone); 6489 zone->initialized = 1; 6490 } 6491 6492 /** 6493 * get_pfn_range_for_nid - Return the start and end page frames for a node 6494 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 6495 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 6496 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 6497 * 6498 * It returns the start and end page frame of a node based on information 6499 * provided by memblock_set_node(). If called for a node 6500 * with no available memory, a warning is printed and the start and end 6501 * PFNs will be 0. 6502 */ 6503 void __init get_pfn_range_for_nid(unsigned int nid, 6504 unsigned long *start_pfn, unsigned long *end_pfn) 6505 { 6506 unsigned long this_start_pfn, this_end_pfn; 6507 int i; 6508 6509 *start_pfn = -1UL; 6510 *end_pfn = 0; 6511 6512 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 6513 *start_pfn = min(*start_pfn, this_start_pfn); 6514 *end_pfn = max(*end_pfn, this_end_pfn); 6515 } 6516 6517 if (*start_pfn == -1UL) 6518 *start_pfn = 0; 6519 } 6520 6521 /* 6522 * This finds a zone that can be used for ZONE_MOVABLE pages. The 6523 * assumption is made that zones within a node are ordered in monotonic 6524 * increasing memory addresses so that the "highest" populated zone is used 6525 */ 6526 static void __init find_usable_zone_for_movable(void) 6527 { 6528 int zone_index; 6529 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 6530 if (zone_index == ZONE_MOVABLE) 6531 continue; 6532 6533 if (arch_zone_highest_possible_pfn[zone_index] > 6534 arch_zone_lowest_possible_pfn[zone_index]) 6535 break; 6536 } 6537 6538 VM_BUG_ON(zone_index == -1); 6539 movable_zone = zone_index; 6540 } 6541 6542 /* 6543 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 6544 * because it is sized independent of architecture. Unlike the other zones, 6545 * the starting point for ZONE_MOVABLE is not fixed. 
It may be different 6546 * in each node depending on the size of each node and how evenly kernelcore 6547 * is distributed. This helper function adjusts the zone ranges 6548 * provided by the architecture for a given node by using the end of the 6549 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 6550 * zones within a node are in order of monotonically increasing memory addresses. 6551 */ 6552 static void __init adjust_zone_range_for_zone_movable(int nid, 6553 unsigned long zone_type, 6554 unsigned long node_start_pfn, 6555 unsigned long node_end_pfn, 6556 unsigned long *zone_start_pfn, 6557 unsigned long *zone_end_pfn) 6558 { 6559 /* Only adjust if ZONE_MOVABLE is on this node */ 6560 if (zone_movable_pfn[nid]) { 6561 /* Size ZONE_MOVABLE */ 6562 if (zone_type == ZONE_MOVABLE) { 6563 *zone_start_pfn = zone_movable_pfn[nid]; 6564 *zone_end_pfn = min(node_end_pfn, 6565 arch_zone_highest_possible_pfn[movable_zone]); 6566 6567 /* Adjust for ZONE_MOVABLE starting within this range */ 6568 } else if (!mirrored_kernelcore && 6569 *zone_start_pfn < zone_movable_pfn[nid] && 6570 *zone_end_pfn > zone_movable_pfn[nid]) { 6571 *zone_end_pfn = zone_movable_pfn[nid]; 6572 6573 /* Check if this whole range is within ZONE_MOVABLE */ 6574 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 6575 *zone_start_pfn = *zone_end_pfn; 6576 } 6577 } 6578 6579 /* 6580 * Return the number of pages a zone spans in a node, including holes 6581 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 6582 */ 6583 static unsigned long __init zone_spanned_pages_in_node(int nid, 6584 unsigned long zone_type, 6585 unsigned long node_start_pfn, 6586 unsigned long node_end_pfn, 6587 unsigned long *zone_start_pfn, 6588 unsigned long *zone_end_pfn) 6589 { 6590 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 6591 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 6592 /* When hotadding a new node from cpu_up(), the node should be empty */ 6593 if (!node_start_pfn && !node_end_pfn) 6594 return 0; 6595 6596 /* Get the start and end of the zone */ 6597 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 6598 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 6599 adjust_zone_range_for_zone_movable(nid, zone_type, 6600 node_start_pfn, node_end_pfn, 6601 zone_start_pfn, zone_end_pfn); 6602 6603 /* Check that this node has pages within the zone's required range */ 6604 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 6605 return 0; 6606 6607 /* Move the zone boundaries inside the node if necessary */ 6608 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 6609 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 6610 6611 /* Return the spanned pages */ 6612 return *zone_end_pfn - *zone_start_pfn; 6613 } 6614 6615 /* 6616 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 6617 * then all holes in the requested range will be accounted for.
6618 */ 6619 unsigned long __init __absent_pages_in_range(int nid, 6620 unsigned long range_start_pfn, 6621 unsigned long range_end_pfn) 6622 { 6623 unsigned long nr_absent = range_end_pfn - range_start_pfn; 6624 unsigned long start_pfn, end_pfn; 6625 int i; 6626 6627 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 6628 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 6629 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 6630 nr_absent -= end_pfn - start_pfn; 6631 } 6632 return nr_absent; 6633 } 6634 6635 /** 6636 * absent_pages_in_range - Return number of page frames in holes within a range 6637 * @start_pfn: The start PFN to start searching for holes 6638 * @end_pfn: The end PFN to stop searching for holes 6639 * 6640 * Return: the number of page frames in memory holes within a range. 6641 */ 6642 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 6643 unsigned long end_pfn) 6644 { 6645 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 6646 } 6647 6648 /* Return the number of page frames in holes in a zone on a node */ 6649 static unsigned long __init zone_absent_pages_in_node(int nid, 6650 unsigned long zone_type, 6651 unsigned long node_start_pfn, 6652 unsigned long node_end_pfn) 6653 { 6654 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 6655 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 6656 unsigned long zone_start_pfn, zone_end_pfn; 6657 unsigned long nr_absent; 6658 6659 /* When hotadding a new node from cpu_up(), the node should be empty */ 6660 if (!node_start_pfn && !node_end_pfn) 6661 return 0; 6662 6663 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 6664 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 6665 6666 adjust_zone_range_for_zone_movable(nid, zone_type, 6667 node_start_pfn, node_end_pfn, 6668 &zone_start_pfn, &zone_end_pfn); 6669 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 6670 6671 /* 6672 * ZONE_MOVABLE handling. 6673 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 6674 * and vice versa.
6675 */ 6676 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 6677 unsigned long start_pfn, end_pfn; 6678 struct memblock_region *r; 6679 6680 for_each_mem_region(r) { 6681 start_pfn = clamp(memblock_region_memory_base_pfn(r), 6682 zone_start_pfn, zone_end_pfn); 6683 end_pfn = clamp(memblock_region_memory_end_pfn(r), 6684 zone_start_pfn, zone_end_pfn); 6685 6686 if (zone_type == ZONE_MOVABLE && 6687 memblock_is_mirror(r)) 6688 nr_absent += end_pfn - start_pfn; 6689 6690 if (zone_type == ZONE_NORMAL && 6691 !memblock_is_mirror(r)) 6692 nr_absent += end_pfn - start_pfn; 6693 } 6694 } 6695 6696 return nr_absent; 6697 } 6698 6699 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 6700 unsigned long node_start_pfn, 6701 unsigned long node_end_pfn) 6702 { 6703 unsigned long realtotalpages = 0, totalpages = 0; 6704 enum zone_type i; 6705 6706 for (i = 0; i < MAX_NR_ZONES; i++) { 6707 struct zone *zone = pgdat->node_zones + i; 6708 unsigned long zone_start_pfn, zone_end_pfn; 6709 unsigned long spanned, absent; 6710 unsigned long size, real_size; 6711 6712 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 6713 node_start_pfn, 6714 node_end_pfn, 6715 &zone_start_pfn, 6716 &zone_end_pfn); 6717 absent = zone_absent_pages_in_node(pgdat->node_id, i, 6718 node_start_pfn, 6719 node_end_pfn); 6720 6721 size = spanned; 6722 real_size = size - absent; 6723 6724 if (size) 6725 zone->zone_start_pfn = zone_start_pfn; 6726 else 6727 zone->zone_start_pfn = 0; 6728 zone->spanned_pages = size; 6729 zone->present_pages = real_size; 6730 6731 totalpages += size; 6732 realtotalpages += real_size; 6733 } 6734 6735 pgdat->node_spanned_pages = totalpages; 6736 pgdat->node_present_pages = realtotalpages; 6737 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 6738 realtotalpages); 6739 } 6740 6741 #ifndef CONFIG_SPARSEMEM 6742 /* 6743 * Calculate the size of the zone->blockflags rounded to an unsigned long 6744 * Start by making sure zonesize is a multiple of pageblock_order by rounding 6745 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 6746 * round what is now in bits to nearest long in bits, then return it in 6747 * bytes. 
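 * For example, assuming 4KiB pages, pageblock_order == 9 (2MiB pageblocks) and NR_PAGEBLOCK_BITS == 4, a 1GiB zone covers 512 pageblocks and therefore needs 512 * 4 = 2048 bits, i.e. 256 bytes of pageblock flags.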
6748 */ 6749 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 6750 { 6751 unsigned long usemapsize; 6752 6753 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 6754 usemapsize = roundup(zonesize, pageblock_nr_pages); 6755 usemapsize = usemapsize >> pageblock_order; 6756 usemapsize *= NR_PAGEBLOCK_BITS; 6757 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 6758 6759 return usemapsize / 8; 6760 } 6761 6762 static void __ref setup_usemap(struct pglist_data *pgdat, 6763 struct zone *zone, 6764 unsigned long zone_start_pfn, 6765 unsigned long zonesize) 6766 { 6767 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); 6768 zone->pageblock_flags = NULL; 6769 if (usemapsize) { 6770 zone->pageblock_flags = 6771 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 6772 pgdat->node_id); 6773 if (!zone->pageblock_flags) 6774 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 6775 usemapsize, zone->name, pgdat->node_id); 6776 } 6777 } 6778 #else 6779 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, 6780 unsigned long zone_start_pfn, unsigned long zonesize) {} 6781 #endif /* CONFIG_SPARSEMEM */ 6782 6783 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 6784 6785 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 6786 void __init set_pageblock_order(void) 6787 { 6788 unsigned int order; 6789 6790 /* Check that pageblock_nr_pages has not already been setup */ 6791 if (pageblock_order) 6792 return; 6793 6794 if (HPAGE_SHIFT > PAGE_SHIFT) 6795 order = HUGETLB_PAGE_ORDER; 6796 else 6797 order = MAX_ORDER - 1; 6798 6799 /* 6800 * Assume the largest contiguous order of interest is a huge page. 6801 * This value may be variable depending on boot parameters on IA64 and 6802 * powerpc. 6803 */ 6804 pageblock_order = order; 6805 } 6806 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 6807 6808 /* 6809 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 6810 * is unused as pageblock_order is set at compile-time. See 6811 * include/linux/pageblock-flags.h for the values of pageblock_order based on 6812 * the kernel config 6813 */ 6814 void __init set_pageblock_order(void) 6815 { 6816 } 6817 6818 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 6819 6820 static unsigned long __init calc_memmap_size(unsigned long spanned_pages, 6821 unsigned long present_pages) 6822 { 6823 unsigned long pages = spanned_pages; 6824 6825 /* 6826 * Provide a more accurate estimation if there are holes within 6827 * the zone and SPARSEMEM is in use. If there are holes within the 6828 * zone, each populated memory region may cost us one or two extra 6829 * memmap pages due to alignment because memmap pages for each 6830 * populated regions may not be naturally aligned on page boundary. 6831 * So the (present_pages >> 4) heuristic is a tradeoff for that. 
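 * In other words, the estimate only switches from spanned_pages to present_pages once the holes amount to more than 1/16th (about 6%) of present_pages.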
6832 */ 6833 if (spanned_pages > present_pages + (present_pages >> 4) && 6834 IS_ENABLED(CONFIG_SPARSEMEM)) 6835 pages = present_pages; 6836 6837 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 6838 } 6839 6840 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6841 static void pgdat_init_split_queue(struct pglist_data *pgdat) 6842 { 6843 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 6844 6845 spin_lock_init(&ds_queue->split_queue_lock); 6846 INIT_LIST_HEAD(&ds_queue->split_queue); 6847 ds_queue->split_queue_len = 0; 6848 } 6849 #else 6850 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 6851 #endif 6852 6853 #ifdef CONFIG_COMPACTION 6854 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 6855 { 6856 init_waitqueue_head(&pgdat->kcompactd_wait); 6857 } 6858 #else 6859 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 6860 #endif 6861 6862 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 6863 { 6864 pgdat_resize_init(pgdat); 6865 6866 pgdat_init_split_queue(pgdat); 6867 pgdat_init_kcompactd(pgdat); 6868 6869 init_waitqueue_head(&pgdat->kswapd_wait); 6870 init_waitqueue_head(&pgdat->pfmemalloc_wait); 6871 6872 pgdat_page_ext_init(pgdat); 6873 lruvec_init(&pgdat->__lruvec); 6874 } 6875 6876 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 6877 unsigned long remaining_pages) 6878 { 6879 atomic_long_set(&zone->managed_pages, remaining_pages); 6880 zone_set_nid(zone, nid); 6881 zone->name = zone_names[idx]; 6882 zone->zone_pgdat = NODE_DATA(nid); 6883 spin_lock_init(&zone->lock); 6884 zone_seqlock_init(zone); 6885 zone_pcp_init(zone); 6886 } 6887 6888 /* 6889 * Set up the zone data structures 6890 * - init pgdat internals 6891 * - init all zones belonging to this node 6892 * 6893 * NOTE: this function is only called during memory hotplug 6894 */ 6895 #ifdef CONFIG_MEMORY_HOTPLUG 6896 void __ref free_area_init_core_hotplug(int nid) 6897 { 6898 enum zone_type z; 6899 pg_data_t *pgdat = NODE_DATA(nid); 6900 6901 pgdat_init_internals(pgdat); 6902 for (z = 0; z < MAX_NR_ZONES; z++) 6903 zone_init_internals(&pgdat->node_zones[z], z, nid, 0); 6904 } 6905 #endif 6906 6907 /* 6908 * Set up the zone data structures: 6909 * - mark all pages reserved 6910 * - mark all memory queues empty 6911 * - clear the memory bitmaps 6912 * 6913 * NOTE: pgdat should get zeroed by caller. 6914 * NOTE: this function is only called during early init. 6915 */ 6916 static void __init free_area_init_core(struct pglist_data *pgdat) 6917 { 6918 enum zone_type j; 6919 int nid = pgdat->node_id; 6920 6921 pgdat_init_internals(pgdat); 6922 pgdat->per_cpu_nodestats = &boot_nodestats; 6923 6924 for (j = 0; j < MAX_NR_ZONES; j++) { 6925 struct zone *zone = pgdat->node_zones + j; 6926 unsigned long size, freesize, memmap_pages; 6927 unsigned long zone_start_pfn = zone->zone_start_pfn; 6928 6929 size = zone->spanned_pages; 6930 freesize = zone->present_pages; 6931 6932 /* 6933 * Adjust freesize so that it accounts for how much memory 6934 * is used by this zone for memmap. 
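 * (On a typical 64-bit configuration with 4KiB pages and a 64-byte struct page, for instance, the memmap costs roughly 1.6% of the zone.)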
This affects the watermark 6935 * and per-cpu initialisations 6936 */ 6937 memmap_pages = calc_memmap_size(size, freesize); 6938 if (!is_highmem_idx(j)) { 6939 if (freesize >= memmap_pages) { 6940 freesize -= memmap_pages; 6941 if (memmap_pages) 6942 printk(KERN_DEBUG 6943 " %s zone: %lu pages used for memmap\n", 6944 zone_names[j], memmap_pages); 6945 } else 6946 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", 6947 zone_names[j], memmap_pages, freesize); 6948 } 6949 6950 /* Account for reserved pages */ 6951 if (j == 0 && freesize > dma_reserve) { 6952 freesize -= dma_reserve; 6953 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 6954 zone_names[0], dma_reserve); 6955 } 6956 6957 if (!is_highmem_idx(j)) 6958 nr_kernel_pages += freesize; 6959 /* Charge for highmem memmap if there are enough kernel pages */ 6960 else if (nr_kernel_pages > memmap_pages * 2) 6961 nr_kernel_pages -= memmap_pages; 6962 nr_all_pages += freesize; 6963 6964 /* 6965 * Set an approximate value for lowmem here, it will be adjusted 6966 * when the bootmem allocator frees pages into the buddy system. 6967 * And all highmem pages will be managed by the buddy system. 6968 */ 6969 zone_init_internals(zone, j, nid, freesize); 6970 6971 if (!size) 6972 continue; 6973 6974 set_pageblock_order(); 6975 setup_usemap(pgdat, zone, zone_start_pfn, size); 6976 init_currently_empty_zone(zone, zone_start_pfn, size); 6977 memmap_init(size, nid, j, zone_start_pfn); 6978 } 6979 } 6980 6981 #ifdef CONFIG_FLAT_NODE_MEM_MAP 6982 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) 6983 { 6984 unsigned long __maybe_unused start = 0; 6985 unsigned long __maybe_unused offset = 0; 6986 6987 /* Skip empty nodes */ 6988 if (!pgdat->node_spanned_pages) 6989 return; 6990 6991 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 6992 offset = pgdat->node_start_pfn - start; 6993 /* ia64 gets its own node_mem_map, before this, without bootmem */ 6994 if (!pgdat->node_mem_map) { 6995 unsigned long size, end; 6996 struct page *map; 6997 6998 /* 6999 * The zone's endpoints aren't required to be MAX_ORDER 7000 * aligned but the node_mem_map endpoints must be in order 7001 * for the buddy allocator to function correctly. 
7002 */ 7003 end = pgdat_end_pfn(pgdat); 7004 end = ALIGN(end, MAX_ORDER_NR_PAGES); 7005 size = (end - start) * sizeof(struct page); 7006 map = memblock_alloc_node(size, SMP_CACHE_BYTES, 7007 pgdat->node_id); 7008 if (!map) 7009 panic("Failed to allocate %ld bytes for node %d memory map\n", 7010 size, pgdat->node_id); 7011 pgdat->node_mem_map = map + offset; 7012 } 7013 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 7014 __func__, pgdat->node_id, (unsigned long)pgdat, 7015 (unsigned long)pgdat->node_mem_map); 7016 #ifndef CONFIG_NEED_MULTIPLE_NODES 7017 /* 7018 * With no DISCONTIG, the global mem_map is just set as node 0's 7019 */ 7020 if (pgdat == NODE_DATA(0)) { 7021 mem_map = NODE_DATA(0)->node_mem_map; 7022 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 7023 mem_map -= offset; 7024 } 7025 #endif 7026 } 7027 #else 7028 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { } 7029 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 7030 7031 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 7032 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 7033 { 7034 pgdat->first_deferred_pfn = ULONG_MAX; 7035 } 7036 #else 7037 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 7038 #endif 7039 7040 static void __init free_area_init_node(int nid) 7041 { 7042 pg_data_t *pgdat = NODE_DATA(nid); 7043 unsigned long start_pfn = 0; 7044 unsigned long end_pfn = 0; 7045 7046 /* pg_data_t should be reset to zero when it's allocated */ 7047 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 7048 7049 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 7050 7051 pgdat->node_id = nid; 7052 pgdat->node_start_pfn = start_pfn; 7053 pgdat->per_cpu_nodestats = NULL; 7054 7055 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 7056 (u64)start_pfn << PAGE_SHIFT, 7057 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 7058 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 7059 7060 alloc_node_mem_map(pgdat); 7061 pgdat_set_deferred_range(pgdat); 7062 7063 free_area_init_core(pgdat); 7064 } 7065 7066 void __init free_area_init_memoryless_node(int nid) 7067 { 7068 free_area_init_node(nid); 7069 } 7070 7071 #if !defined(CONFIG_FLAT_NODE_MEM_MAP) 7072 /* 7073 * Initialize all valid struct pages in the range [spfn, epfn) and mark them 7074 * PageReserved(). Return the number of struct pages that were initialized. 7075 */ 7076 static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn) 7077 { 7078 unsigned long pfn; 7079 u64 pgcnt = 0; 7080 7081 for (pfn = spfn; pfn < epfn; pfn++) { 7082 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) { 7083 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages) 7084 + pageblock_nr_pages - 1; 7085 continue; 7086 } 7087 /* 7088 * Use a fake node/zone (0) for now. Some of these pages 7089 * (in memblock.reserved but not in memblock.memory) will 7090 * get re-initialized via reserve_bootmem_region() later. 7091 */ 7092 __init_single_page(pfn_to_page(pfn), pfn, 0, 0); 7093 __SetPageReserved(pfn_to_page(pfn)); 7094 pgcnt++; 7095 } 7096 7097 return pgcnt; 7098 } 7099 7100 /* 7101 * Only struct pages that are backed by physical memory are zeroed and 7102 * initialized by going through __init_single_page(). But, there are some 7103 * struct pages which are reserved in memblock allocator and their fields 7104 * may be accessed (for example page_to_pfn() on some configuration accesses 7105 * flags). We must explicitly initialize those struct pages. 
7106 * 7107 * This function also addresses a similar issue where struct pages are left 7108 * uninitialized because the physical address range is not covered by 7109 * memblock.memory or memblock.reserved. That could happen when memblock 7110 * layout is manually configured via memmap=, or when the highest physical 7111 * address (max_pfn) does not end on a section boundary. 7112 */ 7113 static void __init init_unavailable_mem(void) 7114 { 7115 phys_addr_t start, end; 7116 u64 i, pgcnt; 7117 phys_addr_t next = 0; 7118 7119 /* 7120 * Loop through unavailable ranges not covered by memblock.memory. 7121 */ 7122 pgcnt = 0; 7123 for_each_mem_range(i, &start, &end) { 7124 if (next < start) 7125 pgcnt += init_unavailable_range(PFN_DOWN(next), 7126 PFN_UP(start)); 7127 next = end; 7128 } 7129 7130 /* 7131 * Early sections always have a fully populated memmap for the whole 7132 * section - see pfn_valid(). If the last section has holes at the 7133 * end and that section is marked "online", the memmap will be 7134 * considered initialized. Make sure that memmap has a well defined 7135 * state. 7136 */ 7137 pgcnt += init_unavailable_range(PFN_DOWN(next), 7138 round_up(max_pfn, PAGES_PER_SECTION)); 7139 7140 /* 7141 * Struct pages that do not have backing memory. This could be because 7142 * firmware is using some of this memory, or for some other reasons. 7143 */ 7144 if (pgcnt) 7145 pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt); 7146 } 7147 #else 7148 static inline void __init init_unavailable_mem(void) 7149 { 7150 } 7151 #endif /* !CONFIG_FLAT_NODE_MEM_MAP */ 7152 7153 #if MAX_NUMNODES > 1 7154 /* 7155 * Figure out the number of possible node ids. 7156 */ 7157 void __init setup_nr_node_ids(void) 7158 { 7159 unsigned int highest; 7160 7161 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 7162 nr_node_ids = highest + 1; 7163 } 7164 #endif 7165 7166 /** 7167 * node_map_pfn_alignment - determine the maximum internode alignment 7168 * 7169 * This function should be called after node map is populated and sorted. 7170 * It calculates the maximum power of two alignment which can distinguish 7171 * all the nodes. 7172 * 7173 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 7174 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 7175 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 7176 * shifted, 1GiB is enough and this function will indicate so. 7177 * 7178 * This is used to test whether pfn -> nid mapping of the chosen memory 7179 * model has fine enough granularity to avoid incorrect mapping for the 7180 * populated node map. 7181 * 7182 * Return: the determined alignment in pfn's. 0 if there is no alignment 7183 * requirement (single node). 7184 */ 7185 unsigned long __init node_map_pfn_alignment(void) 7186 { 7187 unsigned long accl_mask = 0, last_end = 0; 7188 unsigned long start, end, mask; 7189 int last_nid = NUMA_NO_NODE; 7190 int i, nid; 7191 7192 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 7193 if (!start || last_nid < 0 || last_nid == nid) { 7194 last_nid = nid; 7195 last_end = end; 7196 continue; 7197 } 7198 7199 /* 7200 * Start with a mask granular enough to pin-point to the 7201 * start pfn and tick off bits one-by-one until it becomes 7202 * too coarse to separate the current node from the last. 
7203 */ 7204 mask = ~((1 << __ffs(start)) - 1); 7205 while (mask && last_end <= (start & (mask << 1))) 7206 mask <<= 1; 7207 7208 /* accumulate all internode masks */ 7209 accl_mask |= mask; 7210 } 7211 7212 /* convert mask to number of pages */ 7213 return ~accl_mask + 1; 7214 } 7215 7216 /** 7217 * find_min_pfn_with_active_regions - Find the minimum PFN registered 7218 * 7219 * Return: the minimum PFN based on information provided via 7220 * memblock_set_node(). 7221 */ 7222 unsigned long __init find_min_pfn_with_active_regions(void) 7223 { 7224 return PHYS_PFN(memblock_start_of_DRAM()); 7225 } 7226 7227 /* 7228 * early_calculate_totalpages() 7229 * Sum pages in active regions for movable zone. 7230 * Populate N_MEMORY for calculating usable_nodes. 7231 */ 7232 static unsigned long __init early_calculate_totalpages(void) 7233 { 7234 unsigned long totalpages = 0; 7235 unsigned long start_pfn, end_pfn; 7236 int i, nid; 7237 7238 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 7239 unsigned long pages = end_pfn - start_pfn; 7240 7241 totalpages += pages; 7242 if (pages) 7243 node_set_state(nid, N_MEMORY); 7244 } 7245 return totalpages; 7246 } 7247 7248 /* 7249 * Find the PFN the Movable zone begins in each node. Kernel memory 7250 * is spread evenly between nodes as long as the nodes have enough 7251 * memory. When they don't, some nodes will have more kernelcore than 7252 * others 7253 */ 7254 static void __init find_zone_movable_pfns_for_nodes(void) 7255 { 7256 int i, nid; 7257 unsigned long usable_startpfn; 7258 unsigned long kernelcore_node, kernelcore_remaining; 7259 /* save the state before borrow the nodemask */ 7260 nodemask_t saved_node_state = node_states[N_MEMORY]; 7261 unsigned long totalpages = early_calculate_totalpages(); 7262 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 7263 struct memblock_region *r; 7264 7265 /* Need to find movable_zone earlier when movable_node is specified. */ 7266 find_usable_zone_for_movable(); 7267 7268 /* 7269 * If movable_node is specified, ignore kernelcore and movablecore 7270 * options. 7271 */ 7272 if (movable_node_is_enabled()) { 7273 for_each_mem_region(r) { 7274 if (!memblock_is_hotpluggable(r)) 7275 continue; 7276 7277 nid = memblock_get_region_node(r); 7278 7279 usable_startpfn = PFN_DOWN(r->base); 7280 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 7281 min(usable_startpfn, zone_movable_pfn[nid]) : 7282 usable_startpfn; 7283 } 7284 7285 goto out2; 7286 } 7287 7288 /* 7289 * If kernelcore=mirror is specified, ignore movablecore option 7290 */ 7291 if (mirrored_kernelcore) { 7292 bool mem_below_4gb_not_mirrored = false; 7293 7294 for_each_mem_region(r) { 7295 if (memblock_is_mirror(r)) 7296 continue; 7297 7298 nid = memblock_get_region_node(r); 7299 7300 usable_startpfn = memblock_region_memory_base_pfn(r); 7301 7302 if (usable_startpfn < 0x100000) { 7303 mem_below_4gb_not_mirrored = true; 7304 continue; 7305 } 7306 7307 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 7308 min(usable_startpfn, zone_movable_pfn[nid]) : 7309 usable_startpfn; 7310 } 7311 7312 if (mem_below_4gb_not_mirrored) 7313 pr_warn("This configuration results in unmirrored kernel memory.\n"); 7314 7315 goto out2; 7316 } 7317 7318 /* 7319 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 7320 * amount of necessary memory. 
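 * For example, kernelcore=25% on a system with 16 GiB of memory requests roughly 4 GiB worth of pages for non-movable kernel use; the rest may be placed in ZONE_MOVABLE.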
7321 */ 7322 if (required_kernelcore_percent) 7323 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 7324 10000UL; 7325 if (required_movablecore_percent) 7326 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 7327 10000UL; 7328 7329 /* 7330 * If movablecore= was specified, calculate what size of 7331 * kernelcore that corresponds so that memory usable for 7332 * any allocation type is evenly spread. If both kernelcore 7333 * and movablecore are specified, then the value of kernelcore 7334 * will be used for required_kernelcore if it's greater than 7335 * what movablecore would have allowed. 7336 */ 7337 if (required_movablecore) { 7338 unsigned long corepages; 7339 7340 /* 7341 * Round-up so that ZONE_MOVABLE is at least as large as what 7342 * was requested by the user 7343 */ 7344 required_movablecore = 7345 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 7346 required_movablecore = min(totalpages, required_movablecore); 7347 corepages = totalpages - required_movablecore; 7348 7349 required_kernelcore = max(required_kernelcore, corepages); 7350 } 7351 7352 /* 7353 * If kernelcore was not specified or kernelcore size is larger 7354 * than totalpages, there is no ZONE_MOVABLE. 7355 */ 7356 if (!required_kernelcore || required_kernelcore >= totalpages) 7357 goto out; 7358 7359 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 7360 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 7361 7362 restart: 7363 /* Spread kernelcore memory as evenly as possible throughout nodes */ 7364 kernelcore_node = required_kernelcore / usable_nodes; 7365 for_each_node_state(nid, N_MEMORY) { 7366 unsigned long start_pfn, end_pfn; 7367 7368 /* 7369 * Recalculate kernelcore_node if the division per node 7370 * now exceeds what is necessary to satisfy the requested 7371 * amount of memory for the kernel 7372 */ 7373 if (required_kernelcore < kernelcore_node) 7374 kernelcore_node = required_kernelcore / usable_nodes; 7375 7376 /* 7377 * As the map is walked, we track how much memory is usable 7378 * by the kernel using kernelcore_remaining. When it is 7379 * 0, the rest of the node is usable by ZONE_MOVABLE 7380 */ 7381 kernelcore_remaining = kernelcore_node; 7382 7383 /* Go through each range of PFNs within this node */ 7384 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 7385 unsigned long size_pages; 7386 7387 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 7388 if (start_pfn >= end_pfn) 7389 continue; 7390 7391 /* Account for what is only usable for kernelcore */ 7392 if (start_pfn < usable_startpfn) { 7393 unsigned long kernel_pages; 7394 kernel_pages = min(end_pfn, usable_startpfn) 7395 - start_pfn; 7396 7397 kernelcore_remaining -= min(kernel_pages, 7398 kernelcore_remaining); 7399 required_kernelcore -= min(kernel_pages, 7400 required_kernelcore); 7401 7402 /* Continue if range is now fully accounted */ 7403 if (end_pfn <= usable_startpfn) { 7404 7405 /* 7406 * Push zone_movable_pfn to the end so 7407 * that if we have to rebalance 7408 * kernelcore across nodes, we will 7409 * not double account here 7410 */ 7411 zone_movable_pfn[nid] = end_pfn; 7412 continue; 7413 } 7414 start_pfn = usable_startpfn; 7415 } 7416 7417 /* 7418 * The usable PFN range for ZONE_MOVABLE is from 7419 * start_pfn->end_pfn. 
Calculate size_pages as the
7420 * number of pages used as kernelcore
7421 */
7422 size_pages = end_pfn - start_pfn;
7423 if (size_pages > kernelcore_remaining)
7424 size_pages = kernelcore_remaining;
7425 zone_movable_pfn[nid] = start_pfn + size_pages;
7426
7427 /*
7428 * Some kernelcore has been met, update counts and
7429 * break if the kernelcore for this node has been
7430 * satisfied
7431 */
7432 required_kernelcore -= min(required_kernelcore,
7433 size_pages);
7434 kernelcore_remaining -= size_pages;
7435 if (!kernelcore_remaining)
7436 break;
7437 }
7438 }
7439
7440 /*
7441 * If there is still required_kernelcore, we do another pass with one
7442 * less node in the count. This will push zone_movable_pfn[nid] further
7443 * along on the nodes that still have memory until kernelcore is
7444 * satisfied
7445 */
7446 usable_nodes--;
7447 if (usable_nodes && required_kernelcore > usable_nodes)
7448 goto restart;
7449
7450 out2:
7451 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
7452 for (nid = 0; nid < MAX_NUMNODES; nid++)
7453 zone_movable_pfn[nid] =
7454 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
7455
7456 out:
7457 /* restore the node_state */
7458 node_states[N_MEMORY] = saved_node_state;
7459 }
7460
7461 /* Any regular or high memory on that node? */
7462 static void check_for_memory(pg_data_t *pgdat, int nid)
7463 {
7464 enum zone_type zone_type;
7465
7466 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
7467 struct zone *zone = &pgdat->node_zones[zone_type];
7468 if (populated_zone(zone)) {
7469 if (IS_ENABLED(CONFIG_HIGHMEM))
7470 node_set_state(nid, N_HIGH_MEMORY);
7471 if (zone_type <= ZONE_NORMAL)
7472 node_set_state(nid, N_NORMAL_MEMORY);
7473 break;
7474 }
7475 }
7476 }
7477
7478 /*
7479 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
7480 * such cases we allow max_zone_pfn to be sorted in descending order.
7481 */
7482 bool __weak arch_has_descending_max_zone_pfns(void)
7483 {
7484 return false;
7485 }
7486
7487 /**
7488 * free_area_init - Initialise all pg_data_t and zone data
7489 * @max_zone_pfn: an array of max PFNs for each zone
7490 *
7491 * This will call free_area_init_node() for each active node in the system.
7492 * Using the page ranges provided by memblock_set_node(), the size of each
7493 * zone in each node and their holes is calculated. If the maximum PFNs of
7494 * two adjacent zones match, it is assumed that the zone is empty.
7495 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
7496 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
7497 * starts where the previous one ended. For example, ZONE_DMA32 starts
7498 * at arch_max_dma_pfn.
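 *
 * As an illustration (hypothetical values, ascending zone layout): with
 * max_zone_pfn = { 0x1000, 0x100000, 0x400000 } the loop below records
 * ZONE_DMA as [min_pfn, 0x1000), ZONE_DMA32 as [0x1000, 0x100000) and
 * ZONE_NORMAL as [0x100000, 0x400000), where min_pfn comes from
 * find_min_pfn_with_active_regions().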
7499 */ 7500 void __init free_area_init(unsigned long *max_zone_pfn) 7501 { 7502 unsigned long start_pfn, end_pfn; 7503 int i, nid, zone; 7504 bool descending; 7505 7506 /* Record where the zone boundaries are */ 7507 memset(arch_zone_lowest_possible_pfn, 0, 7508 sizeof(arch_zone_lowest_possible_pfn)); 7509 memset(arch_zone_highest_possible_pfn, 0, 7510 sizeof(arch_zone_highest_possible_pfn)); 7511 7512 start_pfn = find_min_pfn_with_active_regions(); 7513 descending = arch_has_descending_max_zone_pfns(); 7514 7515 for (i = 0; i < MAX_NR_ZONES; i++) { 7516 if (descending) 7517 zone = MAX_NR_ZONES - i - 1; 7518 else 7519 zone = i; 7520 7521 if (zone == ZONE_MOVABLE) 7522 continue; 7523 7524 end_pfn = max(max_zone_pfn[zone], start_pfn); 7525 arch_zone_lowest_possible_pfn[zone] = start_pfn; 7526 arch_zone_highest_possible_pfn[zone] = end_pfn; 7527 7528 start_pfn = end_pfn; 7529 } 7530 7531 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 7532 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 7533 find_zone_movable_pfns_for_nodes(); 7534 7535 /* Print out the zone ranges */ 7536 pr_info("Zone ranges:\n"); 7537 for (i = 0; i < MAX_NR_ZONES; i++) { 7538 if (i == ZONE_MOVABLE) 7539 continue; 7540 pr_info(" %-8s ", zone_names[i]); 7541 if (arch_zone_lowest_possible_pfn[i] == 7542 arch_zone_highest_possible_pfn[i]) 7543 pr_cont("empty\n"); 7544 else 7545 pr_cont("[mem %#018Lx-%#018Lx]\n", 7546 (u64)arch_zone_lowest_possible_pfn[i] 7547 << PAGE_SHIFT, 7548 ((u64)arch_zone_highest_possible_pfn[i] 7549 << PAGE_SHIFT) - 1); 7550 } 7551 7552 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 7553 pr_info("Movable zone start for each node\n"); 7554 for (i = 0; i < MAX_NUMNODES; i++) { 7555 if (zone_movable_pfn[i]) 7556 pr_info(" Node %d: %#018Lx\n", i, 7557 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 7558 } 7559 7560 /* 7561 * Print out the early node map, and initialize the 7562 * subsection-map relative to active online memory ranges to 7563 * enable future "sub-section" extensions of the memory map. 
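 *
 * The loop below prints one line per memblock range, e.g. (illustrative):
 *   node   0: [mem 0x0000000000001000-0x00000000bfffffff]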
7564 */ 7565 pr_info("Early memory node ranges\n"); 7566 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 7567 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 7568 (u64)start_pfn << PAGE_SHIFT, 7569 ((u64)end_pfn << PAGE_SHIFT) - 1); 7570 subsection_map_init(start_pfn, end_pfn - start_pfn); 7571 } 7572 7573 /* Initialise every node */ 7574 mminit_verify_pageflags_layout(); 7575 setup_nr_node_ids(); 7576 init_unavailable_mem(); 7577 for_each_online_node(nid) { 7578 pg_data_t *pgdat = NODE_DATA(nid); 7579 free_area_init_node(nid); 7580 7581 /* Any memory on that node */ 7582 if (pgdat->node_present_pages) 7583 node_set_state(nid, N_MEMORY); 7584 check_for_memory(pgdat, nid); 7585 } 7586 } 7587 7588 static int __init cmdline_parse_core(char *p, unsigned long *core, 7589 unsigned long *percent) 7590 { 7591 unsigned long long coremem; 7592 char *endptr; 7593 7594 if (!p) 7595 return -EINVAL; 7596 7597 /* Value may be a percentage of total memory, otherwise bytes */ 7598 coremem = simple_strtoull(p, &endptr, 0); 7599 if (*endptr == '%') { 7600 /* Paranoid check for percent values greater than 100 */ 7601 WARN_ON(coremem > 100); 7602 7603 *percent = coremem; 7604 } else { 7605 coremem = memparse(p, &p); 7606 /* Paranoid check that UL is enough for the coremem value */ 7607 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 7608 7609 *core = coremem >> PAGE_SHIFT; 7610 *percent = 0UL; 7611 } 7612 return 0; 7613 } 7614 7615 /* 7616 * kernelcore=size sets the amount of memory for use for allocations that 7617 * cannot be reclaimed or migrated. 7618 */ 7619 static int __init cmdline_parse_kernelcore(char *p) 7620 { 7621 /* parse kernelcore=mirror */ 7622 if (parse_option_str(p, "mirror")) { 7623 mirrored_kernelcore = true; 7624 return 0; 7625 } 7626 7627 return cmdline_parse_core(p, &required_kernelcore, 7628 &required_kernelcore_percent); 7629 } 7630 7631 /* 7632 * movablecore=size sets the amount of memory for use for allocations that 7633 * can be reclaimed or migrated. 7634 */ 7635 static int __init cmdline_parse_movablecore(char *p) 7636 { 7637 return cmdline_parse_core(p, &required_movablecore, 7638 &required_movablecore_percent); 7639 } 7640 7641 early_param("kernelcore", cmdline_parse_kernelcore); 7642 early_param("movablecore", cmdline_parse_movablecore); 7643 7644 void adjust_managed_page_count(struct page *page, long count) 7645 { 7646 atomic_long_add(count, &page_zone(page)->managed_pages); 7647 totalram_pages_add(count); 7648 #ifdef CONFIG_HIGHMEM 7649 if (PageHighMem(page)) 7650 totalhigh_pages_add(count); 7651 #endif 7652 } 7653 EXPORT_SYMBOL(adjust_managed_page_count); 7654 7655 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 7656 { 7657 void *pos; 7658 unsigned long pages = 0; 7659 7660 start = (void *)PAGE_ALIGN((unsigned long)start); 7661 end = (void *)((unsigned long)end & PAGE_MASK); 7662 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 7663 struct page *page = virt_to_page(pos); 7664 void *direct_map_addr; 7665 7666 /* 7667 * 'direct_map_addr' might be different from 'pos' 7668 * because some architectures' virt_to_page() 7669 * work with aliases. Getting the direct map 7670 * address ensures that we get a _writeable_ 7671 * alias for the memset(). 
7672 */ 7673 direct_map_addr = page_address(page); 7674 if ((unsigned int)poison <= 0xFF) 7675 memset(direct_map_addr, poison, PAGE_SIZE); 7676 7677 free_reserved_page(page); 7678 } 7679 7680 if (pages && s) 7681 pr_info("Freeing %s memory: %ldK\n", 7682 s, pages << (PAGE_SHIFT - 10)); 7683 7684 return pages; 7685 } 7686 7687 #ifdef CONFIG_HIGHMEM 7688 void free_highmem_page(struct page *page) 7689 { 7690 __free_reserved_page(page); 7691 totalram_pages_inc(); 7692 atomic_long_inc(&page_zone(page)->managed_pages); 7693 totalhigh_pages_inc(); 7694 } 7695 #endif 7696 7697 7698 void __init mem_init_print_info(const char *str) 7699 { 7700 unsigned long physpages, codesize, datasize, rosize, bss_size; 7701 unsigned long init_code_size, init_data_size; 7702 7703 physpages = get_num_physpages(); 7704 codesize = _etext - _stext; 7705 datasize = _edata - _sdata; 7706 rosize = __end_rodata - __start_rodata; 7707 bss_size = __bss_stop - __bss_start; 7708 init_data_size = __init_end - __init_begin; 7709 init_code_size = _einittext - _sinittext; 7710 7711 /* 7712 * Detect special cases and adjust section sizes accordingly: 7713 * 1) .init.* may be embedded into .data sections 7714 * 2) .init.text.* may be out of [__init_begin, __init_end], 7715 * please refer to arch/tile/kernel/vmlinux.lds.S. 7716 * 3) .rodata.* may be embedded into .text or .data sections. 7717 */ 7718 #define adj_init_size(start, end, size, pos, adj) \ 7719 do { \ 7720 if (start <= pos && pos < end && size > adj) \ 7721 size -= adj; \ 7722 } while (0) 7723 7724 adj_init_size(__init_begin, __init_end, init_data_size, 7725 _sinittext, init_code_size); 7726 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 7727 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 7728 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 7729 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 7730 7731 #undef adj_init_size 7732 7733 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 7734 #ifdef CONFIG_HIGHMEM 7735 ", %luK highmem" 7736 #endif 7737 "%s%s)\n", 7738 nr_free_pages() << (PAGE_SHIFT - 10), 7739 physpages << (PAGE_SHIFT - 10), 7740 codesize >> 10, datasize >> 10, rosize >> 10, 7741 (init_data_size + init_code_size) >> 10, bss_size >> 10, 7742 (physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10), 7743 totalcma_pages << (PAGE_SHIFT - 10), 7744 #ifdef CONFIG_HIGHMEM 7745 totalhigh_pages() << (PAGE_SHIFT - 10), 7746 #endif 7747 str ? ", " : "", str ? str : ""); 7748 } 7749 7750 /** 7751 * set_dma_reserve - set the specified number of pages reserved in the first zone 7752 * @new_dma_reserve: The number of pages to mark reserved 7753 * 7754 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 7755 * In the DMA zone, a significant percentage may be consumed by kernel image 7756 * and other unfreeable allocations which can skew the watermarks badly. This 7757 * function may optionally be used to account for unfreeable pages in the 7758 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 7759 * smaller per-cpu batchsize. 
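 *
 * For example (hypothetical figures): if roughly 4 MiB of a 16 MiB ZONE_DMA
 * is pinned by the kernel image and early allocations, an architecture can
 * call set_dma_reserve(1024) so that those 1024 pages (4 KiB each) are not
 * counted when watermarks and per-cpu batch sizes are derived.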
7760 */ 7761 void __init set_dma_reserve(unsigned long new_dma_reserve) 7762 { 7763 dma_reserve = new_dma_reserve; 7764 } 7765 7766 static int page_alloc_cpu_dead(unsigned int cpu) 7767 { 7768 7769 lru_add_drain_cpu(cpu); 7770 drain_pages(cpu); 7771 7772 /* 7773 * Spill the event counters of the dead processor 7774 * into the current processors event counters. 7775 * This artificially elevates the count of the current 7776 * processor. 7777 */ 7778 vm_events_fold_cpu(cpu); 7779 7780 /* 7781 * Zero the differential counters of the dead processor 7782 * so that the vm statistics are consistent. 7783 * 7784 * This is only okay since the processor is dead and cannot 7785 * race with what we are doing. 7786 */ 7787 cpu_vm_stats_fold(cpu); 7788 return 0; 7789 } 7790 7791 #ifdef CONFIG_NUMA 7792 int hashdist = HASHDIST_DEFAULT; 7793 7794 static int __init set_hashdist(char *str) 7795 { 7796 if (!str) 7797 return 0; 7798 hashdist = simple_strtoul(str, &str, 0); 7799 return 1; 7800 } 7801 __setup("hashdist=", set_hashdist); 7802 #endif 7803 7804 void __init page_alloc_init(void) 7805 { 7806 int ret; 7807 7808 #ifdef CONFIG_NUMA 7809 if (num_node_state(N_MEMORY) == 1) 7810 hashdist = 0; 7811 #endif 7812 7813 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD, 7814 "mm/page_alloc:dead", NULL, 7815 page_alloc_cpu_dead); 7816 WARN_ON(ret < 0); 7817 } 7818 7819 /* 7820 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 7821 * or min_free_kbytes changes. 7822 */ 7823 static void calculate_totalreserve_pages(void) 7824 { 7825 struct pglist_data *pgdat; 7826 unsigned long reserve_pages = 0; 7827 enum zone_type i, j; 7828 7829 for_each_online_pgdat(pgdat) { 7830 7831 pgdat->totalreserve_pages = 0; 7832 7833 for (i = 0; i < MAX_NR_ZONES; i++) { 7834 struct zone *zone = pgdat->node_zones + i; 7835 long max = 0; 7836 unsigned long managed_pages = zone_managed_pages(zone); 7837 7838 /* Find valid and maximum lowmem_reserve in the zone */ 7839 for (j = i; j < MAX_NR_ZONES; j++) { 7840 if (zone->lowmem_reserve[j] > max) 7841 max = zone->lowmem_reserve[j]; 7842 } 7843 7844 /* we treat the high watermark as reserved pages. */ 7845 max += high_wmark_pages(zone); 7846 7847 if (max > managed_pages) 7848 max = managed_pages; 7849 7850 pgdat->totalreserve_pages += max; 7851 7852 reserve_pages += max; 7853 } 7854 } 7855 totalreserve_pages = reserve_pages; 7856 } 7857 7858 /* 7859 * setup_per_zone_lowmem_reserve - called whenever 7860 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 7861 * has a correct pages reserved value, so an adequate number of 7862 * pages are left in the zone after a successful __alloc_pages(). 
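 *
 * Worked example (hypothetical numbers): with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] = 256 and 4,000,000 managed pages
 * in the zones above ZONE_DMA32, the loop below sets that zone's
 * lowmem_reserve for the highest zone index to 4000000 / 256 = 15625 pages,
 * which allocations targeting the higher zones must leave untouched there.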
7863 */ 7864 static void setup_per_zone_lowmem_reserve(void) 7865 { 7866 struct pglist_data *pgdat; 7867 enum zone_type i, j; 7868 7869 for_each_online_pgdat(pgdat) { 7870 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 7871 struct zone *zone = &pgdat->node_zones[i]; 7872 int ratio = sysctl_lowmem_reserve_ratio[i]; 7873 bool clear = !ratio || !zone_managed_pages(zone); 7874 unsigned long managed_pages = 0; 7875 7876 for (j = i + 1; j < MAX_NR_ZONES; j++) { 7877 if (clear) { 7878 zone->lowmem_reserve[j] = 0; 7879 } else { 7880 struct zone *upper_zone = &pgdat->node_zones[j]; 7881 7882 managed_pages += zone_managed_pages(upper_zone); 7883 zone->lowmem_reserve[j] = managed_pages / ratio; 7884 } 7885 } 7886 } 7887 } 7888 7889 /* update totalreserve_pages */ 7890 calculate_totalreserve_pages(); 7891 } 7892 7893 static void __setup_per_zone_wmarks(void) 7894 { 7895 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 7896 unsigned long lowmem_pages = 0; 7897 struct zone *zone; 7898 unsigned long flags; 7899 7900 /* Calculate total number of !ZONE_HIGHMEM pages */ 7901 for_each_zone(zone) { 7902 if (!is_highmem(zone)) 7903 lowmem_pages += zone_managed_pages(zone); 7904 } 7905 7906 for_each_zone(zone) { 7907 u64 tmp; 7908 7909 spin_lock_irqsave(&zone->lock, flags); 7910 tmp = (u64)pages_min * zone_managed_pages(zone); 7911 do_div(tmp, lowmem_pages); 7912 if (is_highmem(zone)) { 7913 /* 7914 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 7915 * need highmem pages, so cap pages_min to a small 7916 * value here. 7917 * 7918 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 7919 * deltas control async page reclaim, and so should 7920 * not be capped for highmem. 7921 */ 7922 unsigned long min_pages; 7923 7924 min_pages = zone_managed_pages(zone) / 1024; 7925 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 7926 zone->_watermark[WMARK_MIN] = min_pages; 7927 } else { 7928 /* 7929 * If it's a lowmem zone, reserve a number of pages 7930 * proportionate to the zone's size. 7931 */ 7932 zone->_watermark[WMARK_MIN] = tmp; 7933 } 7934 7935 /* 7936 * Set the kswapd watermarks distance according to the 7937 * scale factor in proportion to available memory, but 7938 * ensure a minimum size on small systems. 7939 */ 7940 tmp = max_t(u64, tmp >> 2, 7941 mult_frac(zone_managed_pages(zone), 7942 watermark_scale_factor, 10000)); 7943 7944 zone->watermark_boost = 0; 7945 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 7946 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; 7947 7948 spin_unlock_irqrestore(&zone->lock, flags); 7949 } 7950 7951 /* update totalreserve_pages */ 7952 calculate_totalreserve_pages(); 7953 } 7954 7955 /** 7956 * setup_per_zone_wmarks - called when min_free_kbytes changes 7957 * or when memory is hot-{added|removed} 7958 * 7959 * Ensures that the watermark[min,low,high] values for each zone are set 7960 * correctly with respect to min_free_kbytes. 7961 */ 7962 void setup_per_zone_wmarks(void) 7963 { 7964 static DEFINE_SPINLOCK(lock); 7965 7966 spin_lock(&lock); 7967 __setup_per_zone_wmarks(); 7968 spin_unlock(&lock); 7969 } 7970 7971 /* 7972 * Initialise min_free_kbytes. 7973 * 7974 * For small machines we want it small (128k min). For large machines 7975 * we want it large (256MB max). But it is not linear, because network 7976 * bandwidth does not increase linearly with machine size. 
We use 7977 * 7978 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 7979 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 7980 * 7981 * which yields 7982 * 7983 * 16MB: 512k 7984 * 32MB: 724k 7985 * 64MB: 1024k 7986 * 128MB: 1448k 7987 * 256MB: 2048k 7988 * 512MB: 2896k 7989 * 1024MB: 4096k 7990 * 2048MB: 5792k 7991 * 4096MB: 8192k 7992 * 8192MB: 11584k 7993 * 16384MB: 16384k 7994 */ 7995 int __meminit init_per_zone_wmark_min(void) 7996 { 7997 unsigned long lowmem_kbytes; 7998 int new_min_free_kbytes; 7999 8000 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 8001 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 8002 8003 if (new_min_free_kbytes > user_min_free_kbytes) { 8004 min_free_kbytes = new_min_free_kbytes; 8005 if (min_free_kbytes < 128) 8006 min_free_kbytes = 128; 8007 if (min_free_kbytes > 262144) 8008 min_free_kbytes = 262144; 8009 } else { 8010 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 8011 new_min_free_kbytes, user_min_free_kbytes); 8012 } 8013 setup_per_zone_wmarks(); 8014 refresh_zone_stat_thresholds(); 8015 setup_per_zone_lowmem_reserve(); 8016 8017 #ifdef CONFIG_NUMA 8018 setup_min_unmapped_ratio(); 8019 setup_min_slab_ratio(); 8020 #endif 8021 8022 khugepaged_min_free_kbytes_update(); 8023 8024 return 0; 8025 } 8026 postcore_initcall(init_per_zone_wmark_min) 8027 8028 /* 8029 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 8030 * that we can call two helper functions whenever min_free_kbytes 8031 * changes. 8032 */ 8033 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 8034 void *buffer, size_t *length, loff_t *ppos) 8035 { 8036 int rc; 8037 8038 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8039 if (rc) 8040 return rc; 8041 8042 if (write) { 8043 user_min_free_kbytes = min_free_kbytes; 8044 setup_per_zone_wmarks(); 8045 } 8046 return 0; 8047 } 8048 8049 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 8050 void *buffer, size_t *length, loff_t *ppos) 8051 { 8052 int rc; 8053 8054 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8055 if (rc) 8056 return rc; 8057 8058 if (write) 8059 setup_per_zone_wmarks(); 8060 8061 return 0; 8062 } 8063 8064 #ifdef CONFIG_NUMA 8065 static void setup_min_unmapped_ratio(void) 8066 { 8067 pg_data_t *pgdat; 8068 struct zone *zone; 8069 8070 for_each_online_pgdat(pgdat) 8071 pgdat->min_unmapped_pages = 0; 8072 8073 for_each_zone(zone) 8074 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 8075 sysctl_min_unmapped_ratio) / 100; 8076 } 8077 8078 8079 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 8080 void *buffer, size_t *length, loff_t *ppos) 8081 { 8082 int rc; 8083 8084 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8085 if (rc) 8086 return rc; 8087 8088 setup_min_unmapped_ratio(); 8089 8090 return 0; 8091 } 8092 8093 static void setup_min_slab_ratio(void) 8094 { 8095 pg_data_t *pgdat; 8096 struct zone *zone; 8097 8098 for_each_online_pgdat(pgdat) 8099 pgdat->min_slab_pages = 0; 8100 8101 for_each_zone(zone) 8102 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 8103 sysctl_min_slab_ratio) / 100; 8104 } 8105 8106 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 8107 void *buffer, size_t *length, loff_t *ppos) 8108 { 8109 int rc; 8110 8111 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8112 if (rc) 8113 return rc; 8114 8115 
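/* Recompute each node's min_slab_pages from the new sysctl_min_slab_ratio. */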
setup_min_slab_ratio(); 8116 8117 return 0; 8118 } 8119 #endif 8120 8121 /* 8122 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 8123 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 8124 * whenever sysctl_lowmem_reserve_ratio changes. 8125 * 8126 * The reserve ratio obviously has absolutely no relation with the 8127 * minimum watermarks. The lowmem reserve ratio can only make sense 8128 * if in function of the boot time zone sizes. 8129 */ 8130 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, 8131 void *buffer, size_t *length, loff_t *ppos) 8132 { 8133 int i; 8134 8135 proc_dointvec_minmax(table, write, buffer, length, ppos); 8136 8137 for (i = 0; i < MAX_NR_ZONES; i++) { 8138 if (sysctl_lowmem_reserve_ratio[i] < 1) 8139 sysctl_lowmem_reserve_ratio[i] = 0; 8140 } 8141 8142 setup_per_zone_lowmem_reserve(); 8143 return 0; 8144 } 8145 8146 /* 8147 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 8148 * cpu. It is the fraction of total pages in each zone that a hot per cpu 8149 * pagelist can have before it gets flushed back to buddy allocator. 8150 */ 8151 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, 8152 void *buffer, size_t *length, loff_t *ppos) 8153 { 8154 struct zone *zone; 8155 int old_percpu_pagelist_fraction; 8156 int ret; 8157 8158 mutex_lock(&pcp_batch_high_lock); 8159 old_percpu_pagelist_fraction = percpu_pagelist_fraction; 8160 8161 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 8162 if (!write || ret < 0) 8163 goto out; 8164 8165 /* Sanity checking to avoid pcp imbalance */ 8166 if (percpu_pagelist_fraction && 8167 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) { 8168 percpu_pagelist_fraction = old_percpu_pagelist_fraction; 8169 ret = -EINVAL; 8170 goto out; 8171 } 8172 8173 /* No change? */ 8174 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction) 8175 goto out; 8176 8177 for_each_populated_zone(zone) 8178 zone_set_pageset_high_and_batch(zone); 8179 out: 8180 mutex_unlock(&pcp_batch_high_lock); 8181 return ret; 8182 } 8183 8184 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES 8185 /* 8186 * Returns the number of pages that arch has reserved but 8187 * is not known to alloc_large_system_hash(). 8188 */ 8189 static unsigned long __init arch_reserved_kernel_pages(void) 8190 { 8191 return 0; 8192 } 8193 #endif 8194 8195 /* 8196 * Adaptive scale is meant to reduce sizes of hash tables on large memory 8197 * machines. As memory size is increased the scale is also increased but at 8198 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory 8199 * quadruples the scale is increased by one, which means the size of hash table 8200 * only doubles, instead of quadrupling as well. 8201 * Because 32-bit systems cannot have large physical memory, where this scaling 8202 * makes sense, it is disabled on such platforms. 
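 *
 * Rough example (assuming 4 KiB pages): on a 512 GiB machine the loop in
 * alloc_large_system_hash() below increments 'scale' twice (the 64 GiB and
 * 256 GiB thresholds are both crossed), so the resulting table ends up 4x
 * smaller than the unadjusted 1-bucket-per-2^scale-bytes sizing would give.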
8203 */ 8204 #if __BITS_PER_LONG > 32 8205 #define ADAPT_SCALE_BASE (64ul << 30) 8206 #define ADAPT_SCALE_SHIFT 2 8207 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 8208 #endif 8209 8210 /* 8211 * allocate a large system hash table from bootmem 8212 * - it is assumed that the hash table must contain an exact power-of-2 8213 * quantity of entries 8214 * - limit is the number of hash buckets, not the total allocation size 8215 */ 8216 void *__init alloc_large_system_hash(const char *tablename, 8217 unsigned long bucketsize, 8218 unsigned long numentries, 8219 int scale, 8220 int flags, 8221 unsigned int *_hash_shift, 8222 unsigned int *_hash_mask, 8223 unsigned long low_limit, 8224 unsigned long high_limit) 8225 { 8226 unsigned long long max = high_limit; 8227 unsigned long log2qty, size; 8228 void *table = NULL; 8229 gfp_t gfp_flags; 8230 bool virt; 8231 8232 /* allow the kernel cmdline to have a say */ 8233 if (!numentries) { 8234 /* round applicable memory size up to nearest megabyte */ 8235 numentries = nr_kernel_pages; 8236 numentries -= arch_reserved_kernel_pages(); 8237 8238 /* It isn't necessary when PAGE_SIZE >= 1MB */ 8239 if (PAGE_SHIFT < 20) 8240 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 8241 8242 #if __BITS_PER_LONG > 32 8243 if (!high_limit) { 8244 unsigned long adapt; 8245 8246 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 8247 adapt <<= ADAPT_SCALE_SHIFT) 8248 scale++; 8249 } 8250 #endif 8251 8252 /* limit to 1 bucket per 2^scale bytes of low memory */ 8253 if (scale > PAGE_SHIFT) 8254 numentries >>= (scale - PAGE_SHIFT); 8255 else 8256 numentries <<= (PAGE_SHIFT - scale); 8257 8258 /* Make sure we've got at least a 0-order allocation.. */ 8259 if (unlikely(flags & HASH_SMALL)) { 8260 /* Makes no sense without HASH_EARLY */ 8261 WARN_ON(!(flags & HASH_EARLY)); 8262 if (!(numentries >> *_hash_shift)) { 8263 numentries = 1UL << *_hash_shift; 8264 BUG_ON(!numentries); 8265 } 8266 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 8267 numentries = PAGE_SIZE / bucketsize; 8268 } 8269 numentries = roundup_pow_of_two(numentries); 8270 8271 /* limit allocation size to 1/16 total memory by default */ 8272 if (max == 0) { 8273 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 8274 do_div(max, bucketsize); 8275 } 8276 max = min(max, 0x80000000ULL); 8277 8278 if (numentries < low_limit) 8279 numentries = low_limit; 8280 if (numentries > max) 8281 numentries = max; 8282 8283 log2qty = ilog2(numentries); 8284 8285 gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 8286 do { 8287 virt = false; 8288 size = bucketsize << log2qty; 8289 if (flags & HASH_EARLY) { 8290 if (flags & HASH_ZERO) 8291 table = memblock_alloc(size, SMP_CACHE_BYTES); 8292 else 8293 table = memblock_alloc_raw(size, 8294 SMP_CACHE_BYTES); 8295 } else if (get_order(size) >= MAX_ORDER || hashdist) { 8296 table = __vmalloc(size, gfp_flags); 8297 virt = true; 8298 } else { 8299 /* 8300 * If bucketsize is not a power-of-two, we may free 8301 * some pages at the end of hash table which 8302 * alloc_pages_exact() automatically does 8303 */ 8304 table = alloc_pages_exact(size, gfp_flags); 8305 kmemleak_alloc(table, size, 1, gfp_flags); 8306 } 8307 } while (!table && size > PAGE_SIZE && --log2qty); 8308 8309 if (!table) 8310 panic("Failed to allocate %s hash table\n", tablename); 8311 8312 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 8313 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, 8314 virt ? 
"vmalloc" : "linear"); 8315 8316 if (_hash_shift) 8317 *_hash_shift = log2qty; 8318 if (_hash_mask) 8319 *_hash_mask = (1 << log2qty) - 1; 8320 8321 return table; 8322 } 8323 8324 /* 8325 * This function checks whether pageblock includes unmovable pages or not. 8326 * 8327 * PageLRU check without isolation or lru_lock could race so that 8328 * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable 8329 * check without lock_page also may miss some movable non-lru pages at 8330 * race condition. So you can't expect this function should be exact. 8331 * 8332 * Returns a page without holding a reference. If the caller wants to 8333 * dereference that page (e.g., dumping), it has to make sure that it 8334 * cannot get removed (e.g., via memory unplug) concurrently. 8335 * 8336 */ 8337 struct page *has_unmovable_pages(struct zone *zone, struct page *page, 8338 int migratetype, int flags) 8339 { 8340 unsigned long iter = 0; 8341 unsigned long pfn = page_to_pfn(page); 8342 unsigned long offset = pfn % pageblock_nr_pages; 8343 8344 if (is_migrate_cma_page(page)) { 8345 /* 8346 * CMA allocations (alloc_contig_range) really need to mark 8347 * isolate CMA pageblocks even when they are not movable in fact 8348 * so consider them movable here. 8349 */ 8350 if (is_migrate_cma(migratetype)) 8351 return NULL; 8352 8353 return page; 8354 } 8355 8356 for (; iter < pageblock_nr_pages - offset; iter++) { 8357 if (!pfn_valid_within(pfn + iter)) 8358 continue; 8359 8360 page = pfn_to_page(pfn + iter); 8361 8362 /* 8363 * Both, bootmem allocations and memory holes are marked 8364 * PG_reserved and are unmovable. We can even have unmovable 8365 * allocations inside ZONE_MOVABLE, for example when 8366 * specifying "movablecore". 8367 */ 8368 if (PageReserved(page)) 8369 return page; 8370 8371 /* 8372 * If the zone is movable and we have ruled out all reserved 8373 * pages then it should be reasonably safe to assume the rest 8374 * is movable. 8375 */ 8376 if (zone_idx(zone) == ZONE_MOVABLE) 8377 continue; 8378 8379 /* 8380 * Hugepages are not in LRU lists, but they're movable. 8381 * THPs are on the LRU, but need to be counted as #small pages. 8382 * We need not scan over tail pages because we don't 8383 * handle each tail page individually in migration. 8384 */ 8385 if (PageHuge(page) || PageTransCompound(page)) { 8386 struct page *head = compound_head(page); 8387 unsigned int skip_pages; 8388 8389 if (PageHuge(page)) { 8390 if (!hugepage_migration_supported(page_hstate(head))) 8391 return page; 8392 } else if (!PageLRU(head) && !__PageMovable(head)) { 8393 return page; 8394 } 8395 8396 skip_pages = compound_nr(head) - (page - head); 8397 iter += skip_pages - 1; 8398 continue; 8399 } 8400 8401 /* 8402 * We can't use page_count without pin a page 8403 * because another CPU can free compound page. 8404 * This check already skips compound tails of THP 8405 * because their page->_refcount is zero at all time. 8406 */ 8407 if (!page_ref_count(page)) { 8408 if (PageBuddy(page)) 8409 iter += (1 << buddy_order(page)) - 1; 8410 continue; 8411 } 8412 8413 /* 8414 * The HWPoisoned page may be not in buddy system, and 8415 * page_count() is not 0. 8416 */ 8417 if ((flags & MEMORY_OFFLINE) && PageHWPoison(page)) 8418 continue; 8419 8420 /* 8421 * We treat all PageOffline() pages as movable when offlining 8422 * to give drivers a chance to decrement their reference count 8423 * in MEM_GOING_OFFLINE in order to indicate that these pages 8424 * can be offlined as there are no direct references anymore. 
8425 * For actually unmovable PageOffline() where the driver does
8426 * not support this, we will fail later when trying to actually
8427 * move these pages that still have a reference count > 0.
8428 * (false negatives in this function only)
8429 */
8430 if ((flags & MEMORY_OFFLINE) && PageOffline(page))
8431 continue;
8432
8433 if (__PageMovable(page) || PageLRU(page))
8434 continue;
8435
8436 /*
8437 * If there are RECLAIMABLE pages, we need to check
8438 * them. But for now, memory offline itself doesn't call
8439 * shrink_node_slabs(); this still needs to be fixed.
8440 */
8441 return page;
8442 }
8443 return NULL;
8444 }
8445
8446 #ifdef CONFIG_CONTIG_ALLOC
8447 static unsigned long pfn_max_align_down(unsigned long pfn)
8448 {
8449 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
8450 pageblock_nr_pages) - 1);
8451 }
8452
8453 static unsigned long pfn_max_align_up(unsigned long pfn)
8454 {
8455 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
8456 pageblock_nr_pages));
8457 }
8458
8459 /* [start, end) must belong to a single zone. */
8460 static int __alloc_contig_migrate_range(struct compact_control *cc,
8461 unsigned long start, unsigned long end)
8462 {
8463 /* This function is based on compact_zone() from compaction.c. */
8464 unsigned int nr_reclaimed;
8465 unsigned long pfn = start;
8466 unsigned int tries = 0;
8467 int ret = 0;
8468 struct migration_target_control mtc = {
8469 .nid = zone_to_nid(cc->zone),
8470 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
8471 };
8472
8473 migrate_prep();
8474
8475 while (pfn < end || !list_empty(&cc->migratepages)) {
8476 if (fatal_signal_pending(current)) {
8477 ret = -EINTR;
8478 break;
8479 }
8480
8481 if (list_empty(&cc->migratepages)) {
8482 cc->nr_migratepages = 0;
8483 pfn = isolate_migratepages_range(cc, pfn, end);
8484 if (!pfn) {
8485 ret = -EINTR;
8486 break;
8487 }
8488 tries = 0;
8489 } else if (++tries == 5) {
8490 ret = ret < 0 ? ret : -EBUSY;
8491 break;
8492 }
8493
8494 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
8495 &cc->migratepages);
8496 cc->nr_migratepages -= nr_reclaimed;
8497
8498 ret = migrate_pages(&cc->migratepages, alloc_migration_target,
8499 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
8500 }
8501 if (ret < 0) {
8502 putback_movable_pages(&cc->migratepages);
8503 return ret;
8504 }
8505 return 0;
8506 }
8507
8508 /**
8509 * alloc_contig_range() -- tries to allocate given range of pages
8510 * @start: start PFN to allocate
8511 * @end: one-past-the-last PFN to allocate
8512 * @migratetype: migratetype of the underlying pageblocks (either
8513 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
8514 * in range must have the same migratetype and it must
8515 * be either of the two.
8516 * @gfp_mask: GFP mask to use during compaction
8517 *
8518 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
8519 * aligned. The PFN range must belong to a single zone.
8520 *
8521 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
8522 * pageblocks in the range. Once isolated, the pageblocks should not
8523 * be modified by others.
8524 *
8525 * Return: zero on success or negative error code. On success all
8526 * pages whose PFN is in [start, end) are allocated for the caller and
8527 * need to be freed with free_contig_range().
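 *
 * A minimal usage sketch (hypothetical PFNs), claiming 512 pages:
 *
 *   if (!alloc_contig_range(0x40000, 0x40200, MIGRATE_MOVABLE, GFP_KERNEL)) {
 *           ... pages with PFN 0x40000-0x401ff now belong to the caller ...
 *           free_contig_range(0x40000, 512);
 *   }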
8528 */
8529 int alloc_contig_range(unsigned long start, unsigned long end,
8530 unsigned migratetype, gfp_t gfp_mask)
8531 {
8532 unsigned long outer_start, outer_end;
8533 unsigned int order;
8534 int ret = 0;
8535
8536 struct compact_control cc = {
8537 .nr_migratepages = 0,
8538 .order = -1,
8539 .zone = page_zone(pfn_to_page(start)),
8540 .mode = MIGRATE_SYNC,
8541 .ignore_skip_hint = true,
8542 .no_set_skip_hint = true,
8543 .gfp_mask = current_gfp_context(gfp_mask),
8544 .alloc_contig = true,
8545 };
8546 INIT_LIST_HEAD(&cc.migratepages);
8547
8548 /*
8549 * What we do here is mark all pageblocks in the range as
8550 * MIGRATE_ISOLATE. Because pageblock and max order pages may
8551 * have different sizes, and due to the way the page allocator
8552 * works, we align the range to the bigger of the two so
8553 * that the page allocator won't try to merge buddies from
8554 * different pageblocks and change MIGRATE_ISOLATE to some
8555 * other migration type.
8556 *
8557 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
8558 * migrate the pages from an unaligned range (i.e. pages that
8559 * we are interested in). This will put all the pages in
8560 * range back to the page allocator as MIGRATE_ISOLATE.
8561 *
8562 * When this is done, we take the pages in range from the page
8563 * allocator, removing them from the buddy system. This way
8564 * the page allocator will never consider using them.
8565 *
8566 * This lets us mark the pageblocks back as
8567 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
8568 * aligned range but not in the unaligned, original range are
8569 * put back to the page allocator so that buddy can use them.
8570 */
8571
8572 ret = start_isolate_page_range(pfn_max_align_down(start),
8573 pfn_max_align_up(end), migratetype, 0);
8574 if (ret)
8575 return ret;
8576
8577 drain_all_pages(cc.zone);
8578
8579 /*
8580 * In case of -EBUSY, we'd like to know which page causes the problem.
8581 * So, just fall through. test_pages_isolated() has a tracepoint
8582 * which will report the busy page.
8583 *
8584 * It is possible that busy pages could become available before
8585 * the call to test_pages_isolated, and the range will actually be
8586 * allocated. So, if we fall through be sure to clear ret so that
8587 * -EBUSY is not accidentally used or returned to the caller.
8588 */
8589 ret = __alloc_contig_migrate_range(&cc, start, end);
8590 if (ret && ret != -EBUSY)
8591 goto done;
8592 ret = 0;
8593
8594 /*
8595 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
8596 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
8597 * more, all pages in [start, end) are free in the page allocator.
8598 * What we are going to do is to allocate all pages from
8599 * [start, end) (that is, remove them from the page allocator).
8600 *
8601 * The only problem is that pages at the beginning and at the
8602 * end of the interesting range may not be aligned with pages that
8603 * the page allocator holds, i.e. they can be part of higher order
8604 * pages. Because of this, we reserve the bigger range and
8605 * once this is done free the pages we are not interested in.
8606 *
8607 * We don't have to hold zone->lock here because the pages are
8608 * isolated thus they won't get removed from buddy.
8609 */ 8610 8611 lru_add_drain_all(); 8612 8613 order = 0; 8614 outer_start = start; 8615 while (!PageBuddy(pfn_to_page(outer_start))) { 8616 if (++order >= MAX_ORDER) { 8617 outer_start = start; 8618 break; 8619 } 8620 outer_start &= ~0UL << order; 8621 } 8622 8623 if (outer_start != start) { 8624 order = buddy_order(pfn_to_page(outer_start)); 8625 8626 /* 8627 * outer_start page could be small order buddy page and 8628 * it doesn't include start page. Adjust outer_start 8629 * in this case to report failed page properly 8630 * on tracepoint in test_pages_isolated() 8631 */ 8632 if (outer_start + (1UL << order) <= start) 8633 outer_start = start; 8634 } 8635 8636 /* Make sure the range is really isolated. */ 8637 if (test_pages_isolated(outer_start, end, 0)) { 8638 pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n", 8639 __func__, outer_start, end); 8640 ret = -EBUSY; 8641 goto done; 8642 } 8643 8644 /* Grab isolated pages from freelists. */ 8645 outer_end = isolate_freepages_range(&cc, outer_start, end); 8646 if (!outer_end) { 8647 ret = -EBUSY; 8648 goto done; 8649 } 8650 8651 /* Free head and tail (if any) */ 8652 if (start != outer_start) 8653 free_contig_range(outer_start, start - outer_start); 8654 if (end != outer_end) 8655 free_contig_range(end, outer_end - end); 8656 8657 done: 8658 undo_isolate_page_range(pfn_max_align_down(start), 8659 pfn_max_align_up(end), migratetype); 8660 return ret; 8661 } 8662 EXPORT_SYMBOL(alloc_contig_range); 8663 8664 static int __alloc_contig_pages(unsigned long start_pfn, 8665 unsigned long nr_pages, gfp_t gfp_mask) 8666 { 8667 unsigned long end_pfn = start_pfn + nr_pages; 8668 8669 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, 8670 gfp_mask); 8671 } 8672 8673 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 8674 unsigned long nr_pages) 8675 { 8676 unsigned long i, end_pfn = start_pfn + nr_pages; 8677 struct page *page; 8678 8679 for (i = start_pfn; i < end_pfn; i++) { 8680 page = pfn_to_online_page(i); 8681 if (!page) 8682 return false; 8683 8684 if (page_zone(page) != z) 8685 return false; 8686 8687 if (PageReserved(page)) 8688 return false; 8689 8690 if (page_count(page) > 0) 8691 return false; 8692 8693 if (PageHuge(page)) 8694 return false; 8695 } 8696 return true; 8697 } 8698 8699 static bool zone_spans_last_pfn(const struct zone *zone, 8700 unsigned long start_pfn, unsigned long nr_pages) 8701 { 8702 unsigned long last_pfn = start_pfn + nr_pages - 1; 8703 8704 return zone_spans_pfn(zone, last_pfn); 8705 } 8706 8707 /** 8708 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 8709 * @nr_pages: Number of contiguous pages to allocate 8710 * @gfp_mask: GFP mask to limit search and used during compaction 8711 * @nid: Target node 8712 * @nodemask: Mask for other possible nodes 8713 * 8714 * This routine is a wrapper around alloc_contig_range(). It scans over zones 8715 * on an applicable zonelist to find a contiguous pfn range which can then be 8716 * tried for allocation with alloc_contig_range(). This routine is intended 8717 * for allocation requests which can not be fulfilled with the buddy allocator. 8718 * 8719 * The allocated memory is always aligned to a page boundary. If nr_pages is a 8720 * power of two then the alignment is guaranteed to be to the given nr_pages 8721 * (e.g. 1GB request would be aligned to 1GB). 8722 * 8723 * Allocated pages can be freed with free_contig_range() or by manually calling 8724 * __free_page() on each allocated page. 
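 *
 * For example (illustrative), requesting a naturally aligned 1 GiB block on
 * node 0:
 *
 *   page = alloc_contig_pages(1UL << (30 - PAGE_SHIFT), GFP_KERNEL, 0, NULL);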
8725 *
8726 * Return: pointer to contiguous pages on success, or NULL if not successful.
8727 */
8728 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
8729 int nid, nodemask_t *nodemask)
8730 {
8731 unsigned long ret, pfn, flags;
8732 struct zonelist *zonelist;
8733 struct zone *zone;
8734 struct zoneref *z;
8735
8736 zonelist = node_zonelist(nid, gfp_mask);
8737 for_each_zone_zonelist_nodemask(zone, z, zonelist,
8738 gfp_zone(gfp_mask), nodemask) {
8739 spin_lock_irqsave(&zone->lock, flags);
8740
8741 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
8742 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
8743 if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
8744 /*
8745 * We release the zone lock here because
8746 * alloc_contig_range() will also lock the zone
8747 * at some point. If there's an allocation
8748 * spinning on this lock, it may win the race
8749 * and cause alloc_contig_range() to fail...
8750 */
8751 spin_unlock_irqrestore(&zone->lock, flags);
8752 ret = __alloc_contig_pages(pfn, nr_pages,
8753 gfp_mask);
8754 if (!ret)
8755 return pfn_to_page(pfn);
8756 spin_lock_irqsave(&zone->lock, flags);
8757 }
8758 pfn += nr_pages;
8759 }
8760 spin_unlock_irqrestore(&zone->lock, flags);
8761 }
8762 return NULL;
8763 }
8764 #endif /* CONFIG_CONTIG_ALLOC */
8765
8766 void free_contig_range(unsigned long pfn, unsigned int nr_pages)
8767 {
8768 unsigned int count = 0;
8769
8770 for (; nr_pages--; pfn++) {
8771 struct page *page = pfn_to_page(pfn);
8772
8773 count += page_count(page) != 1;
8774 __free_page(page);
8775 }
8776 WARN(count != 0, "%d pages are still in use!\n", count);
8777 }
8778 EXPORT_SYMBOL(free_contig_range);
8779
8780 /*
8781 * The zone indicated has a new number of managed_pages; batch sizes and percpu
8782 * page high values need to be recalculated.
8783 */
8784 void __meminit zone_pcp_update(struct zone *zone)
8785 {
8786 mutex_lock(&pcp_batch_high_lock);
8787 zone_set_pageset_high_and_batch(zone);
8788 mutex_unlock(&pcp_batch_high_lock);
8789 }
8790
8791 /*
8792 * Effectively disable pcplists for the zone by setting the high limit to 0
8793 * and draining all cpus. A concurrent page freeing on another CPU that's about
8794 * to put the page on the pcplist will either finish before the drain and the page
8795 * will be drained, or observe the new high limit and skip the pcplist.
8796 *
8797 * Must be paired with a call to zone_pcp_enable().
8798 */
8799 void zone_pcp_disable(struct zone *zone)
8800 {
8801 mutex_lock(&pcp_batch_high_lock);
8802 __zone_set_pageset_high_and_batch(zone, 0, 1);
8803 __drain_all_pages(zone, true);
8804 }
8805
8806 void zone_pcp_enable(struct zone *zone)
8807 {
8808 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
8809 mutex_unlock(&pcp_batch_high_lock);
8810 }
8811
8812 void zone_pcp_reset(struct zone *zone)
8813 {
8814 unsigned long flags;
8815 int cpu;
8816 struct per_cpu_pageset *pset;
8817
8818 /* avoid races with drain_pages() */
8819 local_irq_save(flags);
8820 if (zone->pageset != &boot_pageset) {
8821 for_each_online_cpu(cpu) {
8822 pset = per_cpu_ptr(zone->pageset, cpu);
8823 drain_zonestat(zone, pset);
8824 }
8825 free_percpu(zone->pageset);
8826 zone->pageset = &boot_pageset;
8827 }
8828 local_irq_restore(flags);
8829 }
8830
8831 #ifdef CONFIG_MEMORY_HOTREMOVE
8832 /*
8833 * All pages in the range must be in a single zone, must not contain holes,
8834 * must span full sections, and must be isolated before calling this function.
8835 */ 8836 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 8837 { 8838 unsigned long pfn = start_pfn; 8839 struct page *page; 8840 struct zone *zone; 8841 unsigned int order; 8842 unsigned long flags; 8843 8844 offline_mem_sections(pfn, end_pfn); 8845 zone = page_zone(pfn_to_page(pfn)); 8846 spin_lock_irqsave(&zone->lock, flags); 8847 while (pfn < end_pfn) { 8848 page = pfn_to_page(pfn); 8849 /* 8850 * The HWPoisoned page may be not in buddy system, and 8851 * page_count() is not 0. 8852 */ 8853 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 8854 pfn++; 8855 continue; 8856 } 8857 /* 8858 * At this point all remaining PageOffline() pages have a 8859 * reference count of 0 and can simply be skipped. 8860 */ 8861 if (PageOffline(page)) { 8862 BUG_ON(page_count(page)); 8863 BUG_ON(PageBuddy(page)); 8864 pfn++; 8865 continue; 8866 } 8867 8868 BUG_ON(page_count(page)); 8869 BUG_ON(!PageBuddy(page)); 8870 order = buddy_order(page); 8871 del_page_from_free_list(page, zone, order); 8872 pfn += (1 << order); 8873 } 8874 spin_unlock_irqrestore(&zone->lock, flags); 8875 } 8876 #endif 8877 8878 bool is_free_buddy_page(struct page *page) 8879 { 8880 struct zone *zone = page_zone(page); 8881 unsigned long pfn = page_to_pfn(page); 8882 unsigned long flags; 8883 unsigned int order; 8884 8885 spin_lock_irqsave(&zone->lock, flags); 8886 for (order = 0; order < MAX_ORDER; order++) { 8887 struct page *page_head = page - (pfn & ((1 << order) - 1)); 8888 8889 if (PageBuddy(page_head) && buddy_order(page_head) >= order) 8890 break; 8891 } 8892 spin_unlock_irqrestore(&zone->lock, flags); 8893 8894 return order < MAX_ORDER; 8895 } 8896 8897 #ifdef CONFIG_MEMORY_FAILURE 8898 /* 8899 * Break down a higher-order page in sub-pages, and keep our target out of 8900 * buddy allocator. 8901 */ 8902 static void break_down_buddy_pages(struct zone *zone, struct page *page, 8903 struct page *target, int low, int high, 8904 int migratetype) 8905 { 8906 unsigned long size = 1 << high; 8907 struct page *current_buddy, *next_page; 8908 8909 while (high > low) { 8910 high--; 8911 size >>= 1; 8912 8913 if (target >= &page[size]) { 8914 next_page = page + size; 8915 current_buddy = page; 8916 } else { 8917 next_page = page; 8918 current_buddy = page + size; 8919 } 8920 8921 if (set_page_guard(zone, current_buddy, high, migratetype)) 8922 continue; 8923 8924 if (current_buddy != target) { 8925 add_to_free_list(current_buddy, zone, high, migratetype); 8926 set_buddy_order(current_buddy, high); 8927 page = next_page; 8928 } 8929 } 8930 } 8931 8932 /* 8933 * Take a page that will be marked as poisoned off the buddy allocator. 
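 * (Used by the memory-failure / hwpoison handling path, as suggested by the
 * CONFIG_MEMORY_FAILURE guard above, so the page is never handed out again.)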
8934 */ 8935 bool take_page_off_buddy(struct page *page) 8936 { 8937 struct zone *zone = page_zone(page); 8938 unsigned long pfn = page_to_pfn(page); 8939 unsigned long flags; 8940 unsigned int order; 8941 bool ret = false; 8942 8943 spin_lock_irqsave(&zone->lock, flags); 8944 for (order = 0; order < MAX_ORDER; order++) { 8945 struct page *page_head = page - (pfn & ((1 << order) - 1)); 8946 int page_order = buddy_order(page_head); 8947 8948 if (PageBuddy(page_head) && page_order >= order) { 8949 unsigned long pfn_head = page_to_pfn(page_head); 8950 int migratetype = get_pfnblock_migratetype(page_head, 8951 pfn_head); 8952 8953 del_page_from_free_list(page_head, zone, page_order); 8954 break_down_buddy_pages(zone, page_head, page, 0, 8955 page_order, migratetype); 8956 ret = true; 8957 break; 8958 } 8959 if (page_count(page_head) > 0) 8960 break; 8961 } 8962 spin_unlock_irqrestore(&zone->lock, flags); 8963 return ret; 8964 } 8965 #endif 8966