// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>
#include <linux/delayacct.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
#include "swap.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 * to allow for optimizations when handing back either fresh pages
 * (memory onlining) or untouched pages (page isolation, free page
 * reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/*
 * Don't poison memory with KASAN (only for the tag-based modes).
 * During boot, all non-reserved memblock memory is exposed to page_alloc.
 * Poisoning all that memory lengthens boot time, especially on systems with
 * large amount of RAM. This flag is used to skip that poisoning.
 * This is only done for the tag-based KASAN modes, as those are able to
 * detect memory corruptions with the memory tags assigned by default.
 * All memory allocated normally after boot gets poisoned as usual.
 */
#define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

struct pagesets {
	local_lock_t lock;
};
static DEFINE_PER_CPU(struct pagesets, pagesets) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/* work_structs for global per-cpu drains */
struct pcpu_drain {
	struct zone *zone;
	struct work_struct work;
};
static DEFINE_MUTEX(pcpu_drain_mutex);
static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
169 */ 170 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 171 [N_POSSIBLE] = NODE_MASK_ALL, 172 [N_ONLINE] = { { [0] = 1UL } }, 173 #ifndef CONFIG_NUMA 174 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 175 #ifdef CONFIG_HIGHMEM 176 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 177 #endif 178 [N_MEMORY] = { { [0] = 1UL } }, 179 [N_CPU] = { { [0] = 1UL } }, 180 #endif /* NUMA */ 181 }; 182 EXPORT_SYMBOL(node_states); 183 184 atomic_long_t _totalram_pages __read_mostly; 185 EXPORT_SYMBOL(_totalram_pages); 186 unsigned long totalreserve_pages __read_mostly; 187 unsigned long totalcma_pages __read_mostly; 188 189 int percpu_pagelist_high_fraction; 190 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 191 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); 192 EXPORT_SYMBOL(init_on_alloc); 193 194 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 195 EXPORT_SYMBOL(init_on_free); 196 197 static bool _init_on_alloc_enabled_early __read_mostly 198 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); 199 static int __init early_init_on_alloc(char *buf) 200 { 201 202 return kstrtobool(buf, &_init_on_alloc_enabled_early); 203 } 204 early_param("init_on_alloc", early_init_on_alloc); 205 206 static bool _init_on_free_enabled_early __read_mostly 207 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); 208 static int __init early_init_on_free(char *buf) 209 { 210 return kstrtobool(buf, &_init_on_free_enabled_early); 211 } 212 early_param("init_on_free", early_init_on_free); 213 214 /* 215 * A cached value of the page's pageblock's migratetype, used when the page is 216 * put on a pcplist. Used to avoid the pageblock migratetype lookup when 217 * freeing from pcplists in most cases, at the cost of possibly becoming stale. 218 * Also the migratetype set in the page does not necessarily match the pcplist 219 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any 220 * other index - this ensures that it will be put on the correct CMA freelist. 221 */ 222 static inline int get_pcppage_migratetype(struct page *page) 223 { 224 return page->index; 225 } 226 227 static inline void set_pcppage_migratetype(struct page *page, int migratetype) 228 { 229 page->index = migratetype; 230 } 231 232 #ifdef CONFIG_PM_SLEEP 233 /* 234 * The following functions are used by the suspend/hibernate code to temporarily 235 * change gfp_allowed_mask in order to avoid using I/O during memory allocations 236 * while devices are suspended. To avoid races with the suspend/hibernate code, 237 * they should always be called with system_transition_mutex held 238 * (gfp_allowed_mask also should only be modified with system_transition_mutex 239 * held, unless the suspend/hibernate code is guaranteed not to run in parallel 240 * with that modification). 
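 *
 * Illustrative call sequence (a sketch, not a verbatim quote of the PM
 * core): around suspend/hibernation the PM code, holding
 * system_transition_mutex, calls pm_restrict_gfp_mask() below so that
 * gfp_allowed_mask loses __GFP_IO and __GFP_FS and allocations cannot
 * touch storage while devices are suspended; pm_restore_gfp_mask() puts
 * the saved mask back on the resume path.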
241 */ 242 243 static gfp_t saved_gfp_mask; 244 245 void pm_restore_gfp_mask(void) 246 { 247 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 248 if (saved_gfp_mask) { 249 gfp_allowed_mask = saved_gfp_mask; 250 saved_gfp_mask = 0; 251 } 252 } 253 254 void pm_restrict_gfp_mask(void) 255 { 256 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 257 WARN_ON(saved_gfp_mask); 258 saved_gfp_mask = gfp_allowed_mask; 259 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); 260 } 261 262 bool pm_suspended_storage(void) 263 { 264 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) 265 return false; 266 return true; 267 } 268 #endif /* CONFIG_PM_SLEEP */ 269 270 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 271 unsigned int pageblock_order __read_mostly; 272 #endif 273 274 static void __free_pages_ok(struct page *page, unsigned int order, 275 fpi_t fpi_flags); 276 277 /* 278 * results with 256, 32 in the lowmem_reserve sysctl: 279 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 280 * 1G machine -> (16M dma, 784M normal, 224M high) 281 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 282 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 283 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 284 * 285 * TBD: should special case ZONE_DMA32 machines here - in those we normally 286 * don't need any ZONE_NORMAL reservation 287 */ 288 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 289 #ifdef CONFIG_ZONE_DMA 290 [ZONE_DMA] = 256, 291 #endif 292 #ifdef CONFIG_ZONE_DMA32 293 [ZONE_DMA32] = 256, 294 #endif 295 [ZONE_NORMAL] = 32, 296 #ifdef CONFIG_HIGHMEM 297 [ZONE_HIGHMEM] = 0, 298 #endif 299 [ZONE_MOVABLE] = 0, 300 }; 301 302 static char * const zone_names[MAX_NR_ZONES] = { 303 #ifdef CONFIG_ZONE_DMA 304 "DMA", 305 #endif 306 #ifdef CONFIG_ZONE_DMA32 307 "DMA32", 308 #endif 309 "Normal", 310 #ifdef CONFIG_HIGHMEM 311 "HighMem", 312 #endif 313 "Movable", 314 #ifdef CONFIG_ZONE_DEVICE 315 "Device", 316 #endif 317 }; 318 319 const char * const migratetype_names[MIGRATE_TYPES] = { 320 "Unmovable", 321 "Movable", 322 "Reclaimable", 323 "HighAtomic", 324 #ifdef CONFIG_CMA 325 "CMA", 326 #endif 327 #ifdef CONFIG_MEMORY_ISOLATION 328 "Isolate", 329 #endif 330 }; 331 332 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { 333 [NULL_COMPOUND_DTOR] = NULL, 334 [COMPOUND_PAGE_DTOR] = free_compound_page, 335 #ifdef CONFIG_HUGETLB_PAGE 336 [HUGETLB_PAGE_DTOR] = free_huge_page, 337 #endif 338 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 339 [TRANSHUGE_PAGE_DTOR] = free_transhuge_page, 340 #endif 341 }; 342 343 int min_free_kbytes = 1024; 344 int user_min_free_kbytes = -1; 345 int watermark_boost_factor __read_mostly = 15000; 346 int watermark_scale_factor = 10; 347 348 static unsigned long nr_kernel_pages __initdata; 349 static unsigned long nr_all_pages __initdata; 350 static unsigned long dma_reserve __initdata; 351 352 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; 353 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; 354 static unsigned long required_kernelcore __initdata; 355 static unsigned long required_kernelcore_percent __initdata; 356 static unsigned long required_movablecore __initdata; 357 static unsigned long required_movablecore_percent __initdata; 358 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; 359 static bool mirrored_kernelcore __meminitdata; 360 361 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 362 int 
movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * The static prev_end_pfn holds the end of the previous zone.
	 * No need to protect it: this runs very early in boot, before smp_init().
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
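	 *
	 * Illustrative example (a sketch; the numbers assume a typical
	 * x86_64 configuration with 4 KiB pages and 128 MiB sparsemem
	 * sections, i.e. PAGES_PER_SECTION == 32768, which this code does
	 * not guarantee): once more than 32768 pages of this node have
	 * been initialised, the next section-aligned pfn is recorded in
	 * first_deferred_pfn and defer_init() starts returning true,
	 * leaving the remainder of the node for deferred_init_memmap() to
	 * initialise in parallel later in boot.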
425 */ 426 nr_initialised++; 427 if ((nr_initialised > PAGES_PER_SECTION) && 428 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 429 NODE_DATA(nid)->first_deferred_pfn = pfn; 430 return true; 431 } 432 return false; 433 } 434 #else 435 static inline bool deferred_pages_enabled(void) 436 { 437 return false; 438 } 439 440 static inline bool early_page_uninitialised(unsigned long pfn) 441 { 442 return false; 443 } 444 445 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 446 { 447 return false; 448 } 449 #endif 450 451 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 452 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 453 unsigned long pfn) 454 { 455 #ifdef CONFIG_SPARSEMEM 456 return section_to_usemap(__pfn_to_section(pfn)); 457 #else 458 return page_zone(page)->pageblock_flags; 459 #endif /* CONFIG_SPARSEMEM */ 460 } 461 462 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 463 { 464 #ifdef CONFIG_SPARSEMEM 465 pfn &= (PAGES_PER_SECTION-1); 466 #else 467 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages); 468 #endif /* CONFIG_SPARSEMEM */ 469 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 470 } 471 472 static __always_inline 473 unsigned long __get_pfnblock_flags_mask(const struct page *page, 474 unsigned long pfn, 475 unsigned long mask) 476 { 477 unsigned long *bitmap; 478 unsigned long bitidx, word_bitidx; 479 unsigned long word; 480 481 bitmap = get_pageblock_bitmap(page, pfn); 482 bitidx = pfn_to_bitidx(page, pfn); 483 word_bitidx = bitidx / BITS_PER_LONG; 484 bitidx &= (BITS_PER_LONG-1); 485 486 word = bitmap[word_bitidx]; 487 return (word >> bitidx) & mask; 488 } 489 490 /** 491 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages 492 * @page: The page within the block of interest 493 * @pfn: The target page frame number 494 * @mask: mask of bits that the caller is interested in 495 * 496 * Return: pageblock_bits flags 497 */ 498 unsigned long get_pfnblock_flags_mask(const struct page *page, 499 unsigned long pfn, unsigned long mask) 500 { 501 return __get_pfnblock_flags_mask(page, pfn, mask); 502 } 503 504 static __always_inline int get_pfnblock_migratetype(const struct page *page, 505 unsigned long pfn) 506 { 507 return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 508 } 509 510 /** 511 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 512 * @page: The page within the block of interest 513 * @flags: The flags to set 514 * @pfn: The target page frame number 515 * @mask: mask of bits that the caller is interested in 516 */ 517 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 518 unsigned long pfn, 519 unsigned long mask) 520 { 521 unsigned long *bitmap; 522 unsigned long bitidx, word_bitidx; 523 unsigned long old_word, word; 524 525 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 526 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 527 528 bitmap = get_pageblock_bitmap(page, pfn); 529 bitidx = pfn_to_bitidx(page, pfn); 530 word_bitidx = bitidx / BITS_PER_LONG; 531 bitidx &= (BITS_PER_LONG-1); 532 533 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 534 535 mask <<= bitidx; 536 flags <<= bitidx; 537 538 word = READ_ONCE(bitmap[word_bitidx]); 539 for (;;) { 540 old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); 541 if (word == old_word) 542 break; 543 word = old_word; 544 } 545 } 546 547 
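/*
 * Worked example for the pageblock bitmap helpers above (an illustrative
 * sketch; it assumes CONFIG_SPARSEMEM, pageblock_order == 9 and
 * PAGES_PER_SECTION == 32768, which are typical x86_64 values rather than
 * anything this file guarantees):
 *
 *	pfn = 0x12345;				// 74565
 *	pfn &= PAGES_PER_SECTION - 1;		// 9029, offset within section
 *	bitidx = (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;	// 17 * 4 = 68
 *	word_bitidx = bitidx / BITS_PER_LONG;	// word 1 of the usemap
 *	bitidx &= BITS_PER_LONG - 1;		// bit 4 within that word
 *
 * so the 4-bit group describing this pageblock lives at bits 4..7 of
 * bitmap word 1, and get_pfnblock_migratetype() extracts the migratetype
 * from it with MIGRATETYPE_MASK.
 */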
void set_pageblock_migratetype(struct page *page, int migratetype) 548 { 549 if (unlikely(page_group_by_mobility_disabled && 550 migratetype < MIGRATE_PCPTYPES)) 551 migratetype = MIGRATE_UNMOVABLE; 552 553 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 554 page_to_pfn(page), MIGRATETYPE_MASK); 555 } 556 557 #ifdef CONFIG_DEBUG_VM 558 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 559 { 560 int ret = 0; 561 unsigned seq; 562 unsigned long pfn = page_to_pfn(page); 563 unsigned long sp, start_pfn; 564 565 do { 566 seq = zone_span_seqbegin(zone); 567 start_pfn = zone->zone_start_pfn; 568 sp = zone->spanned_pages; 569 if (!zone_spans_pfn(zone, pfn)) 570 ret = 1; 571 } while (zone_span_seqretry(zone, seq)); 572 573 if (ret) 574 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 575 pfn, zone_to_nid(zone), zone->name, 576 start_pfn, start_pfn + sp); 577 578 return ret; 579 } 580 581 static int page_is_consistent(struct zone *zone, struct page *page) 582 { 583 if (zone != page_zone(page)) 584 return 0; 585 586 return 1; 587 } 588 /* 589 * Temporary debugging check for pages not lying within a given zone. 590 */ 591 static int __maybe_unused bad_range(struct zone *zone, struct page *page) 592 { 593 if (page_outside_zone_boundaries(zone, page)) 594 return 1; 595 if (!page_is_consistent(zone, page)) 596 return 1; 597 598 return 0; 599 } 600 #else 601 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) 602 { 603 return 0; 604 } 605 #endif 606 607 static void bad_page(struct page *page, const char *reason) 608 { 609 static unsigned long resume; 610 static unsigned long nr_shown; 611 static unsigned long nr_unshown; 612 613 /* 614 * Allow a burst of 60 reports, then keep quiet for that minute; 615 * or allow a steady drip of one report per second. 
 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			"BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	int base = order;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		base = PAGE_ALLOC_COSTLY_ORDER + 1;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * base) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits point to the head page.
 *
 * The first tail page's ->compound_dtor holds the offset in array of compound
 * page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
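 *
 * Illustrative layout (a sketch of what prep_compound_page() below sets
 * up, using hypothetical pages p[0]..p[3] of an order-2 allocation):
 *
 *	p[0]: PG_head set;
 *	p[1]: compound_head == (unsigned long)&p[0] | 1, and as the first
 *	      tail it also carries ->compound_dtor and ->compound_order (2);
 *	p[2], p[3]: compound_head == (unsigned long)&p[0] | 1.
 *
 * prep_compound_page() builds this by setting PG_head on the head,
 * pointing every tail at the head via prep_compound_tail(), and then
 * filling in the head/first-tail metadata via prep_compound_head().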
707 */ 708 709 void free_compound_page(struct page *page) 710 { 711 mem_cgroup_uncharge(page_folio(page)); 712 free_the_page(page, compound_order(page)); 713 } 714 715 static void prep_compound_head(struct page *page, unsigned int order) 716 { 717 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); 718 set_compound_order(page, order); 719 atomic_set(compound_mapcount_ptr(page), -1); 720 atomic_set(compound_pincount_ptr(page), 0); 721 } 722 723 static void prep_compound_tail(struct page *head, int tail_idx) 724 { 725 struct page *p = head + tail_idx; 726 727 p->mapping = TAIL_MAPPING; 728 set_compound_head(p, head); 729 } 730 731 void prep_compound_page(struct page *page, unsigned int order) 732 { 733 int i; 734 int nr_pages = 1 << order; 735 736 __SetPageHead(page); 737 for (i = 1; i < nr_pages; i++) 738 prep_compound_tail(page, i); 739 740 prep_compound_head(page, order); 741 } 742 743 #ifdef CONFIG_DEBUG_PAGEALLOC 744 unsigned int _debug_guardpage_minorder; 745 746 bool _debug_pagealloc_enabled_early __read_mostly 747 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); 748 EXPORT_SYMBOL(_debug_pagealloc_enabled_early); 749 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); 750 EXPORT_SYMBOL(_debug_pagealloc_enabled); 751 752 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); 753 754 static int __init early_debug_pagealloc(char *buf) 755 { 756 return kstrtobool(buf, &_debug_pagealloc_enabled_early); 757 } 758 early_param("debug_pagealloc", early_debug_pagealloc); 759 760 static int __init debug_guardpage_minorder_setup(char *buf) 761 { 762 unsigned long res; 763 764 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { 765 pr_err("Bad debug_guardpage_minorder value\n"); 766 return 0; 767 } 768 _debug_guardpage_minorder = res; 769 pr_info("Setting debug_guardpage_minorder to %lu\n", res); 770 return 0; 771 } 772 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); 773 774 static inline bool set_page_guard(struct zone *zone, struct page *page, 775 unsigned int order, int migratetype) 776 { 777 if (!debug_guardpage_enabled()) 778 return false; 779 780 if (order >= debug_guardpage_minorder()) 781 return false; 782 783 __SetPageGuard(page); 784 INIT_LIST_HEAD(&page->lru); 785 set_page_private(page, order); 786 /* Guard pages are not available for any usage */ 787 __mod_zone_freepage_state(zone, -(1 << order), migratetype); 788 789 return true; 790 } 791 792 static inline void clear_page_guard(struct zone *zone, struct page *page, 793 unsigned int order, int migratetype) 794 { 795 if (!debug_guardpage_enabled()) 796 return; 797 798 __ClearPageGuard(page); 799 800 set_page_private(page, 0); 801 if (!is_migrate_isolate(migratetype)) 802 __mod_zone_freepage_state(zone, (1 << order), migratetype); 803 } 804 #else 805 static inline bool set_page_guard(struct zone *zone, struct page *page, 806 unsigned int order, int migratetype) { return false; } 807 static inline void clear_page_guard(struct zone *zone, struct page *page, 808 unsigned int order, int migratetype) {} 809 #endif 810 811 /* 812 * Enable static keys related to various memory debugging and hardening options. 813 * Some override others, and depend on early params that are evaluated in the 814 * order of appearance. So we need to first gather the full picture of what was 815 * enabled, and then make decisions. 816 */ 817 void init_mem_debugging_and_hardening(void) 818 { 819 bool page_poisoning_requested = false; 820 821 #ifdef CONFIG_PAGE_POISONING 822 /* 823 * Page poisoning is debug page alloc for some arches. 
If 824 * either of those options are enabled, enable poisoning. 825 */ 826 if (page_poisoning_enabled() || 827 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && 828 debug_pagealloc_enabled())) { 829 static_branch_enable(&_page_poisoning_enabled); 830 page_poisoning_requested = true; 831 } 832 #endif 833 834 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && 835 page_poisoning_requested) { 836 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " 837 "will take precedence over init_on_alloc and init_on_free\n"); 838 _init_on_alloc_enabled_early = false; 839 _init_on_free_enabled_early = false; 840 } 841 842 if (_init_on_alloc_enabled_early) 843 static_branch_enable(&init_on_alloc); 844 else 845 static_branch_disable(&init_on_alloc); 846 847 if (_init_on_free_enabled_early) 848 static_branch_enable(&init_on_free); 849 else 850 static_branch_disable(&init_on_free); 851 852 #ifdef CONFIG_DEBUG_PAGEALLOC 853 if (!debug_pagealloc_enabled()) 854 return; 855 856 static_branch_enable(&_debug_pagealloc_enabled); 857 858 if (!debug_guardpage_minorder()) 859 return; 860 861 static_branch_enable(&_debug_guardpage_enabled); 862 #endif 863 } 864 865 static inline void set_buddy_order(struct page *page, unsigned int order) 866 { 867 set_page_private(page, order); 868 __SetPageBuddy(page); 869 } 870 871 #ifdef CONFIG_COMPACTION 872 static inline struct capture_control *task_capc(struct zone *zone) 873 { 874 struct capture_control *capc = current->capture_control; 875 876 return unlikely(capc) && 877 !(current->flags & PF_KTHREAD) && 878 !capc->page && 879 capc->cc->zone == zone ? capc : NULL; 880 } 881 882 static inline bool 883 compaction_capture(struct capture_control *capc, struct page *page, 884 int order, int migratetype) 885 { 886 if (!capc || order != capc->cc->order) 887 return false; 888 889 /* Do not accidentally pollute CMA or isolated regions*/ 890 if (is_migrate_cma(migratetype) || 891 is_migrate_isolate(migratetype)) 892 return false; 893 894 /* 895 * Do not let lower order allocations pollute a movable pageblock. 896 * This might let an unmovable request use a reclaimable pageblock 897 * and vice-versa but no more than normal fallback logic which can 898 * have trouble finding a high-order free page. 899 */ 900 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) 901 return false; 902 903 capc->page = page; 904 return true; 905 } 906 907 #else 908 static inline struct capture_control *task_capc(struct zone *zone) 909 { 910 return NULL; 911 } 912 913 static inline bool 914 compaction_capture(struct capture_control *capc, struct page *page, 915 int order, int migratetype) 916 { 917 return false; 918 } 919 #endif /* CONFIG_COMPACTION */ 920 921 /* Used for pages not on another list */ 922 static inline void add_to_free_list(struct page *page, struct zone *zone, 923 unsigned int order, int migratetype) 924 { 925 struct free_area *area = &zone->free_area[order]; 926 927 list_add(&page->lru, &area->free_list[migratetype]); 928 area->nr_free++; 929 } 930 931 /* Used for pages not on another list */ 932 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, 933 unsigned int order, int migratetype) 934 { 935 struct free_area *area = &zone->free_area[order]; 936 937 list_add_tail(&page->lru, &area->free_list[migratetype]); 938 area->nr_free++; 939 } 940 941 /* 942 * Used for pages which are on another list. 
Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->lru, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case that
 * is happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 2)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
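 *
 * Illustrative merge (a sketch with made-up pfns; the buddy of a block is
 * found by flipping the order bit of its pfn, and the merged block starts
 * at buddy_pfn & pfn, as in __free_one_page() below):
 *
 *	free pfn 44, order 0:	buddy = 44 ^ 1 = 45, merged block starts at 44
 *	now order 1 at 44:	buddy = 44 ^ 2 = 46, merged block starts at 44
 *	now order 2 at 44:	buddy = 44 ^ 4 = 40, merged block starts at 40
 *	now order 3 at 40:	buddy = 40 ^ 8 = 32, ...
 *
 * merging stops at the first level whose buddy is not a free PageBuddy
 * page of the same order, and the result is placed on that order's
 * freelist.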
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER - 1) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * It is used when the free page crosses two pageblocks with different migratetypes
 * at split_pfn_offset within the page. The split free page will be put into
 * separate migratetype lists afterwards. Otherwise, the function achieves
 * nothing.
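 *
 * Worked example (an illustrative sketch, not behaviour promised beyond
 * what the loop below does): splitting an order-4 free page at pfn 32
 * with split_pfn_offset == 6 frees, in turn, an order-2 page at pfn 32,
 * order-1 pages at pfns 36 and 38, and an order-3 page at pfn 40, i.e.
 * a 6-page first part and a 10-page second part, each piece aligned to
 * its own order.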
1107 */ 1108 void split_free_page(struct page *free_page, 1109 int order, unsigned long split_pfn_offset) 1110 { 1111 struct zone *zone = page_zone(free_page); 1112 unsigned long free_page_pfn = page_to_pfn(free_page); 1113 unsigned long pfn; 1114 unsigned long flags; 1115 int free_page_order; 1116 1117 if (split_pfn_offset == 0) 1118 return; 1119 1120 spin_lock_irqsave(&zone->lock, flags); 1121 del_page_from_free_list(free_page, zone, order); 1122 for (pfn = free_page_pfn; 1123 pfn < free_page_pfn + (1UL << order);) { 1124 int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn); 1125 1126 free_page_order = min_t(int, 1127 pfn ? __ffs(pfn) : order, 1128 __fls(split_pfn_offset)); 1129 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order, 1130 mt, FPI_NONE); 1131 pfn += 1UL << free_page_order; 1132 split_pfn_offset -= (1UL << free_page_order); 1133 /* we have done the first part, now switch to second part */ 1134 if (split_pfn_offset == 0) 1135 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); 1136 } 1137 spin_unlock_irqrestore(&zone->lock, flags); 1138 } 1139 /* 1140 * A bad page could be due to a number of fields. Instead of multiple branches, 1141 * try and check multiple fields with one check. The caller must do a detailed 1142 * check if necessary. 1143 */ 1144 static inline bool page_expected_state(struct page *page, 1145 unsigned long check_flags) 1146 { 1147 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1148 return false; 1149 1150 if (unlikely((unsigned long)page->mapping | 1151 page_ref_count(page) | 1152 #ifdef CONFIG_MEMCG 1153 page->memcg_data | 1154 #endif 1155 (page->flags & check_flags))) 1156 return false; 1157 1158 return true; 1159 } 1160 1161 static const char *page_bad_reason(struct page *page, unsigned long flags) 1162 { 1163 const char *bad_reason = NULL; 1164 1165 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1166 bad_reason = "nonzero mapcount"; 1167 if (unlikely(page->mapping != NULL)) 1168 bad_reason = "non-NULL mapping"; 1169 if (unlikely(page_ref_count(page) != 0)) 1170 bad_reason = "nonzero _refcount"; 1171 if (unlikely(page->flags & flags)) { 1172 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 1173 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 1174 else 1175 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 1176 } 1177 #ifdef CONFIG_MEMCG 1178 if (unlikely(page->memcg_data)) 1179 bad_reason = "page still charged to cgroup"; 1180 #endif 1181 return bad_reason; 1182 } 1183 1184 static void check_free_page_bad(struct page *page) 1185 { 1186 bad_page(page, 1187 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 1188 } 1189 1190 static inline int check_free_page(struct page *page) 1191 { 1192 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 1193 return 0; 1194 1195 /* Something has gone sideways, find it */ 1196 check_free_page_bad(page); 1197 return 1; 1198 } 1199 1200 static int free_tail_pages_check(struct page *head_page, struct page *page) 1201 { 1202 int ret = 1; 1203 1204 /* 1205 * We rely page->lru.next never has bit 0 set, unless the page 1206 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
1207 */ 1208 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 1209 1210 if (!IS_ENABLED(CONFIG_DEBUG_VM)) { 1211 ret = 0; 1212 goto out; 1213 } 1214 switch (page - head_page) { 1215 case 1: 1216 /* the first tail page: ->mapping may be compound_mapcount() */ 1217 if (unlikely(compound_mapcount(page))) { 1218 bad_page(page, "nonzero compound_mapcount"); 1219 goto out; 1220 } 1221 break; 1222 case 2: 1223 /* 1224 * the second tail page: ->mapping is 1225 * deferred_list.next -- ignore value. 1226 */ 1227 break; 1228 default: 1229 if (page->mapping != TAIL_MAPPING) { 1230 bad_page(page, "corrupted mapping in tail page"); 1231 goto out; 1232 } 1233 break; 1234 } 1235 if (unlikely(!PageTail(page))) { 1236 bad_page(page, "PageTail not set"); 1237 goto out; 1238 } 1239 if (unlikely(compound_head(page) != head_page)) { 1240 bad_page(page, "compound_head not consistent"); 1241 goto out; 1242 } 1243 ret = 0; 1244 out: 1245 page->mapping = NULL; 1246 clear_compound_head(page); 1247 return ret; 1248 } 1249 1250 /* 1251 * Skip KASAN memory poisoning when either: 1252 * 1253 * 1. Deferred memory initialization has not yet completed, 1254 * see the explanation below. 1255 * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON, 1256 * see the comment next to it. 1257 * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON, 1258 * see the comment next to it. 1259 * 1260 * Poisoning pages during deferred memory init will greatly lengthen the 1261 * process and cause problem in large memory systems as the deferred pages 1262 * initialization is done with interrupt disabled. 1263 * 1264 * Assuming that there will be no reference to those newly initialized 1265 * pages before they are ever allocated, this should have no effect on 1266 * KASAN memory tracking as the poison will be properly inserted at page 1267 * allocation time. The only corner case is when pages are allocated by 1268 * on-demand allocation and then freed again before the deferred pages 1269 * initialization is done, but this is not likely to happen. 1270 */ 1271 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) 1272 { 1273 return deferred_pages_enabled() || 1274 (!IS_ENABLED(CONFIG_KASAN_GENERIC) && 1275 (fpi_flags & FPI_SKIP_KASAN_POISON)) || 1276 PageSkipKASanPoison(page); 1277 } 1278 1279 static void kernel_init_free_pages(struct page *page, int numpages) 1280 { 1281 int i; 1282 1283 /* s390's use of memset() could override KASAN redzones. */ 1284 kasan_disable_current(); 1285 for (i = 0; i < numpages; i++) { 1286 u8 tag = page_kasan_tag(page + i); 1287 page_kasan_tag_reset(page + i); 1288 clear_highpage(page + i); 1289 page_kasan_tag_set(page + i, tag); 1290 } 1291 kasan_enable_current(); 1292 } 1293 1294 static __always_inline bool free_pages_prepare(struct page *page, 1295 unsigned int order, bool check_free, fpi_t fpi_flags) 1296 { 1297 int bad = 0; 1298 bool init = want_init_on_free(); 1299 1300 VM_BUG_ON_PAGE(PageTail(page), page); 1301 1302 trace_mm_page_free(page, order); 1303 1304 if (unlikely(PageHWPoison(page)) && !order) { 1305 /* 1306 * Do not let hwpoison pages hit pcplists/buddy 1307 * Untie memcg state and reset page's owner 1308 */ 1309 if (memcg_kmem_enabled() && PageMemcgKmem(page)) 1310 __memcg_kmem_uncharge_page(page, order); 1311 reset_page_owner(page, order); 1312 page_table_check_free(page, order); 1313 return false; 1314 } 1315 1316 /* 1317 * Check tail pages before head page information is cleared to 1318 * avoid checking PageCompound for order-0 pages. 
1319 */ 1320 if (unlikely(order)) { 1321 bool compound = PageCompound(page); 1322 int i; 1323 1324 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1325 1326 if (compound) { 1327 ClearPageDoubleMap(page); 1328 ClearPageHasHWPoisoned(page); 1329 } 1330 for (i = 1; i < (1 << order); i++) { 1331 if (compound) 1332 bad += free_tail_pages_check(page, page + i); 1333 if (unlikely(check_free_page(page + i))) { 1334 bad++; 1335 continue; 1336 } 1337 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1338 } 1339 } 1340 if (PageMappingFlags(page)) 1341 page->mapping = NULL; 1342 if (memcg_kmem_enabled() && PageMemcgKmem(page)) 1343 __memcg_kmem_uncharge_page(page, order); 1344 if (check_free) 1345 bad += check_free_page(page); 1346 if (bad) 1347 return false; 1348 1349 page_cpupid_reset_last(page); 1350 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1351 reset_page_owner(page, order); 1352 page_table_check_free(page, order); 1353 1354 if (!PageHighMem(page)) { 1355 debug_check_no_locks_freed(page_address(page), 1356 PAGE_SIZE << order); 1357 debug_check_no_obj_freed(page_address(page), 1358 PAGE_SIZE << order); 1359 } 1360 1361 kernel_poison_pages(page, 1 << order); 1362 1363 /* 1364 * As memory initialization might be integrated into KASAN, 1365 * KASAN poisoning and memory initialization code must be 1366 * kept together to avoid discrepancies in behavior. 1367 * 1368 * With hardware tag-based KASAN, memory tags must be set before the 1369 * page becomes unavailable via debug_pagealloc or arch_free_page. 1370 */ 1371 if (!should_skip_kasan_poison(page, fpi_flags)) { 1372 kasan_poison_pages(page, order, init); 1373 1374 /* Memory is already initialized if KASAN did it internally. */ 1375 if (kasan_has_integrated_init()) 1376 init = false; 1377 } 1378 if (init) 1379 kernel_init_free_pages(page, 1 << order); 1380 1381 /* 1382 * arch_free_page() can make the page's contents inaccessible. s390 1383 * does this. So nothing which can access the page's contents should 1384 * happen after this. 1385 */ 1386 arch_free_page(page, order); 1387 1388 debug_pagealloc_unmap_pages(page, 1 << order); 1389 1390 return true; 1391 } 1392 1393 #ifdef CONFIG_DEBUG_VM 1394 /* 1395 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed 1396 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when 1397 * moved from pcp lists to free lists. 1398 */ 1399 static bool free_pcp_prepare(struct page *page, unsigned int order) 1400 { 1401 return free_pages_prepare(page, order, true, FPI_NONE); 1402 } 1403 1404 static bool bulkfree_pcp_prepare(struct page *page) 1405 { 1406 if (debug_pagealloc_enabled_static()) 1407 return check_free_page(page); 1408 else 1409 return false; 1410 } 1411 #else 1412 /* 1413 * With DEBUG_VM disabled, order-0 pages being freed are checked only when 1414 * moving from pcp lists to free list in order to reduce overhead. With 1415 * debug_pagealloc enabled, they are checked also immediately when being freed 1416 * to the pcp lists. 1417 */ 1418 static bool free_pcp_prepare(struct page *page, unsigned int order) 1419 { 1420 if (debug_pagealloc_enabled_static()) 1421 return free_pages_prepare(page, order, true, FPI_NONE); 1422 else 1423 return free_pages_prepare(page, order, false, FPI_NONE); 1424 } 1425 1426 static bool bulkfree_pcp_prepare(struct page *page) 1427 { 1428 return check_free_page(page); 1429 } 1430 #endif /* CONFIG_DEBUG_VM */ 1431 1432 /* 1433 * Frees a number of pages from the PCP lists 1434 * Assumes all pages on list are in same zone. 
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	int min_pindex = 0;
	int max_pindex = NR_PCP_LISTS - 1;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure proper count is passed which otherwise would get stuck in
	 * the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	/*
	 * local_lock_irq held so equivalent to spin_lock_irqsave for
	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
	 */
	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > max_pindex)
				pindex = min_pindex;
			list = &pcp->lists[pindex];
			if (!list_empty(list))
				break;

			if (pindex == max_pindex)
				max_pindex--;
			if (pindex == min_pindex)
				min_pindex++;
		} while (1);

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH));
		do {
			int mt;

			page = list_last_entry(list, struct page, lru);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->lru);
			count -= nr_pages;
			pcp->count -= nr_pages;

			if (bulkfree_pcp_prepare(page))
				continue;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		     is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G.
*/ 1541 if (!is_highmem_idx(zone)) 1542 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1543 #endif 1544 } 1545 1546 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1547 static void __meminit init_reserved_page(unsigned long pfn) 1548 { 1549 pg_data_t *pgdat; 1550 int nid, zid; 1551 1552 if (!early_page_uninitialised(pfn)) 1553 return; 1554 1555 nid = early_pfn_to_nid(pfn); 1556 pgdat = NODE_DATA(nid); 1557 1558 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1559 struct zone *zone = &pgdat->node_zones[zid]; 1560 1561 if (zone_spans_pfn(zone, pfn)) 1562 break; 1563 } 1564 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); 1565 } 1566 #else 1567 static inline void init_reserved_page(unsigned long pfn) 1568 { 1569 } 1570 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1571 1572 /* 1573 * Initialised pages do not have PageReserved set. This function is 1574 * called for each range allocated by the bootmem allocator and 1575 * marks the pages PageReserved. The remaining valid pages are later 1576 * sent to the buddy page allocator. 1577 */ 1578 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) 1579 { 1580 unsigned long start_pfn = PFN_DOWN(start); 1581 unsigned long end_pfn = PFN_UP(end); 1582 1583 for (; start_pfn < end_pfn; start_pfn++) { 1584 if (pfn_valid(start_pfn)) { 1585 struct page *page = pfn_to_page(start_pfn); 1586 1587 init_reserved_page(start_pfn); 1588 1589 /* Avoid false-positive PageTail() */ 1590 INIT_LIST_HEAD(&page->lru); 1591 1592 /* 1593 * no need for atomic set_bit because the struct 1594 * page is not visible yet so nobody should 1595 * access it yet. 1596 */ 1597 __SetPageReserved(page); 1598 } 1599 } 1600 } 1601 1602 static void __free_pages_ok(struct page *page, unsigned int order, 1603 fpi_t fpi_flags) 1604 { 1605 unsigned long flags; 1606 int migratetype; 1607 unsigned long pfn = page_to_pfn(page); 1608 struct zone *zone = page_zone(page); 1609 1610 if (!free_pages_prepare(page, order, true, fpi_flags)) 1611 return; 1612 1613 migratetype = get_pfnblock_migratetype(page, pfn); 1614 1615 spin_lock_irqsave(&zone->lock, flags); 1616 if (unlikely(has_isolate_pageblock(zone) || 1617 is_migrate_isolate(migratetype))) { 1618 migratetype = get_pfnblock_migratetype(page, pfn); 1619 } 1620 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1621 spin_unlock_irqrestore(&zone->lock, flags); 1622 1623 __count_vm_events(PGFREE, 1 << order); 1624 } 1625 1626 void __free_pages_core(struct page *page, unsigned int order) 1627 { 1628 unsigned int nr_pages = 1 << order; 1629 struct page *p = page; 1630 unsigned int loop; 1631 1632 /* 1633 * When initializing the memmap, __init_single_page() sets the refcount 1634 * of all pages to 1 ("allocated"/"not free"). We have to set the 1635 * refcount of all involved pages to 0. 1636 */ 1637 prefetchw(p); 1638 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { 1639 prefetchw(p + 1); 1640 __ClearPageReserved(p); 1641 set_page_count(p, 0); 1642 } 1643 __ClearPageReserved(p); 1644 set_page_count(p, 0); 1645 1646 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1647 1648 /* 1649 * Bypass PCP and place fresh pages right to the tail, primarily 1650 * relevant for memory onlining. 1651 */ 1652 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON); 1653 } 1654 1655 #ifdef CONFIG_NUMA 1656 1657 /* 1658 * During memory init memblocks map pfns to nids. The search is expensive and 1659 * this caches recent lookups. The implementation of __early_pfn_to_nid 1660 * treats start/end as pfns. 
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif /* CONFIG_NUMA */

void __init memblock_free_pages(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	__free_pages_core(page, order);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration of free compaction scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
1729 */ 1730 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1731 unsigned long end_pfn, struct zone *zone) 1732 { 1733 struct page *start_page; 1734 struct page *end_page; 1735 1736 /* end_pfn is one past the range we are checking */ 1737 end_pfn--; 1738 1739 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) 1740 return NULL; 1741 1742 start_page = pfn_to_online_page(start_pfn); 1743 if (!start_page) 1744 return NULL; 1745 1746 if (page_zone(start_page) != zone) 1747 return NULL; 1748 1749 end_page = pfn_to_page(end_pfn); 1750 1751 /* This gives a shorter code than deriving page_zone(end_page) */ 1752 if (page_zone_id(start_page) != page_zone_id(end_page)) 1753 return NULL; 1754 1755 return start_page; 1756 } 1757 1758 void set_zone_contiguous(struct zone *zone) 1759 { 1760 unsigned long block_start_pfn = zone->zone_start_pfn; 1761 unsigned long block_end_pfn; 1762 1763 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages); 1764 for (; block_start_pfn < zone_end_pfn(zone); 1765 block_start_pfn = block_end_pfn, 1766 block_end_pfn += pageblock_nr_pages) { 1767 1768 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); 1769 1770 if (!__pageblock_pfn_to_page(block_start_pfn, 1771 block_end_pfn, zone)) 1772 return; 1773 cond_resched(); 1774 } 1775 1776 /* We confirm that there is no hole */ 1777 zone->contiguous = true; 1778 } 1779 1780 void clear_zone_contiguous(struct zone *zone) 1781 { 1782 zone->contiguous = false; 1783 } 1784 1785 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1786 static void __init deferred_free_range(unsigned long pfn, 1787 unsigned long nr_pages) 1788 { 1789 struct page *page; 1790 unsigned long i; 1791 1792 if (!nr_pages) 1793 return; 1794 1795 page = pfn_to_page(pfn); 1796 1797 /* Free a large naturally-aligned chunk if possible */ 1798 if (nr_pages == pageblock_nr_pages && 1799 (pfn & (pageblock_nr_pages - 1)) == 0) { 1800 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1801 __free_pages_core(page, pageblock_order); 1802 return; 1803 } 1804 1805 for (i = 0; i < nr_pages; i++, page++, pfn++) { 1806 if ((pfn & (pageblock_nr_pages - 1)) == 0) 1807 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1808 __free_pages_core(page, 0); 1809 } 1810 } 1811 1812 /* Completion tracking for deferred_init_memmap() threads */ 1813 static atomic_t pgdat_init_n_undone __initdata; 1814 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); 1815 1816 static inline void __init pgdat_init_report_one_done(void) 1817 { 1818 if (atomic_dec_and_test(&pgdat_init_n_undone)) 1819 complete(&pgdat_init_all_done_comp); 1820 } 1821 1822 /* 1823 * Returns true if page needs to be initialized or freed to buddy allocator. 1824 * 1825 * First we check if pfn is valid on architectures where it is possible to have 1826 * holes within pageblock_nr_pages. On systems where it is not possible, this 1827 * function is optimized out. 1828 * 1829 * Then, we check if a current large page is valid by only checking the validity 1830 * of the head pfn. 1831 */ 1832 static inline bool __init deferred_pfn_valid(unsigned long pfn) 1833 { 1834 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn)) 1835 return false; 1836 return true; 1837 } 1838 1839 /* 1840 * Free pages to buddy allocator. Try to free aligned pages in 1841 * pageblock_nr_pages sizes. 
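 *
 * Illustrative behaviour (a sketch assuming order-9 pageblocks, i.e.
 * pageblock_nr_pages == 512, which is typical but configuration
 * dependent): deferred_free_pages() below accumulates a run of valid
 * pfns in nr_free and flushes it via deferred_free_range() whenever it
 * hits an invalid pfn or a pageblock boundary, so a fully valid,
 * naturally aligned 512-page run is handed to the buddy allocator as a
 * single order-9 free rather than 512 order-0 frees.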
1842 */ 1843 static void __init deferred_free_pages(unsigned long pfn, 1844 unsigned long end_pfn) 1845 { 1846 unsigned long nr_pgmask = pageblock_nr_pages - 1; 1847 unsigned long nr_free = 0; 1848 1849 for (; pfn < end_pfn; pfn++) { 1850 if (!deferred_pfn_valid(pfn)) { 1851 deferred_free_range(pfn - nr_free, nr_free); 1852 nr_free = 0; 1853 } else if (!(pfn & nr_pgmask)) { 1854 deferred_free_range(pfn - nr_free, nr_free); 1855 nr_free = 1; 1856 } else { 1857 nr_free++; 1858 } 1859 } 1860 /* Free the last block of pages to allocator */ 1861 deferred_free_range(pfn - nr_free, nr_free); 1862 } 1863 1864 /* 1865 * Initialize struct pages. We minimize pfn page lookups and scheduler checks 1866 * by performing it only once every pageblock_nr_pages. 1867 * Return number of pages initialized. 1868 */ 1869 static unsigned long __init deferred_init_pages(struct zone *zone, 1870 unsigned long pfn, 1871 unsigned long end_pfn) 1872 { 1873 unsigned long nr_pgmask = pageblock_nr_pages - 1; 1874 int nid = zone_to_nid(zone); 1875 unsigned long nr_pages = 0; 1876 int zid = zone_idx(zone); 1877 struct page *page = NULL; 1878 1879 for (; pfn < end_pfn; pfn++) { 1880 if (!deferred_pfn_valid(pfn)) { 1881 page = NULL; 1882 continue; 1883 } else if (!page || !(pfn & nr_pgmask)) { 1884 page = pfn_to_page(pfn); 1885 } else { 1886 page++; 1887 } 1888 __init_single_page(page, pfn, zid, nid); 1889 nr_pages++; 1890 } 1891 return (nr_pages); 1892 } 1893 1894 /* 1895 * This function is meant to pre-load the iterator for the zone init. 1896 * Specifically it walks through the ranges until we are caught up to the 1897 * first_init_pfn value and exits there. If we never encounter the value we 1898 * return false indicating there are no valid ranges left. 1899 */ 1900 static bool __init 1901 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, 1902 unsigned long *spfn, unsigned long *epfn, 1903 unsigned long first_init_pfn) 1904 { 1905 u64 j; 1906 1907 /* 1908 * Start out by walking through the ranges in this zone that have 1909 * already been initialized. We don't need to do anything with them 1910 * so we just need to flush them out of the system. 1911 */ 1912 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { 1913 if (*epfn <= first_init_pfn) 1914 continue; 1915 if (*spfn < first_init_pfn) 1916 *spfn = first_init_pfn; 1917 *i = j; 1918 return true; 1919 } 1920 1921 return false; 1922 } 1923 1924 /* 1925 * Initialize and free pages. We do it in two loops: first we initialize 1926 * struct page, then free to buddy allocator, because while we are 1927 * freeing pages we can access pages that are ahead (computing buddy 1928 * page in __free_one_page()). 1929 * 1930 * In order to try and keep some memory in the cache we have the loop 1931 * broken along max page order boundaries. This way we will not cause 1932 * any issues with the buddy page computation. 
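 *
 * For example (assuming MAX_ORDER_NR_PAGES == 1024, i.e. MAX_ORDER == 11):
 * entering with *start_pfn == 1500 gives
 * mo_pfn = ALIGN(1501, 1024) = 2048, so this call initializes and frees
 * at most pfns [1500, 2048) before handing control back to the caller's
 * loop.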
1933 */ 1934 static unsigned long __init 1935 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, 1936 unsigned long *end_pfn) 1937 { 1938 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); 1939 unsigned long spfn = *start_pfn, epfn = *end_pfn; 1940 unsigned long nr_pages = 0; 1941 u64 j = *i; 1942 1943 /* First we loop through and initialize the page values */ 1944 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { 1945 unsigned long t; 1946 1947 if (mo_pfn <= *start_pfn) 1948 break; 1949 1950 t = min(mo_pfn, *end_pfn); 1951 nr_pages += deferred_init_pages(zone, *start_pfn, t); 1952 1953 if (mo_pfn < *end_pfn) { 1954 *start_pfn = mo_pfn; 1955 break; 1956 } 1957 } 1958 1959 /* Reset values and now loop through freeing pages as needed */ 1960 swap(j, *i); 1961 1962 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { 1963 unsigned long t; 1964 1965 if (mo_pfn <= spfn) 1966 break; 1967 1968 t = min(mo_pfn, epfn); 1969 deferred_free_pages(spfn, t); 1970 1971 if (mo_pfn <= epfn) 1972 break; 1973 } 1974 1975 return nr_pages; 1976 } 1977 1978 static void __init 1979 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 1980 void *arg) 1981 { 1982 unsigned long spfn, epfn; 1983 struct zone *zone = arg; 1984 u64 i; 1985 1986 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); 1987 1988 /* 1989 * Initialize and free pages in MAX_ORDER sized increments so that we 1990 * can avoid introducing any issues with the buddy allocator. 1991 */ 1992 while (spfn < end_pfn) { 1993 deferred_init_maxorder(&i, zone, &spfn, &epfn); 1994 cond_resched(); 1995 } 1996 } 1997 1998 /* An arch may override for more concurrency. */ 1999 __weak int __init 2000 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 2001 { 2002 return 1; 2003 } 2004 2005 /* Initialise remaining memory on a node */ 2006 static int __init deferred_init_memmap(void *data) 2007 { 2008 pg_data_t *pgdat = data; 2009 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2010 unsigned long spfn = 0, epfn = 0; 2011 unsigned long first_init_pfn, flags; 2012 unsigned long start = jiffies; 2013 struct zone *zone; 2014 int zid, max_threads; 2015 u64 i; 2016 2017 /* Bind memory initialisation thread to a local node if possible */ 2018 if (!cpumask_empty(cpumask)) 2019 set_cpus_allowed_ptr(current, cpumask); 2020 2021 pgdat_resize_lock(pgdat, &flags); 2022 first_init_pfn = pgdat->first_deferred_pfn; 2023 if (first_init_pfn == ULONG_MAX) { 2024 pgdat_resize_unlock(pgdat, &flags); 2025 pgdat_init_report_one_done(); 2026 return 0; 2027 } 2028 2029 /* Sanity check boundaries */ 2030 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 2031 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 2032 pgdat->first_deferred_pfn = ULONG_MAX; 2033 2034 /* 2035 * Once we unlock here, the zone cannot be grown anymore, thus if an 2036 * interrupt thread must allocate this early in boot, zone must be 2037 * pre-grown prior to start of deferred page initialization. 
2038 */ 2039 pgdat_resize_unlock(pgdat, &flags); 2040 2041 /* Only the highest zone is deferred so find it */ 2042 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 2043 zone = pgdat->node_zones + zid; 2044 if (first_init_pfn < zone_end_pfn(zone)) 2045 break; 2046 } 2047 2048 /* If the zone is empty somebody else may have cleared out the zone */ 2049 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2050 first_init_pfn)) 2051 goto zone_empty; 2052 2053 max_threads = deferred_page_init_max_threads(cpumask); 2054 2055 while (spfn < epfn) { 2056 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); 2057 struct padata_mt_job job = { 2058 .thread_fn = deferred_init_memmap_chunk, 2059 .fn_arg = zone, 2060 .start = spfn, 2061 .size = epfn_align - spfn, 2062 .align = PAGES_PER_SECTION, 2063 .min_chunk = PAGES_PER_SECTION, 2064 .max_threads = max_threads, 2065 }; 2066 2067 padata_do_multithreaded(&job); 2068 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2069 epfn_align); 2070 } 2071 zone_empty: 2072 /* Sanity check that the next zone really is unpopulated */ 2073 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); 2074 2075 pr_info("node %d deferred pages initialised in %ums\n", 2076 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 2077 2078 pgdat_init_report_one_done(); 2079 return 0; 2080 } 2081 2082 /* 2083 * If this zone has deferred pages, try to grow it by initializing enough 2084 * deferred pages to satisfy the allocation specified by order, rounded up to 2085 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2086 * of SECTION_SIZE bytes by initializing struct pages in increments of 2087 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2088 * 2089 * Return true when zone was grown, otherwise return false. We return true even 2090 * when we grow less than requested, to let the caller decide if there are 2091 * enough pages to satisfy the allocation. 2092 * 2093 * Note: We use noinline because this function is needed only during boot, and 2094 * it is called from a __ref function _deferred_grow_zone. This way we are 2095 * making sure that it is not inlined into permanent text section. 2096 */ 2097 static noinline bool __init 2098 deferred_grow_zone(struct zone *zone, unsigned int order) 2099 { 2100 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); 2101 pg_data_t *pgdat = zone->zone_pgdat; 2102 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2103 unsigned long spfn, epfn, flags; 2104 unsigned long nr_pages = 0; 2105 u64 i; 2106 2107 /* Only the last zone may have deferred pages */ 2108 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2109 return false; 2110 2111 pgdat_resize_lock(pgdat, &flags); 2112 2113 /* 2114 * If someone grew this zone while we were waiting for spinlock, return 2115 * true, as there might be enough pages already. 2116 */ 2117 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2118 pgdat_resize_unlock(pgdat, &flags); 2119 return true; 2120 } 2121 2122 /* If the zone is empty somebody else may have cleared out the zone */ 2123 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2124 first_deferred_pfn)) { 2125 pgdat->first_deferred_pfn = ULONG_MAX; 2126 pgdat_resize_unlock(pgdat, &flags); 2127 /* Retry only once. */ 2128 return first_deferred_pfn != ULONG_MAX; 2129 } 2130 2131 /* 2132 * Initialize and free pages in MAX_ORDER sized increments so 2133 * that we can avoid introducing any issues with the buddy 2134 * allocator. 
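 *
 * The quota computed above is section granular: with an assumed
 * PAGES_PER_SECTION of 32768, even an order-3 request rounds
 * nr_pages_needed up to ALIGN(8, 32768) = 32768, so at least one full
 * section is initialized before the loop below stops at a section
 * boundary.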
2135 */ 2136 while (spfn < epfn) { 2137 /* update our first deferred PFN for this section */ 2138 first_deferred_pfn = spfn; 2139 2140 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); 2141 touch_nmi_watchdog(); 2142 2143 /* We should only stop along section boundaries */ 2144 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) 2145 continue; 2146 2147 /* If our quota has been met we can stop here */ 2148 if (nr_pages >= nr_pages_needed) 2149 break; 2150 } 2151 2152 pgdat->first_deferred_pfn = spfn; 2153 pgdat_resize_unlock(pgdat, &flags); 2154 2155 return nr_pages > 0; 2156 } 2157 2158 /* 2159 * deferred_grow_zone() is __init, but it is called from 2160 * get_page_from_freelist() during early boot until deferred_pages permanently 2161 * disables this call. This is why we have refdata wrapper to avoid warning, 2162 * and to ensure that the function body gets unloaded. 2163 */ 2164 static bool __ref 2165 _deferred_grow_zone(struct zone *zone, unsigned int order) 2166 { 2167 return deferred_grow_zone(zone, order); 2168 } 2169 2170 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2171 2172 void __init page_alloc_init_late(void) 2173 { 2174 struct zone *zone; 2175 int nid; 2176 2177 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2178 2179 /* There will be num_node_state(N_MEMORY) threads */ 2180 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2181 for_each_node_state(nid, N_MEMORY) { 2182 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2183 } 2184 2185 /* Block until all are initialised */ 2186 wait_for_completion(&pgdat_init_all_done_comp); 2187 2188 /* 2189 * We initialized the rest of the deferred pages. Permanently disable 2190 * on-demand struct page initialization. 2191 */ 2192 static_branch_disable(&deferred_pages); 2193 2194 /* Reinit limits that are based on free pages after the kernel is up */ 2195 files_maxfiles_init(); 2196 #endif 2197 2198 buffer_init(); 2199 2200 /* Discard memblock private memory */ 2201 memblock_discard(); 2202 2203 for_each_node_state(nid, N_MEMORY) 2204 shuffle_free_memory(NODE_DATA(nid)); 2205 2206 for_each_populated_zone(zone) 2207 set_zone_contiguous(zone); 2208 } 2209 2210 #ifdef CONFIG_CMA 2211 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ 2212 void __init init_cma_reserved_pageblock(struct page *page) 2213 { 2214 unsigned i = pageblock_nr_pages; 2215 struct page *p = page; 2216 2217 do { 2218 __ClearPageReserved(p); 2219 set_page_count(p, 0); 2220 } while (++p, --i); 2221 2222 set_pageblock_migratetype(page, MIGRATE_CMA); 2223 set_page_refcounted(page); 2224 __free_pages(page, pageblock_order); 2225 2226 adjust_managed_page_count(page, pageblock_nr_pages); 2227 page_zone(page)->cma_pages += pageblock_nr_pages; 2228 } 2229 #endif 2230 2231 /* 2232 * The order of subdivision here is critical for the IO subsystem. 2233 * Please do not alter this order without good reasons and regression 2234 * testing. Specifically, as large blocks of memory are subdivided, 2235 * the order in which smaller blocks are delivered depends on the order 2236 * they're subdivided in this function. This is the primary factor 2237 * influencing the order in which pages are delivered to the IO 2238 * subsystem according to empirical testing, and this is also justified 2239 * by considering the behavior of a buddy system containing a single 2240 * large block of memory acted on by a series of small allocations. 2241 * This behavior is a critical factor in sglist merging's success. 
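 *
 * Worked example (added for illustration): expand(zone, page, 0, 3, mt)
 * on an order-3 block of eight pages returns page[4..7] to the free
 * lists as an order-2 buddy, page[2..3] as order-1 and page[1] as
 * order-0, leaving page[0] for the caller as the requested order-0
 * allocation.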
2242 * 2243 * -- nyc 2244 */ 2245 static inline void expand(struct zone *zone, struct page *page, 2246 int low, int high, int migratetype) 2247 { 2248 unsigned long size = 1 << high; 2249 2250 while (high > low) { 2251 high--; 2252 size >>= 1; 2253 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 2254 2255 /* 2256 * Mark as guard pages (or page), that will allow to 2257 * merge back to allocator when buddy will be freed. 2258 * Corresponding page table entries will not be touched, 2259 * pages will stay not present in virtual address space 2260 */ 2261 if (set_page_guard(zone, &page[size], high, migratetype)) 2262 continue; 2263 2264 add_to_free_list(&page[size], zone, high, migratetype); 2265 set_buddy_order(&page[size], high); 2266 } 2267 } 2268 2269 static void check_new_page_bad(struct page *page) 2270 { 2271 if (unlikely(page->flags & __PG_HWPOISON)) { 2272 /* Don't complain about hwpoisoned pages */ 2273 page_mapcount_reset(page); /* remove PageBuddy */ 2274 return; 2275 } 2276 2277 bad_page(page, 2278 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 2279 } 2280 2281 /* 2282 * This page is about to be returned from the page allocator 2283 */ 2284 static inline int check_new_page(struct page *page) 2285 { 2286 if (likely(page_expected_state(page, 2287 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 2288 return 0; 2289 2290 check_new_page_bad(page); 2291 return 1; 2292 } 2293 2294 static bool check_new_pages(struct page *page, unsigned int order) 2295 { 2296 int i; 2297 for (i = 0; i < (1 << order); i++) { 2298 struct page *p = page + i; 2299 2300 if (unlikely(check_new_page(p))) 2301 return true; 2302 } 2303 2304 return false; 2305 } 2306 2307 #ifdef CONFIG_DEBUG_VM 2308 /* 2309 * With DEBUG_VM enabled, order-0 pages are checked for expected state when 2310 * being allocated from pcp lists. With debug_pagealloc also enabled, they are 2311 * also checked when pcp lists are refilled from the free lists. 2312 */ 2313 static inline bool check_pcp_refill(struct page *page, unsigned int order) 2314 { 2315 if (debug_pagealloc_enabled_static()) 2316 return check_new_pages(page, order); 2317 else 2318 return false; 2319 } 2320 2321 static inline bool check_new_pcp(struct page *page, unsigned int order) 2322 { 2323 return check_new_pages(page, order); 2324 } 2325 #else 2326 /* 2327 * With DEBUG_VM disabled, free order-0 pages are checked for expected state 2328 * when pcp lists are being refilled from the free lists. With debug_pagealloc 2329 * enabled, they are also checked when being allocated from the pcp lists. 2330 */ 2331 static inline bool check_pcp_refill(struct page *page, unsigned int order) 2332 { 2333 return check_new_pages(page, order); 2334 } 2335 static inline bool check_new_pcp(struct page *page, unsigned int order) 2336 { 2337 if (debug_pagealloc_enabled_static()) 2338 return check_new_pages(page, order); 2339 else 2340 return false; 2341 } 2342 #endif /* CONFIG_DEBUG_VM */ 2343 2344 static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags) 2345 { 2346 /* Don't skip if a software KASAN mode is enabled. */ 2347 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 2348 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 2349 return false; 2350 2351 /* Skip, if hardware tag-based KASAN is not enabled. */ 2352 if (!kasan_hw_tags_enabled()) 2353 return true; 2354 2355 /* 2356 * With hardware tag-based KASAN enabled, skip if either: 2357 * 2358 * 1. Memory tags have already been cleared via tag_clear_highpage(). 2359 * 2. Skipping has been requested via __GFP_SKIP_KASAN_UNPOISON. 
2360 */ 2361 return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON); 2362 } 2363 2364 static inline bool should_skip_init(gfp_t flags) 2365 { 2366 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 2367 if (!kasan_hw_tags_enabled()) 2368 return false; 2369 2370 /* For hardware tag-based KASAN, skip if requested. */ 2371 return (flags & __GFP_SKIP_ZERO); 2372 } 2373 2374 inline void post_alloc_hook(struct page *page, unsigned int order, 2375 gfp_t gfp_flags) 2376 { 2377 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 2378 !should_skip_init(gfp_flags); 2379 bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS); 2380 2381 set_page_private(page, 0); 2382 set_page_refcounted(page); 2383 2384 arch_alloc_page(page, order); 2385 debug_pagealloc_map_pages(page, 1 << order); 2386 2387 /* 2388 * Page unpoisoning must happen before memory initialization. 2389 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 2390 * allocations and the page unpoisoning code will complain. 2391 */ 2392 kernel_unpoison_pages(page, 1 << order); 2393 2394 /* 2395 * As memory initialization might be integrated into KASAN, 2396 * KASAN unpoisoning and memory initializion code must be 2397 * kept together to avoid discrepancies in behavior. 2398 */ 2399 2400 /* 2401 * If memory tags should be zeroed (which happens only when memory 2402 * should be initialized as well). 2403 */ 2404 if (init_tags) { 2405 int i; 2406 2407 /* Initialize both memory and tags. */ 2408 for (i = 0; i != 1 << order; ++i) 2409 tag_clear_highpage(page + i); 2410 2411 /* Note that memory is already initialized by the loop above. */ 2412 init = false; 2413 } 2414 if (!should_skip_kasan_unpoison(gfp_flags, init_tags)) { 2415 /* Unpoison shadow memory or set memory tags. */ 2416 kasan_unpoison_pages(page, order, init); 2417 2418 /* Note that memory is already initialized by KASAN. */ 2419 if (kasan_has_integrated_init()) 2420 init = false; 2421 } 2422 /* If memory is still not initialized, do it now. */ 2423 if (init) 2424 kernel_init_free_pages(page, 1 << order); 2425 /* Propagate __GFP_SKIP_KASAN_POISON to page flags. */ 2426 if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON)) 2427 SetPageSkipKASanPoison(page); 2428 2429 set_page_owner(page, order, gfp_flags); 2430 page_table_check_alloc(page, order); 2431 } 2432 2433 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 2434 unsigned int alloc_flags) 2435 { 2436 post_alloc_hook(page, order, gfp_flags); 2437 2438 if (order && (gfp_flags & __GFP_COMP)) 2439 prep_compound_page(page, order); 2440 2441 /* 2442 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 2443 * allocate the page. The expectation is that the caller is taking 2444 * steps that will free more memory. The caller should avoid the page 2445 * being used for !PFMEMALLOC purposes. 
2446 */ 2447 if (alloc_flags & ALLOC_NO_WATERMARKS) 2448 set_page_pfmemalloc(page); 2449 else 2450 clear_page_pfmemalloc(page); 2451 } 2452 2453 /* 2454 * Go through the free lists for the given migratetype and remove 2455 * the smallest available page from the freelists 2456 */ 2457 static __always_inline 2458 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 2459 int migratetype) 2460 { 2461 unsigned int current_order; 2462 struct free_area *area; 2463 struct page *page; 2464 2465 /* Find a page of the appropriate size in the preferred list */ 2466 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 2467 area = &(zone->free_area[current_order]); 2468 page = get_page_from_free_area(area, migratetype); 2469 if (!page) 2470 continue; 2471 del_page_from_free_list(page, zone, current_order); 2472 expand(zone, page, order, current_order, migratetype); 2473 set_pcppage_migratetype(page, migratetype); 2474 trace_mm_page_alloc_zone_locked(page, order, migratetype, 2475 pcp_allowed_order(order) && 2476 migratetype < MIGRATE_PCPTYPES); 2477 return page; 2478 } 2479 2480 return NULL; 2481 } 2482 2483 2484 /* 2485 * This array describes the order lists are fallen back to when 2486 * the free lists for the desirable migrate type are depleted 2487 * 2488 * The other migratetypes do not have fallbacks. 2489 */ 2490 static int fallbacks[MIGRATE_TYPES][3] = { 2491 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2492 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, 2493 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2494 }; 2495 2496 #ifdef CONFIG_CMA 2497 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2498 unsigned int order) 2499 { 2500 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 2501 } 2502 #else 2503 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2504 unsigned int order) { return NULL; } 2505 #endif 2506 2507 /* 2508 * Move the free pages in a range to the freelist tail of the requested type. 2509 * Note that start_page and end_pages are not aligned on a pageblock 2510 * boundary. If alignment is required, use move_freepages_block() 2511 */ 2512 static int move_freepages(struct zone *zone, 2513 unsigned long start_pfn, unsigned long end_pfn, 2514 int migratetype, int *num_movable) 2515 { 2516 struct page *page; 2517 unsigned long pfn; 2518 unsigned int order; 2519 int pages_moved = 0; 2520 2521 for (pfn = start_pfn; pfn <= end_pfn;) { 2522 page = pfn_to_page(pfn); 2523 if (!PageBuddy(page)) { 2524 /* 2525 * We assume that pages that could be isolated for 2526 * migration are movable. But we don't actually try 2527 * isolating, as that would be expensive. 
2528 */ 2529 if (num_movable && 2530 (PageLRU(page) || __PageMovable(page))) 2531 (*num_movable)++; 2532 pfn++; 2533 continue; 2534 } 2535 2536 /* Make sure we are not inadvertently changing nodes */ 2537 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 2538 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 2539 2540 order = buddy_order(page); 2541 move_to_free_list(page, zone, order, migratetype); 2542 pfn += 1 << order; 2543 pages_moved += 1 << order; 2544 } 2545 2546 return pages_moved; 2547 } 2548 2549 int move_freepages_block(struct zone *zone, struct page *page, 2550 int migratetype, int *num_movable) 2551 { 2552 unsigned long start_pfn, end_pfn, pfn; 2553 2554 if (num_movable) 2555 *num_movable = 0; 2556 2557 pfn = page_to_pfn(page); 2558 start_pfn = pfn & ~(pageblock_nr_pages - 1); 2559 end_pfn = start_pfn + pageblock_nr_pages - 1; 2560 2561 /* Do not cross zone boundaries */ 2562 if (!zone_spans_pfn(zone, start_pfn)) 2563 start_pfn = pfn; 2564 if (!zone_spans_pfn(zone, end_pfn)) 2565 return 0; 2566 2567 return move_freepages(zone, start_pfn, end_pfn, migratetype, 2568 num_movable); 2569 } 2570 2571 static void change_pageblock_range(struct page *pageblock_page, 2572 int start_order, int migratetype) 2573 { 2574 int nr_pageblocks = 1 << (start_order - pageblock_order); 2575 2576 while (nr_pageblocks--) { 2577 set_pageblock_migratetype(pageblock_page, migratetype); 2578 pageblock_page += pageblock_nr_pages; 2579 } 2580 } 2581 2582 /* 2583 * When we are falling back to another migratetype during allocation, try to 2584 * steal extra free pages from the same pageblocks to satisfy further 2585 * allocations, instead of polluting multiple pageblocks. 2586 * 2587 * If we are stealing a relatively large buddy page, it is likely there will 2588 * be more free pages in the pageblock, so try to steal them all. For 2589 * reclaimable and unmovable allocations, we steal regardless of page size, 2590 * as fragmentation caused by those allocations polluting movable pageblocks 2591 * is worse than movable allocations stealing from unmovable and reclaimable 2592 * pageblocks. 2593 */ 2594 static bool can_steal_fallback(unsigned int order, int start_mt) 2595 { 2596 /* 2597 * Leaving this order check is intended, although there is 2598 * relaxed order check in next check. The reason is that 2599 * we can actually steal whole pageblock if this condition met, 2600 * but, below check doesn't guarantee it and that is just heuristic 2601 * so could be changed anytime. 2602 */ 2603 if (order >= pageblock_order) 2604 return true; 2605 2606 if (order >= pageblock_order / 2 || 2607 start_mt == MIGRATE_RECLAIMABLE || 2608 start_mt == MIGRATE_UNMOVABLE || 2609 page_group_by_mobility_disabled) 2610 return true; 2611 2612 return false; 2613 } 2614 2615 static inline bool boost_watermark(struct zone *zone) 2616 { 2617 unsigned long max_boost; 2618 2619 if (!watermark_boost_factor) 2620 return false; 2621 /* 2622 * Don't bother in zones that are unlikely to produce results. 2623 * On small machines, including kdump capture kernels running 2624 * in a small area, boosting the watermark can cause an out of 2625 * memory situation immediately. 2626 */ 2627 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2628 return false; 2629 2630 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2631 watermark_boost_factor, 10000); 2632 2633 /* 2634 * high watermark may be uninitialised if fragmentation occurs 2635 * very early in boot so do not boost. 
We do not fall 2636 * through and boost by pageblock_nr_pages as failing 2637 * allocations that early means that reclaim is not going 2638 * to help and it may even be impossible to reclaim the 2639 * boosted watermark resulting in a hang. 2640 */ 2641 if (!max_boost) 2642 return false; 2643 2644 max_boost = max(pageblock_nr_pages, max_boost); 2645 2646 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2647 max_boost); 2648 2649 return true; 2650 } 2651 2652 /* 2653 * This function implements actual steal behaviour. If order is large enough, 2654 * we can steal whole pageblock. If not, we first move freepages in this 2655 * pageblock to our migratetype and determine how many already-allocated pages 2656 * are there in the pageblock with a compatible migratetype. If at least half 2657 * of pages are free or compatible, we can change migratetype of the pageblock 2658 * itself, so pages freed in the future will be put on the correct free list. 2659 */ 2660 static void steal_suitable_fallback(struct zone *zone, struct page *page, 2661 unsigned int alloc_flags, int start_type, bool whole_block) 2662 { 2663 unsigned int current_order = buddy_order(page); 2664 int free_pages, movable_pages, alike_pages; 2665 int old_block_type; 2666 2667 old_block_type = get_pageblock_migratetype(page); 2668 2669 /* 2670 * This can happen due to races and we want to prevent broken 2671 * highatomic accounting. 2672 */ 2673 if (is_migrate_highatomic(old_block_type)) 2674 goto single_page; 2675 2676 /* Take ownership for orders >= pageblock_order */ 2677 if (current_order >= pageblock_order) { 2678 change_pageblock_range(page, current_order, start_type); 2679 goto single_page; 2680 } 2681 2682 /* 2683 * Boost watermarks to increase reclaim pressure to reduce the 2684 * likelihood of future fallbacks. Wake kswapd now as the node 2685 * may be balanced overall and kswapd will not wake naturally. 2686 */ 2687 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2688 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2689 2690 /* We are not allowed to try stealing from the whole block */ 2691 if (!whole_block) 2692 goto single_page; 2693 2694 free_pages = move_freepages_block(zone, page, start_type, 2695 &movable_pages); 2696 /* 2697 * Determine how many pages are compatible with our allocation. 2698 * For movable allocation, it's the number of movable pages which 2699 * we just obtained. For other types it's a bit more tricky. 2700 */ 2701 if (start_type == MIGRATE_MOVABLE) { 2702 alike_pages = movable_pages; 2703 } else { 2704 /* 2705 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2706 * to MOVABLE pageblock, consider all non-movable pages as 2707 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2708 * vice versa, be conservative since we can't distinguish the 2709 * exact migratetype of non-movable pages. 2710 */ 2711 if (old_block_type == MIGRATE_MOVABLE) 2712 alike_pages = pageblock_nr_pages 2713 - (free_pages + movable_pages); 2714 else 2715 alike_pages = 0; 2716 } 2717 2718 /* moving whole block can fail due to zone boundary conditions */ 2719 if (!free_pages) 2720 goto single_page; 2721 2722 /* 2723 * If a sufficient number of pages in the block are either free or of 2724 * comparable migratability as our allocation, claim the whole block. 
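 *
 * For example, with pageblock_order == 9 (a 512-page block, assumed
 * here for illustration) the block is claimed once
 * free_pages + alike_pages >= 256, i.e. at least half of it is free or
 * compatible.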
2725 */ 2726 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2727 page_group_by_mobility_disabled) 2728 set_pageblock_migratetype(page, start_type); 2729 2730 return; 2731 2732 single_page: 2733 move_to_free_list(page, zone, current_order, start_type); 2734 } 2735 2736 /* 2737 * Check whether there is a suitable fallback freepage with requested order. 2738 * If only_stealable is true, this function returns fallback_mt only if 2739 * we can steal other freepages all together. This would help to reduce 2740 * fragmentation due to mixed migratetype pages in one pageblock. 2741 */ 2742 int find_suitable_fallback(struct free_area *area, unsigned int order, 2743 int migratetype, bool only_stealable, bool *can_steal) 2744 { 2745 int i; 2746 int fallback_mt; 2747 2748 if (area->nr_free == 0) 2749 return -1; 2750 2751 *can_steal = false; 2752 for (i = 0;; i++) { 2753 fallback_mt = fallbacks[migratetype][i]; 2754 if (fallback_mt == MIGRATE_TYPES) 2755 break; 2756 2757 if (free_area_empty(area, fallback_mt)) 2758 continue; 2759 2760 if (can_steal_fallback(order, migratetype)) 2761 *can_steal = true; 2762 2763 if (!only_stealable) 2764 return fallback_mt; 2765 2766 if (*can_steal) 2767 return fallback_mt; 2768 } 2769 2770 return -1; 2771 } 2772 2773 /* 2774 * Reserve a pageblock for exclusive use of high-order atomic allocations if 2775 * there are no empty page blocks that contain a page with a suitable order 2776 */ 2777 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2778 unsigned int alloc_order) 2779 { 2780 int mt; 2781 unsigned long max_managed, flags; 2782 2783 /* 2784 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2785 * Check is race-prone but harmless. 2786 */ 2787 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 2788 if (zone->nr_reserved_highatomic >= max_managed) 2789 return; 2790 2791 spin_lock_irqsave(&zone->lock, flags); 2792 2793 /* Recheck the nr_reserved_highatomic limit under the lock */ 2794 if (zone->nr_reserved_highatomic >= max_managed) 2795 goto out_unlock; 2796 2797 /* Yoink! */ 2798 mt = get_pageblock_migratetype(page); 2799 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 2800 if (migratetype_is_mergeable(mt)) { 2801 zone->nr_reserved_highatomic += pageblock_nr_pages; 2802 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2803 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 2804 } 2805 2806 out_unlock: 2807 spin_unlock_irqrestore(&zone->lock, flags); 2808 } 2809 2810 /* 2811 * Used when an allocation is about to fail under memory pressure. This 2812 * potentially hurts the reliability of high-order allocations when under 2813 * intense memory pressure but failed atomic allocations should be easier 2814 * to recover from than an OOM. 2815 * 2816 * If @force is true, try to unreserve a pageblock even though highatomic 2817 * pageblock is exhausted. 2818 */ 2819 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2820 bool force) 2821 { 2822 struct zonelist *zonelist = ac->zonelist; 2823 unsigned long flags; 2824 struct zoneref *z; 2825 struct zone *zone; 2826 struct page *page; 2827 int order; 2828 bool ret; 2829 2830 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2831 ac->nodemask) { 2832 /* 2833 * Preserve at least one pageblock unless memory pressure 2834 * is really high. 
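 *
 * E.g. with pageblock_nr_pages == 512 (illustrative), a zone holding
 * 512 or fewer reserved highatomic pages is skipped unless @force is
 * set.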
2835 */ 2836 if (!force && zone->nr_reserved_highatomic <= 2837 pageblock_nr_pages) 2838 continue; 2839 2840 spin_lock_irqsave(&zone->lock, flags); 2841 for (order = 0; order < MAX_ORDER; order++) { 2842 struct free_area *area = &(zone->free_area[order]); 2843 2844 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 2845 if (!page) 2846 continue; 2847 2848 /* 2849 * In page freeing path, migratetype change is racy so 2850 * we can counter several free pages in a pageblock 2851 * in this loop although we changed the pageblock type 2852 * from highatomic to ac->migratetype. So we should 2853 * adjust the count once. 2854 */ 2855 if (is_migrate_highatomic_page(page)) { 2856 /* 2857 * It should never happen but changes to 2858 * locking could inadvertently allow a per-cpu 2859 * drain to add pages to MIGRATE_HIGHATOMIC 2860 * while unreserving so be safe and watch for 2861 * underflows. 2862 */ 2863 zone->nr_reserved_highatomic -= min( 2864 pageblock_nr_pages, 2865 zone->nr_reserved_highatomic); 2866 } 2867 2868 /* 2869 * Convert to ac->migratetype and avoid the normal 2870 * pageblock stealing heuristics. Minimally, the caller 2871 * is doing the work and needs the pages. More 2872 * importantly, if the block was always converted to 2873 * MIGRATE_UNMOVABLE or another type then the number 2874 * of pageblocks that cannot be completely freed 2875 * may increase. 2876 */ 2877 set_pageblock_migratetype(page, ac->migratetype); 2878 ret = move_freepages_block(zone, page, ac->migratetype, 2879 NULL); 2880 if (ret) { 2881 spin_unlock_irqrestore(&zone->lock, flags); 2882 return ret; 2883 } 2884 } 2885 spin_unlock_irqrestore(&zone->lock, flags); 2886 } 2887 2888 return false; 2889 } 2890 2891 /* 2892 * Try finding a free buddy page on the fallback list and put it on the free 2893 * list of requested migratetype, possibly along with other pages from the same 2894 * block, depending on fragmentation avoidance heuristics. Returns true if 2895 * fallback was found so that __rmqueue_smallest() can grab it. 2896 * 2897 * The use of signed ints for order and current_order is a deliberate 2898 * deviation from the rest of this file, to make the for loop 2899 * condition simpler. 2900 */ 2901 static __always_inline bool 2902 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2903 unsigned int alloc_flags) 2904 { 2905 struct free_area *area; 2906 int current_order; 2907 int min_order = order; 2908 struct page *page; 2909 int fallback_mt; 2910 bool can_steal; 2911 2912 /* 2913 * Do not steal pages from freelists belonging to other pageblocks 2914 * i.e. orders < pageblock_order. If there are no local zones free, 2915 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2916 */ 2917 if (alloc_flags & ALLOC_NOFRAGMENT) 2918 min_order = pageblock_order; 2919 2920 /* 2921 * Find the largest available free page in the other list. This roughly 2922 * approximates finding the pageblock with the most free pages, which 2923 * would be too costly to do exactly. 2924 */ 2925 for (current_order = MAX_ORDER - 1; current_order >= min_order; 2926 --current_order) { 2927 area = &(zone->free_area[current_order]); 2928 fallback_mt = find_suitable_fallback(area, current_order, 2929 start_migratetype, false, &can_steal); 2930 if (fallback_mt == -1) 2931 continue; 2932 2933 /* 2934 * We cannot steal all free pages from the pageblock and the 2935 * requested migratetype is movable. 
In that case it's better to 2936 * steal and split the smallest available page instead of the 2937 * largest available page, because even if the next movable 2938 * allocation falls back into a different pageblock than this 2939 * one, it won't cause permanent fragmentation. 2940 */ 2941 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2942 && current_order > order) 2943 goto find_smallest; 2944 2945 goto do_steal; 2946 } 2947 2948 return false; 2949 2950 find_smallest: 2951 for (current_order = order; current_order < MAX_ORDER; 2952 current_order++) { 2953 area = &(zone->free_area[current_order]); 2954 fallback_mt = find_suitable_fallback(area, current_order, 2955 start_migratetype, false, &can_steal); 2956 if (fallback_mt != -1) 2957 break; 2958 } 2959 2960 /* 2961 * This should not happen - we already found a suitable fallback 2962 * when looking for the largest page. 2963 */ 2964 VM_BUG_ON(current_order == MAX_ORDER); 2965 2966 do_steal: 2967 page = get_page_from_free_area(area, fallback_mt); 2968 2969 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2970 can_steal); 2971 2972 trace_mm_page_alloc_extfrag(page, order, current_order, 2973 start_migratetype, fallback_mt); 2974 2975 return true; 2976 2977 } 2978 2979 /* 2980 * Do the hard work of removing an element from the buddy allocator. 2981 * Call me with the zone->lock already held. 2982 */ 2983 static __always_inline struct page * 2984 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2985 unsigned int alloc_flags) 2986 { 2987 struct page *page; 2988 2989 if (IS_ENABLED(CONFIG_CMA)) { 2990 /* 2991 * Balance movable allocations between regular and CMA areas by 2992 * allocating from CMA when over half of the zone's free memory 2993 * is in the CMA area. 2994 */ 2995 if (alloc_flags & ALLOC_CMA && 2996 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2997 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2998 page = __rmqueue_cma_fallback(zone, order); 2999 if (page) 3000 return page; 3001 } 3002 } 3003 retry: 3004 page = __rmqueue_smallest(zone, order, migratetype); 3005 if (unlikely(!page)) { 3006 if (alloc_flags & ALLOC_CMA) 3007 page = __rmqueue_cma_fallback(zone, order); 3008 3009 if (!page && __rmqueue_fallback(zone, order, migratetype, 3010 alloc_flags)) 3011 goto retry; 3012 } 3013 return page; 3014 } 3015 3016 /* 3017 * Obtain a specified number of elements from the buddy allocator, all under 3018 * a single hold of the lock, for efficiency. Add them to the supplied list. 3019 * Returns the number of new pages which were placed at *list. 3020 */ 3021 static int rmqueue_bulk(struct zone *zone, unsigned int order, 3022 unsigned long count, struct list_head *list, 3023 int migratetype, unsigned int alloc_flags) 3024 { 3025 int i, allocated = 0; 3026 3027 /* 3028 * local_lock_irq held so equivalent to spin_lock_irqsave for 3029 * both PREEMPT_RT and non-PREEMPT_RT configurations. 3030 */ 3031 spin_lock(&zone->lock); 3032 for (i = 0; i < count; ++i) { 3033 struct page *page = __rmqueue(zone, order, migratetype, 3034 alloc_flags); 3035 if (unlikely(page == NULL)) 3036 break; 3037 3038 if (unlikely(check_pcp_refill(page, order))) 3039 continue; 3040 3041 /* 3042 * Split buddy pages returned by expand() are received here in 3043 * physical page order. The page is added to the tail of 3044 * caller's list. From the callers perspective, the linked list 3045 * is ordered by page number under some conditions. 
This is 3046 * useful for IO devices that can forward direction from the 3047 * head, thus also in the physical page order. This is useful 3048 * for IO devices that can merge IO requests if the physical 3049 * pages are ordered properly. 3050 */ 3051 list_add_tail(&page->lru, list); 3052 allocated++; 3053 if (is_migrate_cma(get_pcppage_migratetype(page))) 3054 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 3055 -(1 << order)); 3056 } 3057 3058 /* 3059 * i pages were removed from the buddy list even if some leak due 3060 * to check_pcp_refill failing so adjust NR_FREE_PAGES based 3061 * on i. Do not confuse with 'allocated' which is the number of 3062 * pages added to the pcp list. 3063 */ 3064 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 3065 spin_unlock(&zone->lock); 3066 return allocated; 3067 } 3068 3069 #ifdef CONFIG_NUMA 3070 /* 3071 * Called from the vmstat counter updater to drain pagesets of this 3072 * currently executing processor on remote nodes after they have 3073 * expired. 3074 * 3075 * Note that this function must be called with the thread pinned to 3076 * a single processor. 3077 */ 3078 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 3079 { 3080 unsigned long flags; 3081 int to_drain, batch; 3082 3083 local_lock_irqsave(&pagesets.lock, flags); 3084 batch = READ_ONCE(pcp->batch); 3085 to_drain = min(pcp->count, batch); 3086 if (to_drain > 0) 3087 free_pcppages_bulk(zone, to_drain, pcp, 0); 3088 local_unlock_irqrestore(&pagesets.lock, flags); 3089 } 3090 #endif 3091 3092 /* 3093 * Drain pcplists of the indicated processor and zone. 3094 * 3095 * The processor must either be the current processor and the 3096 * thread pinned to the current processor or a processor that 3097 * is not online. 3098 */ 3099 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 3100 { 3101 unsigned long flags; 3102 struct per_cpu_pages *pcp; 3103 3104 local_lock_irqsave(&pagesets.lock, flags); 3105 3106 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 3107 if (pcp->count) 3108 free_pcppages_bulk(zone, pcp->count, pcp, 0); 3109 3110 local_unlock_irqrestore(&pagesets.lock, flags); 3111 } 3112 3113 /* 3114 * Drain pcplists of all zones on the indicated processor. 3115 * 3116 * The processor must either be the current processor and the 3117 * thread pinned to the current processor or a processor that 3118 * is not online. 3119 */ 3120 static void drain_pages(unsigned int cpu) 3121 { 3122 struct zone *zone; 3123 3124 for_each_populated_zone(zone) { 3125 drain_pages_zone(cpu, zone); 3126 } 3127 } 3128 3129 /* 3130 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 3131 * 3132 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 3133 * the single zone's pages. 3134 */ 3135 void drain_local_pages(struct zone *zone) 3136 { 3137 int cpu = smp_processor_id(); 3138 3139 if (zone) 3140 drain_pages_zone(cpu, zone); 3141 else 3142 drain_pages(cpu); 3143 } 3144 3145 static void drain_local_pages_wq(struct work_struct *work) 3146 { 3147 struct pcpu_drain *drain; 3148 3149 drain = container_of(work, struct pcpu_drain, work); 3150 3151 /* 3152 * drain_all_pages doesn't use proper cpu hotplug protection so 3153 * we can race with cpu offline when the WQ can move this from 3154 * a cpu pinned worker to an unbound one. We can operate on a different 3155 * cpu which is alright but we also have to make sure to not move to 3156 * a different one. 
3157 */ 3158 migrate_disable(); 3159 drain_local_pages(drain->zone); 3160 migrate_enable(); 3161 } 3162 3163 /* 3164 * The implementation of drain_all_pages(), exposing an extra parameter to 3165 * drain on all cpus. 3166 * 3167 * drain_all_pages() is optimized to only execute on cpus where pcplists are 3168 * not empty. The check for non-emptiness can however race with a free to 3169 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 3170 * that need the guarantee that every CPU has drained can disable the 3171 * optimizing racy check. 3172 */ 3173 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 3174 { 3175 int cpu; 3176 3177 /* 3178 * Allocate in the BSS so we won't require allocation in 3179 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 3180 */ 3181 static cpumask_t cpus_with_pcps; 3182 3183 /* 3184 * Make sure nobody triggers this path before mm_percpu_wq is fully 3185 * initialized. 3186 */ 3187 if (WARN_ON_ONCE(!mm_percpu_wq)) 3188 return; 3189 3190 /* 3191 * Do not drain if one is already in progress unless it's specific to 3192 * a zone. Such callers are primarily CMA and memory hotplug and need 3193 * the drain to be complete when the call returns. 3194 */ 3195 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 3196 if (!zone) 3197 return; 3198 mutex_lock(&pcpu_drain_mutex); 3199 } 3200 3201 /* 3202 * We don't care about racing with CPU hotplug event 3203 * as offline notification will cause the notified 3204 * cpu to drain that CPU pcps and on_each_cpu_mask 3205 * disables preemption as part of its processing 3206 */ 3207 for_each_online_cpu(cpu) { 3208 struct per_cpu_pages *pcp; 3209 struct zone *z; 3210 bool has_pcps = false; 3211 3212 if (force_all_cpus) { 3213 /* 3214 * The pcp.count check is racy, some callers need a 3215 * guarantee that no cpu is missed. 3216 */ 3217 has_pcps = true; 3218 } else if (zone) { 3219 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 3220 if (pcp->count) 3221 has_pcps = true; 3222 } else { 3223 for_each_populated_zone(z) { 3224 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 3225 if (pcp->count) { 3226 has_pcps = true; 3227 break; 3228 } 3229 } 3230 } 3231 3232 if (has_pcps) 3233 cpumask_set_cpu(cpu, &cpus_with_pcps); 3234 else 3235 cpumask_clear_cpu(cpu, &cpus_with_pcps); 3236 } 3237 3238 for_each_cpu(cpu, &cpus_with_pcps) { 3239 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu); 3240 3241 drain->zone = zone; 3242 INIT_WORK(&drain->work, drain_local_pages_wq); 3243 queue_work_on(cpu, mm_percpu_wq, &drain->work); 3244 } 3245 for_each_cpu(cpu, &cpus_with_pcps) 3246 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); 3247 3248 mutex_unlock(&pcpu_drain_mutex); 3249 } 3250 3251 /* 3252 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 3253 * 3254 * When zone parameter is non-NULL, spill just the single zone's pages. 3255 * 3256 * Note that this can be extremely slow as the draining happens in a workqueue. 3257 */ 3258 void drain_all_pages(struct zone *zone) 3259 { 3260 __drain_all_pages(zone, false); 3261 } 3262 3263 #ifdef CONFIG_HIBERNATION 3264 3265 /* 3266 * Touch the watchdog for every WD_PAGE_COUNT pages. 
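 *
 * With 4 KiB pages (an assumption of this example) that is one watchdog
 * touch per 128 * 1024 pages, i.e. every 512 MiB scanned.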
3267 */ 3268 #define WD_PAGE_COUNT (128*1024) 3269 3270 void mark_free_pages(struct zone *zone) 3271 { 3272 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; 3273 unsigned long flags; 3274 unsigned int order, t; 3275 struct page *page; 3276 3277 if (zone_is_empty(zone)) 3278 return; 3279 3280 spin_lock_irqsave(&zone->lock, flags); 3281 3282 max_zone_pfn = zone_end_pfn(zone); 3283 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 3284 if (pfn_valid(pfn)) { 3285 page = pfn_to_page(pfn); 3286 3287 if (!--page_count) { 3288 touch_nmi_watchdog(); 3289 page_count = WD_PAGE_COUNT; 3290 } 3291 3292 if (page_zone(page) != zone) 3293 continue; 3294 3295 if (!swsusp_page_is_forbidden(page)) 3296 swsusp_unset_page_free(page); 3297 } 3298 3299 for_each_migratetype_order(order, t) { 3300 list_for_each_entry(page, 3301 &zone->free_area[order].free_list[t], lru) { 3302 unsigned long i; 3303 3304 pfn = page_to_pfn(page); 3305 for (i = 0; i < (1UL << order); i++) { 3306 if (!--page_count) { 3307 touch_nmi_watchdog(); 3308 page_count = WD_PAGE_COUNT; 3309 } 3310 swsusp_set_page_free(pfn_to_page(pfn + i)); 3311 } 3312 } 3313 } 3314 spin_unlock_irqrestore(&zone->lock, flags); 3315 } 3316 #endif /* CONFIG_PM */ 3317 3318 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 3319 unsigned int order) 3320 { 3321 int migratetype; 3322 3323 if (!free_pcp_prepare(page, order)) 3324 return false; 3325 3326 migratetype = get_pfnblock_migratetype(page, pfn); 3327 set_pcppage_migratetype(page, migratetype); 3328 return true; 3329 } 3330 3331 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch, 3332 bool free_high) 3333 { 3334 int min_nr_free, max_nr_free; 3335 3336 /* Free everything if batch freeing high-order pages. */ 3337 if (unlikely(free_high)) 3338 return pcp->count; 3339 3340 /* Check for PCP disabled or boot pageset */ 3341 if (unlikely(high < batch)) 3342 return 1; 3343 3344 /* Leave at least pcp->batch pages on the list */ 3345 min_nr_free = batch; 3346 max_nr_free = high - batch; 3347 3348 /* 3349 * Double the number of pages freed each time there is subsequent 3350 * freeing of pages without any allocation. 3351 */ 3352 batch <<= pcp->free_factor; 3353 if (batch < max_nr_free) 3354 pcp->free_factor++; 3355 batch = clamp(batch, min_nr_free, max_nr_free); 3356 3357 return batch; 3358 } 3359 3360 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 3361 bool free_high) 3362 { 3363 int high = READ_ONCE(pcp->high); 3364 3365 if (unlikely(!high || free_high)) 3366 return 0; 3367 3368 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 3369 return high; 3370 3371 /* 3372 * If reclaim is active, limit the number of pages that can be 3373 * stored on pcp lists 3374 */ 3375 return min(READ_ONCE(pcp->batch) << 2, high); 3376 } 3377 3378 static void free_unref_page_commit(struct page *page, int migratetype, 3379 unsigned int order) 3380 { 3381 struct zone *zone = page_zone(page); 3382 struct per_cpu_pages *pcp; 3383 int high; 3384 int pindex; 3385 bool free_high; 3386 3387 __count_vm_event(PGFREE); 3388 pcp = this_cpu_ptr(zone->per_cpu_pageset); 3389 pindex = order_to_pindex(migratetype, order); 3390 list_add(&page->lru, &pcp->lists[pindex]); 3391 pcp->count += 1 << order; 3392 3393 /* 3394 * As high-order pages other than THP's stored on PCP can contribute 3395 * to fragmentation, limit the number stored when PCP is heavily 3396 * freeing without allocation. 
The remainder after bulk freeing 3397 * stops will be drained from vmstat refresh context. 3398 */ 3399 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); 3400 3401 high = nr_pcp_high(pcp, zone, free_high); 3402 if (pcp->count >= high) { 3403 int batch = READ_ONCE(pcp->batch); 3404 3405 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex); 3406 } 3407 } 3408 3409 /* 3410 * Free a pcp page 3411 */ 3412 void free_unref_page(struct page *page, unsigned int order) 3413 { 3414 unsigned long flags; 3415 unsigned long pfn = page_to_pfn(page); 3416 int migratetype; 3417 3418 if (!free_unref_page_prepare(page, pfn, order)) 3419 return; 3420 3421 /* 3422 * We only track unmovable, reclaimable and movable on pcp lists. 3423 * Place ISOLATE pages on the isolated list because they are being 3424 * offlined but treat HIGHATOMIC as movable pages so we can get those 3425 * areas back if necessary. Otherwise, we may have to free 3426 * excessively into the page allocator 3427 */ 3428 migratetype = get_pcppage_migratetype(page); 3429 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 3430 if (unlikely(is_migrate_isolate(migratetype))) { 3431 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 3432 return; 3433 } 3434 migratetype = MIGRATE_MOVABLE; 3435 } 3436 3437 local_lock_irqsave(&pagesets.lock, flags); 3438 free_unref_page_commit(page, migratetype, order); 3439 local_unlock_irqrestore(&pagesets.lock, flags); 3440 } 3441 3442 /* 3443 * Free a list of 0-order pages 3444 */ 3445 void free_unref_page_list(struct list_head *list) 3446 { 3447 struct page *page, *next; 3448 unsigned long flags; 3449 int batch_count = 0; 3450 int migratetype; 3451 3452 /* Prepare pages for freeing */ 3453 list_for_each_entry_safe(page, next, list, lru) { 3454 unsigned long pfn = page_to_pfn(page); 3455 if (!free_unref_page_prepare(page, pfn, 0)) { 3456 list_del(&page->lru); 3457 continue; 3458 } 3459 3460 /* 3461 * Free isolated pages directly to the allocator, see 3462 * comment in free_unref_page. 3463 */ 3464 migratetype = get_pcppage_migratetype(page); 3465 if (unlikely(is_migrate_isolate(migratetype))) { 3466 list_del(&page->lru); 3467 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 3468 continue; 3469 } 3470 } 3471 3472 local_lock_irqsave(&pagesets.lock, flags); 3473 list_for_each_entry_safe(page, next, list, lru) { 3474 /* 3475 * Non-isolated types over MIGRATE_PCPTYPES get added 3476 * to the MIGRATE_MOVABLE pcp list. 3477 */ 3478 migratetype = get_pcppage_migratetype(page); 3479 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 3480 migratetype = MIGRATE_MOVABLE; 3481 3482 trace_mm_page_free_batched(page); 3483 free_unref_page_commit(page, migratetype, 0); 3484 3485 /* 3486 * Guard against excessive IRQ disabled times when we get 3487 * a large list of pages to free. 3488 */ 3489 if (++batch_count == SWAP_CLUSTER_MAX) { 3490 local_unlock_irqrestore(&pagesets.lock, flags); 3491 batch_count = 0; 3492 local_lock_irqsave(&pagesets.lock, flags); 3493 } 3494 } 3495 local_unlock_irqrestore(&pagesets.lock, flags); 3496 } 3497 3498 /* 3499 * split_page takes a non-compound higher-order page, and splits it into 3500 * n (1<<order) sub-pages: page[0..n] 3501 * Each sub-page must be freed individually. 3502 * 3503 * Note: this is probably too low level an operation for use in drivers. 3504 * Please consult with lkml before using this in your driver. 
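 *
 * Illustrative sketch of the intended pattern (not an in-tree caller,
 * error handling trimmed): allocate a non-compound high-order page,
 * split it, then release the sub-pages one by one.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		for (i = 0; i < 4; i++)
 *			__free_page(page + i);
 *	}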
3505 */ 3506 void split_page(struct page *page, unsigned int order) 3507 { 3508 int i; 3509 3510 VM_BUG_ON_PAGE(PageCompound(page), page); 3511 VM_BUG_ON_PAGE(!page_count(page), page); 3512 3513 for (i = 1; i < (1 << order); i++) 3514 set_page_refcounted(page + i); 3515 split_page_owner(page, 1 << order); 3516 split_page_memcg(page, 1 << order); 3517 } 3518 EXPORT_SYMBOL_GPL(split_page); 3519 3520 int __isolate_free_page(struct page *page, unsigned int order) 3521 { 3522 unsigned long watermark; 3523 struct zone *zone; 3524 int mt; 3525 3526 BUG_ON(!PageBuddy(page)); 3527 3528 zone = page_zone(page); 3529 mt = get_pageblock_migratetype(page); 3530 3531 if (!is_migrate_isolate(mt)) { 3532 /* 3533 * Obey watermarks as if the page was being allocated. We can 3534 * emulate a high-order watermark check with a raised order-0 3535 * watermark, because we already know our high-order page 3536 * exists. 3537 */ 3538 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3539 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3540 return 0; 3541 3542 __mod_zone_freepage_state(zone, -(1UL << order), mt); 3543 } 3544 3545 /* Remove page from free list */ 3546 3547 del_page_from_free_list(page, zone, order); 3548 3549 /* 3550 * Set the pageblock if the isolated page is at least half of a 3551 * pageblock 3552 */ 3553 if (order >= pageblock_order - 1) { 3554 struct page *endpage = page + (1 << order) - 1; 3555 for (; page < endpage; page += pageblock_nr_pages) { 3556 int mt = get_pageblock_migratetype(page); 3557 /* 3558 * Only change normal pageblocks (i.e., they can merge 3559 * with others) 3560 */ 3561 if (migratetype_is_mergeable(mt)) 3562 set_pageblock_migratetype(page, 3563 MIGRATE_MOVABLE); 3564 } 3565 } 3566 3567 3568 return 1UL << order; 3569 } 3570 3571 /** 3572 * __putback_isolated_page - Return a now-isolated page back where we got it 3573 * @page: Page that was isolated 3574 * @order: Order of the isolated page 3575 * @mt: The page's pageblock's migratetype 3576 * 3577 * This function is meant to return a page pulled from the free lists via 3578 * __isolate_free_page back to the free lists they were pulled from. 3579 */ 3580 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3581 { 3582 struct zone *zone = page_zone(page); 3583 3584 /* zone lock should be held when this function is called */ 3585 lockdep_assert_held(&zone->lock); 3586 3587 /* Return isolated page to tail of freelist. */ 3588 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3589 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3590 } 3591 3592 /* 3593 * Update NUMA hit/miss statistics 3594 * 3595 * Must be called with interrupts disabled. 
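 *
 * Worked example: if the preferred zone sits on node 0 but the page is
 * taken from a zone on node 1 while running on a node 0 CPU, NUMA_MISS
 * is counted against the node 1 zone, NUMA_FOREIGN against the
 * preferred node 0 zone, and NUMA_OTHER (the local_stat in that case)
 * against the node 1 zone.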
3596 */ 3597 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 3598 long nr_account) 3599 { 3600 #ifdef CONFIG_NUMA 3601 enum numa_stat_item local_stat = NUMA_LOCAL; 3602 3603 /* skip numa counters update if numa stats is disabled */ 3604 if (!static_branch_likely(&vm_numa_stat_key)) 3605 return; 3606 3607 if (zone_to_nid(z) != numa_node_id()) 3608 local_stat = NUMA_OTHER; 3609 3610 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3611 __count_numa_events(z, NUMA_HIT, nr_account); 3612 else { 3613 __count_numa_events(z, NUMA_MISS, nr_account); 3614 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 3615 } 3616 __count_numa_events(z, local_stat, nr_account); 3617 #endif 3618 } 3619 3620 /* Remove page from the per-cpu list, caller must protect the list */ 3621 static inline 3622 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3623 int migratetype, 3624 unsigned int alloc_flags, 3625 struct per_cpu_pages *pcp, 3626 struct list_head *list) 3627 { 3628 struct page *page; 3629 3630 do { 3631 if (list_empty(list)) { 3632 int batch = READ_ONCE(pcp->batch); 3633 int alloced; 3634 3635 /* 3636 * Scale batch relative to order if batch implies 3637 * free pages can be stored on the PCP. Batch can 3638 * be 1 for small zones or for boot pagesets which 3639 * should never store free pages as the pages may 3640 * belong to arbitrary zones. 3641 */ 3642 if (batch > 1) 3643 batch = max(batch >> order, 2); 3644 alloced = rmqueue_bulk(zone, order, 3645 batch, list, 3646 migratetype, alloc_flags); 3647 3648 pcp->count += alloced << order; 3649 if (unlikely(list_empty(list))) 3650 return NULL; 3651 } 3652 3653 page = list_first_entry(list, struct page, lru); 3654 list_del(&page->lru); 3655 pcp->count -= 1 << order; 3656 } while (check_new_pcp(page, order)); 3657 3658 return page; 3659 } 3660 3661 /* Lock and remove page from the per-cpu list */ 3662 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3663 struct zone *zone, unsigned int order, 3664 gfp_t gfp_flags, int migratetype, 3665 unsigned int alloc_flags) 3666 { 3667 struct per_cpu_pages *pcp; 3668 struct list_head *list; 3669 struct page *page; 3670 unsigned long flags; 3671 3672 local_lock_irqsave(&pagesets.lock, flags); 3673 3674 /* 3675 * On allocation, reduce the number of pages that are batch freed. 3676 * See nr_pcp_free() where free_factor is increased for subsequent 3677 * frees. 3678 */ 3679 pcp = this_cpu_ptr(zone->per_cpu_pageset); 3680 pcp->free_factor >>= 1; 3681 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3682 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3683 local_unlock_irqrestore(&pagesets.lock, flags); 3684 if (page) { 3685 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); 3686 zone_statistics(preferred_zone, zone, 1); 3687 } 3688 return page; 3689 } 3690 3691 /* 3692 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 3693 */ 3694 static inline 3695 struct page *rmqueue(struct zone *preferred_zone, 3696 struct zone *zone, unsigned int order, 3697 gfp_t gfp_flags, unsigned int alloc_flags, 3698 int migratetype) 3699 { 3700 unsigned long flags; 3701 struct page *page; 3702 3703 if (likely(pcp_allowed_order(order))) { 3704 /* 3705 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and 3706 * we need to skip it when CMA area isn't allowed. 
3707 */ 3708 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || 3709 migratetype != MIGRATE_MOVABLE) { 3710 page = rmqueue_pcplist(preferred_zone, zone, order, 3711 gfp_flags, migratetype, alloc_flags); 3712 goto out; 3713 } 3714 } 3715 3716 /* 3717 * We most definitely don't want callers attempting to 3718 * allocate greater than order-1 page units with __GFP_NOFAIL. 3719 */ 3720 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 3721 3722 do { 3723 page = NULL; 3724 spin_lock_irqsave(&zone->lock, flags); 3725 /* 3726 * order-0 request can reach here when the pcplist is skipped 3727 * due to non-CMA allocation context. HIGHATOMIC area is 3728 * reserved for high-order atomic allocation, so order-0 3729 * request should skip it. 3730 */ 3731 if (order > 0 && alloc_flags & ALLOC_HARDER) 3732 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3733 if (!page) { 3734 page = __rmqueue(zone, order, migratetype, alloc_flags); 3735 if (!page) 3736 goto failed; 3737 } 3738 __mod_zone_freepage_state(zone, -(1 << order), 3739 get_pcppage_migratetype(page)); 3740 spin_unlock_irqrestore(&zone->lock, flags); 3741 } while (check_new_pages(page, order)); 3742 3743 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3744 zone_statistics(preferred_zone, zone, 1); 3745 3746 out: 3747 /* Separate test+clear to avoid unnecessary atomics */ 3748 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { 3749 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3750 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3751 } 3752 3753 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3754 return page; 3755 3756 failed: 3757 spin_unlock_irqrestore(&zone->lock, flags); 3758 return NULL; 3759 } 3760 3761 #ifdef CONFIG_FAIL_PAGE_ALLOC 3762 3763 static struct { 3764 struct fault_attr attr; 3765 3766 bool ignore_gfp_highmem; 3767 bool ignore_gfp_reclaim; 3768 u32 min_order; 3769 } fail_page_alloc = { 3770 .attr = FAULT_ATTR_INITIALIZER, 3771 .ignore_gfp_reclaim = true, 3772 .ignore_gfp_highmem = true, 3773 .min_order = 1, 3774 }; 3775 3776 static int __init setup_fail_page_alloc(char *str) 3777 { 3778 return setup_fault_attr(&fail_page_alloc.attr, str); 3779 } 3780 __setup("fail_page_alloc=", setup_fail_page_alloc); 3781 3782 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3783 { 3784 if (order < fail_page_alloc.min_order) 3785 return false; 3786 if (gfp_mask & __GFP_NOFAIL) 3787 return false; 3788 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 3789 return false; 3790 if (fail_page_alloc.ignore_gfp_reclaim && 3791 (gfp_mask & __GFP_DIRECT_RECLAIM)) 3792 return false; 3793 3794 if (gfp_mask & __GFP_NOWARN) 3795 fail_page_alloc.attr.no_warn = true; 3796 3797 return should_fail(&fail_page_alloc.attr, 1 << order); 3798 } 3799 3800 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3801 3802 static int __init fail_page_alloc_debugfs(void) 3803 { 3804 umode_t mode = S_IFREG | 0600; 3805 struct dentry *dir; 3806 3807 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 3808 &fail_page_alloc.attr); 3809 3810 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3811 &fail_page_alloc.ignore_gfp_reclaim); 3812 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 3813 &fail_page_alloc.ignore_gfp_highmem); 3814 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); 3815 3816 return 0; 3817 } 3818 3819 late_initcall(fail_page_alloc_debugfs); 3820 3821 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3822 3823 #else /* CONFIG_FAIL_PAGE_ALLOC */ 3824 3825 static 
inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3826 {
3827 return false;
3828 }
3829
3830 #endif /* CONFIG_FAIL_PAGE_ALLOC */
3831
3832 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
3833 {
3834 return __should_fail_alloc_page(gfp_mask, order);
3835 }
3836 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
3837
3838 static inline long __zone_watermark_unusable_free(struct zone *z,
3839 unsigned int order, unsigned int alloc_flags)
3840 {
3841 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3842 long unusable_free = (1 << order) - 1;
3843
3844 /*
3845 * If the caller does not have rights to ALLOC_HARDER then subtract
3846 * the high-atomic reserves. This will over-estimate the size of the
3847 * atomic reserve but it avoids a search.
3848 */
3849 if (likely(!alloc_harder))
3850 unusable_free += z->nr_reserved_highatomic;
3851
3852 #ifdef CONFIG_CMA
3853 /* If allocation can't use CMA areas don't use free CMA pages */
3854 if (!(alloc_flags & ALLOC_CMA))
3855 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
3856 #endif
3857
3858 return unusable_free;
3859 }
3860
3861 /*
3862 * Return true if free base pages are above 'mark'. For high-order checks it
3863 * will return true if the order-0 watermark is reached and there is at least
3864 * one free page of a suitable size. Checking now avoids taking the zone lock
3865 * to check in the allocation paths if no pages are free.
3866 */
3867 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
3868 int highest_zoneidx, unsigned int alloc_flags,
3869 long free_pages)
3870 {
3871 long min = mark;
3872 int o;
3873 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
3874
3875 /* free_pages may go negative - that's OK */
3876 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);
3877
3878 if (alloc_flags & ALLOC_HIGH)
3879 min -= min / 2;
3880
3881 if (unlikely(alloc_harder)) {
3882 /*
3883 * OOM victims can try even harder than normal ALLOC_HARDER
3884 * users on the grounds that they are definitely going to be in
3885 * the exit path shortly and will free memory. Any allocation they
3886 * make during the free path will be small and short-lived.
3887 */
3888 if (alloc_flags & ALLOC_OOM)
3889 min -= min / 2;
3890 else
3891 min -= min / 4;
3892 }
3893
3894 /*
3895 * Check watermarks for an order-0 allocation request. If these
3896 * are not met, then a high-order request also cannot go ahead
3897 * even if a suitable page happened to be free.
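 *
 * As a rough illustration of the arithmetic above: with mark = 1024
 * pages, ALLOC_HIGH lowers the effective minimum to 512, and an OOM
 * victim (ALLOC_OOM) halves it again to 256 (other ALLOC_HARDER users
 * get 384); the order-0 check below then fails only when the usable
 * free pages do not exceed that minimum plus the lowmem reserve.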
3898 */ 3899 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3900 return false; 3901 3902 /* If this is an order-0 request then the watermark is fine */ 3903 if (!order) 3904 return true; 3905 3906 /* For a high-order request, check at least one suitable page is free */ 3907 for (o = order; o < MAX_ORDER; o++) { 3908 struct free_area *area = &z->free_area[o]; 3909 int mt; 3910 3911 if (!area->nr_free) 3912 continue; 3913 3914 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3915 if (!free_area_empty(area, mt)) 3916 return true; 3917 } 3918 3919 #ifdef CONFIG_CMA 3920 if ((alloc_flags & ALLOC_CMA) && 3921 !free_area_empty(area, MIGRATE_CMA)) { 3922 return true; 3923 } 3924 #endif 3925 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC)) 3926 return true; 3927 } 3928 return false; 3929 } 3930 3931 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3932 int highest_zoneidx, unsigned int alloc_flags) 3933 { 3934 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3935 zone_page_state(z, NR_FREE_PAGES)); 3936 } 3937 3938 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3939 unsigned long mark, int highest_zoneidx, 3940 unsigned int alloc_flags, gfp_t gfp_mask) 3941 { 3942 long free_pages; 3943 3944 free_pages = zone_page_state(z, NR_FREE_PAGES); 3945 3946 /* 3947 * Fast check for order-0 only. If this fails then the reserves 3948 * need to be calculated. 3949 */ 3950 if (!order) { 3951 long fast_free; 3952 3953 fast_free = free_pages; 3954 fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags); 3955 if (fast_free > mark + z->lowmem_reserve[highest_zoneidx]) 3956 return true; 3957 } 3958 3959 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3960 free_pages)) 3961 return true; 3962 /* 3963 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations 3964 * when checking the min watermark. The min watermark is the 3965 * point where boosting is ignored so that kswapd is woken up 3966 * when below the low watermark. 3967 */ 3968 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost 3969 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3970 mark = z->_watermark[WMARK_MIN]; 3971 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3972 alloc_flags, free_pages); 3973 } 3974 3975 return false; 3976 } 3977 3978 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3979 unsigned long mark, int highest_zoneidx) 3980 { 3981 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3982 3983 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3984 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3985 3986 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3987 free_pages); 3988 } 3989 3990 #ifdef CONFIG_NUMA 3991 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3992 3993 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3994 { 3995 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3996 node_reclaim_distance; 3997 } 3998 #else /* CONFIG_NUMA */ 3999 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 4000 { 4001 return true; 4002 } 4003 #endif /* CONFIG_NUMA */ 4004 4005 /* 4006 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 4007 * fragmentation is subtle. If the preferred zone was HIGHMEM then 4008 * premature use of a lower zone may cause lowmem pressure problems that 4009 * are worse than fragmentation. 
If the next zone is ZONE_DMA then it is 4010 * probably too small. It only makes sense to spread allocations to avoid 4011 * fragmentation between the Normal and DMA32 zones. 4012 */ 4013 static inline unsigned int 4014 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 4015 { 4016 unsigned int alloc_flags; 4017 4018 /* 4019 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4020 * to save a branch. 4021 */ 4022 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 4023 4024 #ifdef CONFIG_ZONE_DMA32 4025 if (!zone) 4026 return alloc_flags; 4027 4028 if (zone_idx(zone) != ZONE_NORMAL) 4029 return alloc_flags; 4030 4031 /* 4032 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 4033 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 4034 * on UMA that if Normal is populated then so is DMA32. 4035 */ 4036 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 4037 if (nr_online_nodes > 1 && !populated_zone(--zone)) 4038 return alloc_flags; 4039 4040 alloc_flags |= ALLOC_NOFRAGMENT; 4041 #endif /* CONFIG_ZONE_DMA32 */ 4042 return alloc_flags; 4043 } 4044 4045 /* Must be called after current_gfp_context() which can change gfp_mask */ 4046 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 4047 unsigned int alloc_flags) 4048 { 4049 #ifdef CONFIG_CMA 4050 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 4051 alloc_flags |= ALLOC_CMA; 4052 #endif 4053 return alloc_flags; 4054 } 4055 4056 /* 4057 * get_page_from_freelist goes through the zonelist trying to allocate 4058 * a page. 4059 */ 4060 static struct page * 4061 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 4062 const struct alloc_context *ac) 4063 { 4064 struct zoneref *z; 4065 struct zone *zone; 4066 struct pglist_data *last_pgdat = NULL; 4067 bool last_pgdat_dirty_ok = false; 4068 bool no_fallback; 4069 4070 retry: 4071 /* 4072 * Scan zonelist, looking for a zone with enough free. 4073 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 4074 */ 4075 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 4076 z = ac->preferred_zoneref; 4077 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 4078 ac->nodemask) { 4079 struct page *page; 4080 unsigned long mark; 4081 4082 if (cpusets_enabled() && 4083 (alloc_flags & ALLOC_CPUSET) && 4084 !__cpuset_zone_allowed(zone, gfp_mask)) 4085 continue; 4086 /* 4087 * When allocating a page cache page for writing, we 4088 * want to get it from a node that is within its dirty 4089 * limit, such that no single node holds more than its 4090 * proportional share of globally allowed dirty pages. 4091 * The dirty limits take into account the node's 4092 * lowmem reserves and high watermark so that kswapd 4093 * should be able to balance it without having to 4094 * write pages from its LRU list. 4095 * 4096 * XXX: For now, allow allocations to potentially 4097 * exceed the per-node dirty limit in the slowpath 4098 * (spread_dirty_pages unset) before going into reclaim, 4099 * which is important when on a NUMA setup the allowed 4100 * nodes are together not big enough to reach the 4101 * global limit. The proper fix for these situations 4102 * will require awareness of nodes in the 4103 * dirty-throttling and the flusher threads. 
4104 */ 4105 if (ac->spread_dirty_pages) { 4106 if (last_pgdat != zone->zone_pgdat) { 4107 last_pgdat = zone->zone_pgdat; 4108 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 4109 } 4110 4111 if (!last_pgdat_dirty_ok) 4112 continue; 4113 } 4114 4115 if (no_fallback && nr_online_nodes > 1 && 4116 zone != ac->preferred_zoneref->zone) { 4117 int local_nid; 4118 4119 /* 4120 * If moving to a remote node, retry but allow 4121 * fragmenting fallbacks. Locality is more important 4122 * than fragmentation avoidance. 4123 */ 4124 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 4125 if (zone_to_nid(zone) != local_nid) { 4126 alloc_flags &= ~ALLOC_NOFRAGMENT; 4127 goto retry; 4128 } 4129 } 4130 4131 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 4132 if (!zone_watermark_fast(zone, order, mark, 4133 ac->highest_zoneidx, alloc_flags, 4134 gfp_mask)) { 4135 int ret; 4136 4137 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 4138 /* 4139 * Watermark failed for this zone, but see if we can 4140 * grow this zone if it contains deferred pages. 4141 */ 4142 if (static_branch_unlikely(&deferred_pages)) { 4143 if (_deferred_grow_zone(zone, order)) 4144 goto try_this_zone; 4145 } 4146 #endif 4147 /* Checked here to keep the fast path fast */ 4148 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 4149 if (alloc_flags & ALLOC_NO_WATERMARKS) 4150 goto try_this_zone; 4151 4152 if (!node_reclaim_enabled() || 4153 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 4154 continue; 4155 4156 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 4157 switch (ret) { 4158 case NODE_RECLAIM_NOSCAN: 4159 /* did not scan */ 4160 continue; 4161 case NODE_RECLAIM_FULL: 4162 /* scanned but unreclaimable */ 4163 continue; 4164 default: 4165 /* did we reclaim enough */ 4166 if (zone_watermark_ok(zone, order, mark, 4167 ac->highest_zoneidx, alloc_flags)) 4168 goto try_this_zone; 4169 4170 continue; 4171 } 4172 } 4173 4174 try_this_zone: 4175 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 4176 gfp_mask, alloc_flags, ac->migratetype); 4177 if (page) { 4178 prep_new_page(page, order, gfp_mask, alloc_flags); 4179 4180 /* 4181 * If this is a high-order atomic allocation then check 4182 * if the pageblock should be reserved for the future 4183 */ 4184 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 4185 reserve_highatomic_pageblock(page, zone, order); 4186 4187 return page; 4188 } else { 4189 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 4190 /* Try again if zone has deferred pages */ 4191 if (static_branch_unlikely(&deferred_pages)) { 4192 if (_deferred_grow_zone(zone, order)) 4193 goto try_this_zone; 4194 } 4195 #endif 4196 } 4197 } 4198 4199 /* 4200 * It's possible on a UMA machine to get through all zones that are 4201 * fragmented. If avoiding fragmentation, reset and try again. 4202 */ 4203 if (no_fallback) { 4204 alloc_flags &= ~ALLOC_NOFRAGMENT; 4205 goto retry; 4206 } 4207 4208 return NULL; 4209 } 4210 4211 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 4212 { 4213 unsigned int filter = SHOW_MEM_FILTER_NODES; 4214 4215 /* 4216 * This documents exceptions given to allocations in certain 4217 * contexts that are allowed to allocate outside current's set 4218 * of allowed nodes. 
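 * Concretely, the checks below drop the per-node filter for OOM victims
 * and PF_MEMALLOC/PF_EXITING tasks (unless __GFP_NOMEMALLOC is set), and
 * for allocations made outside task context or without
 * __GFP_DIRECT_RECLAIM, so that the whole of memory is dumped for them.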
4219 */ 4220 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4221 if (tsk_is_oom_victim(current) || 4222 (current->flags & (PF_MEMALLOC | PF_EXITING))) 4223 filter &= ~SHOW_MEM_FILTER_NODES; 4224 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 4225 filter &= ~SHOW_MEM_FILTER_NODES; 4226 4227 show_mem(filter, nodemask); 4228 } 4229 4230 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 4231 { 4232 struct va_format vaf; 4233 va_list args; 4234 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 4235 4236 if ((gfp_mask & __GFP_NOWARN) || 4237 !__ratelimit(&nopage_rs) || 4238 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 4239 return; 4240 4241 va_start(args, fmt); 4242 vaf.fmt = fmt; 4243 vaf.va = &args; 4244 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 4245 current->comm, &vaf, gfp_mask, &gfp_mask, 4246 nodemask_pr_args(nodemask)); 4247 va_end(args); 4248 4249 cpuset_print_current_mems_allowed(); 4250 pr_cont("\n"); 4251 dump_stack(); 4252 warn_alloc_show_mem(gfp_mask, nodemask); 4253 } 4254 4255 static inline struct page * 4256 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 4257 unsigned int alloc_flags, 4258 const struct alloc_context *ac) 4259 { 4260 struct page *page; 4261 4262 page = get_page_from_freelist(gfp_mask, order, 4263 alloc_flags|ALLOC_CPUSET, ac); 4264 /* 4265 * fallback to ignore cpuset restriction if our nodes 4266 * are depleted 4267 */ 4268 if (!page) 4269 page = get_page_from_freelist(gfp_mask, order, 4270 alloc_flags, ac); 4271 4272 return page; 4273 } 4274 4275 static inline struct page * 4276 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 4277 const struct alloc_context *ac, unsigned long *did_some_progress) 4278 { 4279 struct oom_control oc = { 4280 .zonelist = ac->zonelist, 4281 .nodemask = ac->nodemask, 4282 .memcg = NULL, 4283 .gfp_mask = gfp_mask, 4284 .order = order, 4285 }; 4286 struct page *page; 4287 4288 *did_some_progress = 0; 4289 4290 /* 4291 * Acquire the oom lock. If that fails, somebody else is 4292 * making progress for us. 4293 */ 4294 if (!mutex_trylock(&oom_lock)) { 4295 *did_some_progress = 1; 4296 schedule_timeout_uninterruptible(1); 4297 return NULL; 4298 } 4299 4300 /* 4301 * Go through the zonelist yet one more time, keep very high watermark 4302 * here, this is only to catch a parallel oom killing, we must fail if 4303 * we're still under heavy pressure. But make sure that this reclaim 4304 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 4305 * allocation which will never fail due to oom_lock already held. 4306 */ 4307 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 4308 ~__GFP_DIRECT_RECLAIM, order, 4309 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 4310 if (page) 4311 goto out; 4312 4313 /* Coredumps can quickly deplete all memory reserves */ 4314 if (current->flags & PF_DUMPCORE) 4315 goto out; 4316 /* The OOM killer will not help higher order allocs */ 4317 if (order > PAGE_ALLOC_COSTLY_ORDER) 4318 goto out; 4319 /* 4320 * We have already exhausted all our reclaim opportunities without any 4321 * success so it is time to admit defeat. We will skip the OOM killer 4322 * because it is very likely that the caller has a more reasonable 4323 * fallback than shooting a random task. 4324 * 4325 * The OOM killer may not free memory on a specific node. 
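 * Both cases are handled below: __GFP_RETRY_MAYFAIL and __GFP_THISNODE
 * requests therefore bail out instead of invoking the OOM killer.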
4326 */
4327 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
4328 goto out;
4329 /* The OOM killer does not needlessly kill tasks for lowmem */
4330 if (ac->highest_zoneidx < ZONE_NORMAL)
4331 goto out;
4332 if (pm_suspended_storage())
4333 goto out;
4334 /*
4335 * XXX: GFP_NOFS allocations should rather fail than rely on
4336 * other requests to make forward progress.
4337 * We are in an unfortunate situation where out_of_memory cannot
4338 * do much for this context but let's try it to at least get
4339 * access to memory reserves if the current task is killed (see
4340 * out_of_memory). Once filesystems are ready to handle allocation
4341 * failures more gracefully we should just bail out here.
4342 */
4343
4344 /* Exhausted what can be done so it's blame time */
4345 if (out_of_memory(&oc) ||
4346 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
4347 *did_some_progress = 1;
4348
4349 /*
4350 * Help non-failing allocations by giving them access to memory
4351 * reserves
4352 */
4353 if (gfp_mask & __GFP_NOFAIL)
4354 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
4355 ALLOC_NO_WATERMARKS, ac);
4356 }
4357 out:
4358 mutex_unlock(&oom_lock);
4359 return page;
4360 }
4361
4362 /*
4363 * Maximum number of compaction retries with progress before the OOM
4364 * killer is considered the only way to move forward.
4365 */
4366 #define MAX_COMPACT_RETRIES 16
4367
4368 #ifdef CONFIG_COMPACTION
4369 /* Try memory compaction for high-order allocations before reclaim */
4370 static struct page *
4371 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
4372 unsigned int alloc_flags, const struct alloc_context *ac,
4373 enum compact_priority prio, enum compact_result *compact_result)
4374 {
4375 struct page *page = NULL;
4376 unsigned long pflags;
4377 unsigned int noreclaim_flag;
4378
4379 if (!order)
4380 return NULL;
4381
4382 psi_memstall_enter(&pflags);
4383 delayacct_compact_start();
4384 noreclaim_flag = memalloc_noreclaim_save();
4385
4386 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
4387 prio, &page);
4388
4389 memalloc_noreclaim_restore(noreclaim_flag);
4390 psi_memstall_leave(&pflags);
4391 delayacct_compact_end();
4392
4393 if (*compact_result == COMPACT_SKIPPED)
4394 return NULL;
4395 /*
4396 * In at least one zone compaction wasn't deferred or skipped, so let's
4397 * count a compaction stall
4398 */
4399 count_vm_event(COMPACTSTALL);
4400
4401 /* Prep a captured page if available */
4402 if (page)
4403 prep_new_page(page, order, gfp_mask, alloc_flags);
4404
4405 /* Try to get a page from the freelist if available */
4406 if (!page)
4407 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4408
4409 if (page) {
4410 struct zone *zone = page_zone(page);
4411
4412 zone->compact_blockskip_flush = false;
4413 compaction_defer_reset(zone, order, true);
4414 count_vm_event(COMPACTSUCCESS);
4415 return page;
4416 }
4417
4418 /*
4419 * It's bad if a compaction run occurs and fails. The most likely reason
4420 * is that pages exist, but not enough to satisfy watermarks.
4421 */ 4422 count_vm_event(COMPACTFAIL); 4423 4424 cond_resched(); 4425 4426 return NULL; 4427 } 4428 4429 static inline bool 4430 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4431 enum compact_result compact_result, 4432 enum compact_priority *compact_priority, 4433 int *compaction_retries) 4434 { 4435 int max_retries = MAX_COMPACT_RETRIES; 4436 int min_priority; 4437 bool ret = false; 4438 int retries = *compaction_retries; 4439 enum compact_priority priority = *compact_priority; 4440 4441 if (!order) 4442 return false; 4443 4444 if (fatal_signal_pending(current)) 4445 return false; 4446 4447 if (compaction_made_progress(compact_result)) 4448 (*compaction_retries)++; 4449 4450 /* 4451 * compaction considers all the zone as desperately out of memory 4452 * so it doesn't really make much sense to retry except when the 4453 * failure could be caused by insufficient priority 4454 */ 4455 if (compaction_failed(compact_result)) 4456 goto check_priority; 4457 4458 /* 4459 * compaction was skipped because there are not enough order-0 pages 4460 * to work with, so we retry only if it looks like reclaim can help. 4461 */ 4462 if (compaction_needs_reclaim(compact_result)) { 4463 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4464 goto out; 4465 } 4466 4467 /* 4468 * make sure the compaction wasn't deferred or didn't bail out early 4469 * due to locks contention before we declare that we should give up. 4470 * But the next retry should use a higher priority if allowed, so 4471 * we don't just keep bailing out endlessly. 4472 */ 4473 if (compaction_withdrawn(compact_result)) { 4474 goto check_priority; 4475 } 4476 4477 /* 4478 * !costly requests are much more important than __GFP_RETRY_MAYFAIL 4479 * costly ones because they are de facto nofail and invoke OOM 4480 * killer to move on while costly can fail and users are ready 4481 * to cope with that. 1/4 retries is rather arbitrary but we 4482 * would need much more detailed feedback from compaction to 4483 * make a better decision. 4484 */ 4485 if (order > PAGE_ALLOC_COSTLY_ORDER) 4486 max_retries /= 4; 4487 if (*compaction_retries <= max_retries) { 4488 ret = true; 4489 goto out; 4490 } 4491 4492 /* 4493 * Make sure there are attempts at the highest priority if we exhausted 4494 * all retries or failed at the lower priorities. 4495 */ 4496 check_priority: 4497 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
4498 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4499 4500 if (*compact_priority > min_priority) { 4501 (*compact_priority)--; 4502 *compaction_retries = 0; 4503 ret = true; 4504 } 4505 out: 4506 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4507 return ret; 4508 } 4509 #else 4510 static inline struct page * 4511 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4512 unsigned int alloc_flags, const struct alloc_context *ac, 4513 enum compact_priority prio, enum compact_result *compact_result) 4514 { 4515 *compact_result = COMPACT_SKIPPED; 4516 return NULL; 4517 } 4518 4519 static inline bool 4520 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 4521 enum compact_result compact_result, 4522 enum compact_priority *compact_priority, 4523 int *compaction_retries) 4524 { 4525 struct zone *zone; 4526 struct zoneref *z; 4527 4528 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4529 return false; 4530 4531 /* 4532 * There are setups with compaction disabled which would prefer to loop 4533 * inside the allocator rather than hit the oom killer prematurely. 4534 * Let's give them a good hope and keep retrying while the order-0 4535 * watermarks are OK. 4536 */ 4537 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4538 ac->highest_zoneidx, ac->nodemask) { 4539 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4540 ac->highest_zoneidx, alloc_flags)) 4541 return true; 4542 } 4543 return false; 4544 } 4545 #endif /* CONFIG_COMPACTION */ 4546 4547 #ifdef CONFIG_LOCKDEP 4548 static struct lockdep_map __fs_reclaim_map = 4549 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4550 4551 static bool __need_reclaim(gfp_t gfp_mask) 4552 { 4553 /* no reclaim without waiting on it */ 4554 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4555 return false; 4556 4557 /* this guy won't enter reclaim */ 4558 if (current->flags & PF_MEMALLOC) 4559 return false; 4560 4561 if (gfp_mask & __GFP_NOLOCKDEP) 4562 return false; 4563 4564 return true; 4565 } 4566 4567 void __fs_reclaim_acquire(unsigned long ip) 4568 { 4569 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4570 } 4571 4572 void __fs_reclaim_release(unsigned long ip) 4573 { 4574 lock_release(&__fs_reclaim_map, ip); 4575 } 4576 4577 void fs_reclaim_acquire(gfp_t gfp_mask) 4578 { 4579 gfp_mask = current_gfp_context(gfp_mask); 4580 4581 if (__need_reclaim(gfp_mask)) { 4582 if (gfp_mask & __GFP_FS) 4583 __fs_reclaim_acquire(_RET_IP_); 4584 4585 #ifdef CONFIG_MMU_NOTIFIER 4586 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4587 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4588 #endif 4589 4590 } 4591 } 4592 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4593 4594 void fs_reclaim_release(gfp_t gfp_mask) 4595 { 4596 gfp_mask = current_gfp_context(gfp_mask); 4597 4598 if (__need_reclaim(gfp_mask)) { 4599 if (gfp_mask & __GFP_FS) 4600 __fs_reclaim_release(_RET_IP_); 4601 } 4602 } 4603 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4604 #endif 4605 4606 /* Perform direct synchronous page reclaim */ 4607 static unsigned long 4608 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4609 const struct alloc_context *ac) 4610 { 4611 unsigned int noreclaim_flag; 4612 unsigned long progress; 4613 4614 cond_resched(); 4615 4616 /* We now go into synchronous reclaim */ 4617 cpuset_memory_pressure_bump(); 4618 fs_reclaim_acquire(gfp_mask); 4619 noreclaim_flag = memalloc_noreclaim_save(); 4620 4621 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 
4622 ac->nodemask); 4623 4624 memalloc_noreclaim_restore(noreclaim_flag); 4625 fs_reclaim_release(gfp_mask); 4626 4627 cond_resched(); 4628 4629 return progress; 4630 } 4631 4632 /* The really slow allocator path where we enter direct reclaim */ 4633 static inline struct page * 4634 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4635 unsigned int alloc_flags, const struct alloc_context *ac, 4636 unsigned long *did_some_progress) 4637 { 4638 struct page *page = NULL; 4639 unsigned long pflags; 4640 bool drained = false; 4641 4642 psi_memstall_enter(&pflags); 4643 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4644 if (unlikely(!(*did_some_progress))) 4645 goto out; 4646 4647 retry: 4648 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4649 4650 /* 4651 * If an allocation failed after direct reclaim, it could be because 4652 * pages are pinned on the per-cpu lists or in high alloc reserves. 4653 * Shrink them and try again 4654 */ 4655 if (!page && !drained) { 4656 unreserve_highatomic_pageblock(ac, false); 4657 drain_all_pages(NULL); 4658 drained = true; 4659 goto retry; 4660 } 4661 out: 4662 psi_memstall_leave(&pflags); 4663 4664 return page; 4665 } 4666 4667 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4668 const struct alloc_context *ac) 4669 { 4670 struct zoneref *z; 4671 struct zone *zone; 4672 pg_data_t *last_pgdat = NULL; 4673 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4674 4675 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4676 ac->nodemask) { 4677 if (!managed_zone(zone)) 4678 continue; 4679 if (last_pgdat != zone->zone_pgdat) { 4680 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 4681 last_pgdat = zone->zone_pgdat; 4682 } 4683 } 4684 } 4685 4686 static inline unsigned int 4687 gfp_to_alloc_flags(gfp_t gfp_mask) 4688 { 4689 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4690 4691 /* 4692 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH 4693 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4694 * to save two branches. 4695 */ 4696 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 4697 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4698 4699 /* 4700 * The caller may dip into page reserves a bit more if the caller 4701 * cannot run direct reclaim, or if the caller has realtime scheduling 4702 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4703 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 4704 */ 4705 alloc_flags |= (__force int) 4706 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4707 4708 if (gfp_mask & __GFP_ATOMIC) { 4709 /* 4710 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4711 * if it can't schedule. 4712 */ 4713 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4714 alloc_flags |= ALLOC_HARDER; 4715 /* 4716 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 4717 * comment for __cpuset_node_allowed(). 
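 *
 * As a concrete example, a plain GFP_ATOMIC request typically ends up
 * with ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_KSWAPD | ALLOC_HARDER and
 * without ALLOC_CPUSET after this function.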
4718 */ 4719 alloc_flags &= ~ALLOC_CPUSET; 4720 } else if (unlikely(rt_task(current)) && in_task()) 4721 alloc_flags |= ALLOC_HARDER; 4722 4723 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4724 4725 return alloc_flags; 4726 } 4727 4728 static bool oom_reserves_allowed(struct task_struct *tsk) 4729 { 4730 if (!tsk_is_oom_victim(tsk)) 4731 return false; 4732 4733 /* 4734 * !MMU doesn't have oom reaper so give access to memory reserves 4735 * only to the thread with TIF_MEMDIE set 4736 */ 4737 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4738 return false; 4739 4740 return true; 4741 } 4742 4743 /* 4744 * Distinguish requests which really need access to full memory 4745 * reserves from oom victims which can live with a portion of it 4746 */ 4747 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4748 { 4749 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4750 return 0; 4751 if (gfp_mask & __GFP_MEMALLOC) 4752 return ALLOC_NO_WATERMARKS; 4753 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4754 return ALLOC_NO_WATERMARKS; 4755 if (!in_interrupt()) { 4756 if (current->flags & PF_MEMALLOC) 4757 return ALLOC_NO_WATERMARKS; 4758 else if (oom_reserves_allowed(current)) 4759 return ALLOC_OOM; 4760 } 4761 4762 return 0; 4763 } 4764 4765 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4766 { 4767 return !!__gfp_pfmemalloc_flags(gfp_mask); 4768 } 4769 4770 /* 4771 * Checks whether it makes sense to retry the reclaim to make a forward progress 4772 * for the given allocation request. 4773 * 4774 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4775 * without success, or when we couldn't even meet the watermark if we 4776 * reclaimed all remaining pages on the LRU lists. 4777 * 4778 * Returns true if a retry is viable or false to enter the oom path. 4779 */ 4780 static inline bool 4781 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4782 struct alloc_context *ac, int alloc_flags, 4783 bool did_some_progress, int *no_progress_loops) 4784 { 4785 struct zone *zone; 4786 struct zoneref *z; 4787 bool ret = false; 4788 4789 /* 4790 * Costly allocations might have made a progress but this doesn't mean 4791 * their order will become available due to high fragmentation so 4792 * always increment the no progress counter for them 4793 */ 4794 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4795 *no_progress_loops = 0; 4796 else 4797 (*no_progress_loops)++; 4798 4799 /* 4800 * Make sure we converge to OOM if we cannot make any progress 4801 * several times in the row. 4802 */ 4803 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 4804 /* Before OOM, exhaust highatomic_reserve */ 4805 return unreserve_highatomic_pageblock(ac, true); 4806 } 4807 4808 /* 4809 * Keep reclaiming pages while there is a chance this will lead 4810 * somewhere. If none of the target zones can satisfy our allocation 4811 * request even if all reclaimable pages are considered then we are 4812 * screwed and have to go OOM. 4813 */ 4814 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4815 ac->highest_zoneidx, ac->nodemask) { 4816 unsigned long available; 4817 unsigned long reclaimable; 4818 unsigned long min_wmark = min_wmark_pages(zone); 4819 bool wmark; 4820 4821 available = reclaimable = zone_reclaimable_pages(zone); 4822 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4823 4824 /* 4825 * Would the allocation succeed if we reclaimed all 4826 * reclaimable pages? 
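 * Here 'available' is the zone's reclaimable pages plus a snapshot of
 * its free pages, i.e. the best case after reclaiming everything.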
4827 */ 4828 wmark = __zone_watermark_ok(zone, order, min_wmark, 4829 ac->highest_zoneidx, alloc_flags, available); 4830 trace_reclaim_retry_zone(z, order, reclaimable, 4831 available, min_wmark, *no_progress_loops, wmark); 4832 if (wmark) { 4833 ret = true; 4834 break; 4835 } 4836 } 4837 4838 /* 4839 * Memory allocation/reclaim might be called from a WQ context and the 4840 * current implementation of the WQ concurrency control doesn't 4841 * recognize that a particular WQ is congested if the worker thread is 4842 * looping without ever sleeping. Therefore we have to do a short sleep 4843 * here rather than calling cond_resched(). 4844 */ 4845 if (current->flags & PF_WQ_WORKER) 4846 schedule_timeout_uninterruptible(1); 4847 else 4848 cond_resched(); 4849 return ret; 4850 } 4851 4852 static inline bool 4853 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4854 { 4855 /* 4856 * It's possible that cpuset's mems_allowed and the nodemask from 4857 * mempolicy don't intersect. This should be normally dealt with by 4858 * policy_nodemask(), but it's possible to race with cpuset update in 4859 * such a way the check therein was true, and then it became false 4860 * before we got our cpuset_mems_cookie here. 4861 * This assumes that for all allocations, ac->nodemask can come only 4862 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4863 * when it does not intersect with the cpuset restrictions) or the 4864 * caller can deal with a violated nodemask. 4865 */ 4866 if (cpusets_enabled() && ac->nodemask && 4867 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4868 ac->nodemask = NULL; 4869 return true; 4870 } 4871 4872 /* 4873 * When updating a task's mems_allowed or mempolicy nodemask, it is 4874 * possible to race with parallel threads in such a way that our 4875 * allocation can fail while the mask is being updated. If we are about 4876 * to fail, check if the cpuset changed during allocation and if so, 4877 * retry. 4878 */ 4879 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4880 return true; 4881 4882 return false; 4883 } 4884 4885 static inline struct page * 4886 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4887 struct alloc_context *ac) 4888 { 4889 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4890 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4891 struct page *page = NULL; 4892 unsigned int alloc_flags; 4893 unsigned long did_some_progress; 4894 enum compact_priority compact_priority; 4895 enum compact_result compact_result; 4896 int compaction_retries; 4897 int no_progress_loops; 4898 unsigned int cpuset_mems_cookie; 4899 int reserve_flags; 4900 4901 /* 4902 * We also sanity check to catch abuse of atomic reserves being used by 4903 * callers that are not in atomic context. 4904 */ 4905 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 4906 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 4907 gfp_mask &= ~__GFP_ATOMIC; 4908 4909 retry_cpuset: 4910 compaction_retries = 0; 4911 no_progress_loops = 0; 4912 compact_priority = DEF_COMPACT_PRIORITY; 4913 cpuset_mems_cookie = read_mems_allowed_begin(); 4914 4915 /* 4916 * The fast path uses conservative alloc_flags to succeed only until 4917 * kswapd needs to be woken up, and to avoid the cost of setting up 4918 * alloc_flags precisely. So we do that now. 
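 * (gfp_to_alloc_flags() may, for instance, add ALLOC_HARDER for atomic
 * or realtime callers and drop ALLOC_CPUSET for GFP_ATOMIC requests.)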
4919 */
4920 alloc_flags = gfp_to_alloc_flags(gfp_mask);
4921
4922 /*
4923 * We need to recalculate the starting point for the zonelist iterator
4924 * because we might have used a different nodemask in the fast path, or
4925 * there was a cpuset modification and we are retrying - otherwise we
4926 * could end up iterating over non-eligible zones endlessly.
4927 */
4928 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4929 ac->highest_zoneidx, ac->nodemask);
4930 if (!ac->preferred_zoneref->zone)
4931 goto nopage;
4932
4933 /*
4934 * Check for insane configurations where the cpuset doesn't contain
4935 * any suitable zone to satisfy the request - e.g. non-movable
4936 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4937 */
4938 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4939 struct zoneref *z = first_zones_zonelist(ac->zonelist,
4940 ac->highest_zoneidx,
4941 &cpuset_current_mems_allowed);
4942 if (!z->zone)
4943 goto nopage;
4944 }
4945
4946 if (alloc_flags & ALLOC_KSWAPD)
4947 wake_all_kswapds(order, gfp_mask, ac);
4948
4949 /*
4950 * The adjusted alloc_flags might result in immediate success, so try
4951 * that first
4952 */
4953 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4954 if (page)
4955 goto got_pg;
4956
4957 /*
4958 * For costly allocations, try direct compaction first, as it's likely
4959 * that we have enough base pages and don't need to reclaim. For non-
4960 * movable high-order allocations, do that as well, as compaction will
4961 * try to prevent permanent fragmentation by migrating from blocks of the
4962 * same migratetype.
4963 * Don't try this for allocations that are allowed to ignore
4964 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
4965 */
4966 if (can_direct_reclaim &&
4967 (costly_order ||
4968 (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
4969 && !gfp_pfmemalloc_allowed(gfp_mask)) {
4970 page = __alloc_pages_direct_compact(gfp_mask, order,
4971 alloc_flags, ac,
4972 INIT_COMPACT_PRIORITY,
4973 &compact_result);
4974 if (page)
4975 goto got_pg;
4976
4977 /*
4978 * Checks for costly allocations with __GFP_NORETRY, which
4979 * includes some THP page fault allocations
4980 */
4981 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4982 /*
4983 * If allocating entire pageblock(s) and compaction
4984 * failed because all zones are below low watermarks
4985 * or is prohibited because it recently failed at this
4986 * order, fail immediately unless the allocator has
4987 * requested compaction and reclaim retry.
4988 *
4989 * Reclaim is
4990 * - potentially very expensive because zones are far
4991 * below their low watermarks or this is part of very
4992 * bursty high order allocations,
4993 * - not guaranteed to help because isolate_freepages()
4994 * may not iterate over freed pages as part of its
4995 * linear scan, and
4996 * - unlikely to make entire pageblocks free on its
4997 * own.
4998 */
4999 if (compact_result == COMPACT_SKIPPED ||
5000 compact_result == COMPACT_DEFERRED)
5001 goto nopage;
5002
5003 /*
5004 * Looks like reclaim/compaction is worth trying, but
5005 * sync compaction could be very expensive, so keep
5006 * using async compaction.
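 * (Keeping compact_priority at INIT_COMPACT_PRIORITY selects the
 * asynchronous compaction mode.)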
5007 */ 5008 compact_priority = INIT_COMPACT_PRIORITY; 5009 } 5010 } 5011 5012 retry: 5013 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 5014 if (alloc_flags & ALLOC_KSWAPD) 5015 wake_all_kswapds(order, gfp_mask, ac); 5016 5017 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 5018 if (reserve_flags) 5019 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags); 5020 5021 /* 5022 * Reset the nodemask and zonelist iterators if memory policies can be 5023 * ignored. These allocations are high priority and system rather than 5024 * user oriented. 5025 */ 5026 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 5027 ac->nodemask = NULL; 5028 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5029 ac->highest_zoneidx, ac->nodemask); 5030 } 5031 5032 /* Attempt with potentially adjusted zonelist and alloc_flags */ 5033 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 5034 if (page) 5035 goto got_pg; 5036 5037 /* Caller is not willing to reclaim, we can't balance anything */ 5038 if (!can_direct_reclaim) 5039 goto nopage; 5040 5041 /* Avoid recursion of direct reclaim */ 5042 if (current->flags & PF_MEMALLOC) 5043 goto nopage; 5044 5045 /* Try direct reclaim and then allocating */ 5046 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 5047 &did_some_progress); 5048 if (page) 5049 goto got_pg; 5050 5051 /* Try direct compaction and then allocating */ 5052 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 5053 compact_priority, &compact_result); 5054 if (page) 5055 goto got_pg; 5056 5057 /* Do not loop if specifically requested */ 5058 if (gfp_mask & __GFP_NORETRY) 5059 goto nopage; 5060 5061 /* 5062 * Do not retry costly high order allocations unless they are 5063 * __GFP_RETRY_MAYFAIL 5064 */ 5065 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) 5066 goto nopage; 5067 5068 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 5069 did_some_progress > 0, &no_progress_loops)) 5070 goto retry; 5071 5072 /* 5073 * It doesn't make any sense to retry for the compaction if the order-0 5074 * reclaim is not able to make any progress because the current 5075 * implementation of the compaction depends on the sufficient amount 5076 * of free memory (see __compaction_suitable) 5077 */ 5078 if (did_some_progress > 0 && 5079 should_compact_retry(ac, order, alloc_flags, 5080 compact_result, &compact_priority, 5081 &compaction_retries)) 5082 goto retry; 5083 5084 5085 /* Deal with possible cpuset update races before we start OOM killing */ 5086 if (check_retry_cpuset(cpuset_mems_cookie, ac)) 5087 goto retry_cpuset; 5088 5089 /* Reclaim has failed us, start killing things */ 5090 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 5091 if (page) 5092 goto got_pg; 5093 5094 /* Avoid allocations with no watermarks from looping endlessly */ 5095 if (tsk_is_oom_victim(current) && 5096 (alloc_flags & ALLOC_OOM || 5097 (gfp_mask & __GFP_NOMEMALLOC))) 5098 goto nopage; 5099 5100 /* Retry as long as the OOM killer is making progress */ 5101 if (did_some_progress) { 5102 no_progress_loops = 0; 5103 goto retry; 5104 } 5105 5106 nopage: 5107 /* Deal with possible cpuset update races before we fail */ 5108 if (check_retry_cpuset(cpuset_mems_cookie, ac)) 5109 goto retry_cpuset; 5110 5111 /* 5112 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 5113 * we always retry 5114 */ 5115 if (gfp_mask & __GFP_NOFAIL) { 5116 /* 5117 * All existing users of the __GFP_NOFAIL are blockable, so 
warn
5118 * of any new users that actually require GFP_NOWAIT
5119 */
5120 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
5121 goto fail;
5122
5123 /*
5124 * A PF_MEMALLOC request from this context is rather bizarre
5125 * because we cannot reclaim anything and can only loop waiting
5126 * for somebody to do the work for us
5127 */
5128 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
5129
5130 /*
5131 * non-failing costly orders are a hard requirement which we
5132 * are not well prepared for, so let's warn about these users
5133 * so that we can identify them and convert them to something
5134 * else.
5135 */
5136 WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask);
5137
5138 /*
5139 * Help non-failing allocations by giving them access to memory
5140 * reserves but do not use ALLOC_NO_WATERMARKS because this
5141 * could deplete whole memory reserves which would just make
5142 * the situation worse
5143 */
5144 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
5145 if (page)
5146 goto got_pg;
5147
5148 cond_resched();
5149 goto retry;
5150 }
5151 fail:
5152 warn_alloc(gfp_mask, ac->nodemask,
5153 "page allocation failure: order:%u", order);
5154 got_pg:
5155 return page;
5156 }
5157
5158 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
5159 int preferred_nid, nodemask_t *nodemask,
5160 struct alloc_context *ac, gfp_t *alloc_gfp,
5161 unsigned int *alloc_flags)
5162 {
5163 ac->highest_zoneidx = gfp_zone(gfp_mask);
5164 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
5165 ac->nodemask = nodemask;
5166 ac->migratetype = gfp_migratetype(gfp_mask);
5167
5168 if (cpusets_enabled()) {
5169 *alloc_gfp |= __GFP_HARDWALL;
5170 /*
5171 * When we are in interrupt context, it is irrelevant
5172 * to the current task context, which means that any node is ok.
5173 */
5174 if (in_task() && !ac->nodemask)
5175 ac->nodemask = &cpuset_current_mems_allowed;
5176 else
5177 *alloc_flags |= ALLOC_CPUSET;
5178 }
5179
5180 fs_reclaim_acquire(gfp_mask);
5181 fs_reclaim_release(gfp_mask);
5182
5183 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
5184
5185 if (should_fail_alloc_page(gfp_mask, order))
5186 return false;
5187
5188 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
5189
5190 /* Dirty zone balancing only done in the fast path */
5191 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
5192
5193 /*
5194 * The preferred zone is used for statistics but crucially it is
5195 * also used as the starting point for the zonelist iterator. It
5196 * may get reset for allocations that ignore memory policies.
5197 */
5198 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
5199 ac->highest_zoneidx, ac->nodemask);
5200
5201 return true;
5202 }
5203
5204 /*
5205 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
5206 * @gfp: GFP flags for the allocation
5207 * @preferred_nid: The preferred NUMA node ID to allocate from
5208 * @nodemask: Set of nodes to allocate from, may be NULL
5209 * @nr_pages: The number of pages desired on the list or array
5210 * @page_list: Optional list to store the allocated pages
5211 * @page_array: Optional array to store the pages
5212 *
5213 * This is a batched version of the page allocator that attempts to
5214 * allocate nr_pages quickly. Pages are added to page_list if page_list
5215 * is not NULL, otherwise it is assumed that the page_array is valid.
5216 *
5217 * For lists, nr_pages is the number of pages that should be allocated.
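 *
 * For illustration, a minimal list-based caller might look like this
 * (sketch only, error handling and freeing omitted):
 *
 *     LIST_HEAD(pages);
 *     unsigned long nr;
 *
 *     nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL, 16,
 *                             &pages, NULL);
 *
 * after which nr pages are linked on the list via their ->lru field.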
5218 * 5219 * For arrays, only NULL elements are populated with pages and nr_pages 5220 * is the maximum number of pages that will be stored in the array. 5221 * 5222 * Returns the number of pages on the list or array. 5223 */ 5224 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, 5225 nodemask_t *nodemask, int nr_pages, 5226 struct list_head *page_list, 5227 struct page **page_array) 5228 { 5229 struct page *page; 5230 unsigned long flags; 5231 struct zone *zone; 5232 struct zoneref *z; 5233 struct per_cpu_pages *pcp; 5234 struct list_head *pcp_list; 5235 struct alloc_context ac; 5236 gfp_t alloc_gfp; 5237 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5238 int nr_populated = 0, nr_account = 0; 5239 5240 /* 5241 * Skip populated array elements to determine if any pages need 5242 * to be allocated before disabling IRQs. 5243 */ 5244 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 5245 nr_populated++; 5246 5247 /* No pages requested? */ 5248 if (unlikely(nr_pages <= 0)) 5249 goto out; 5250 5251 /* Already populated array? */ 5252 if (unlikely(page_array && nr_pages - nr_populated == 0)) 5253 goto out; 5254 5255 /* Bulk allocator does not support memcg accounting. */ 5256 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT)) 5257 goto failed; 5258 5259 /* Use the single page allocator for one page. */ 5260 if (nr_pages - nr_populated == 1) 5261 goto failed; 5262 5263 #ifdef CONFIG_PAGE_OWNER 5264 /* 5265 * PAGE_OWNER may recurse into the allocator to allocate space to 5266 * save the stack with pagesets.lock held. Releasing/reacquiring 5267 * removes much of the performance benefit of bulk allocation so 5268 * force the caller to allocate one page at a time as it'll have 5269 * similar performance to added complexity to the bulk allocator. 5270 */ 5271 if (static_branch_unlikely(&page_owner_inited)) 5272 goto failed; 5273 #endif 5274 5275 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 5276 gfp &= gfp_allowed_mask; 5277 alloc_gfp = gfp; 5278 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 5279 goto out; 5280 gfp = alloc_gfp; 5281 5282 /* Find an allowed local zone that meets the low watermark. */ 5283 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 5284 unsigned long mark; 5285 5286 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 5287 !__cpuset_zone_allowed(zone, gfp)) { 5288 continue; 5289 } 5290 5291 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && 5292 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { 5293 goto failed; 5294 } 5295 5296 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 5297 if (zone_watermark_fast(zone, 0, mark, 5298 zonelist_zone_idx(ac.preferred_zoneref), 5299 alloc_flags, gfp)) { 5300 break; 5301 } 5302 } 5303 5304 /* 5305 * If there are no allowed local zones that meets the watermarks then 5306 * try to allocate a single page and reclaim if necessary. 
5307 */ 5308 if (unlikely(!zone)) 5309 goto failed; 5310 5311 /* Attempt the batch allocation */ 5312 local_lock_irqsave(&pagesets.lock, flags); 5313 pcp = this_cpu_ptr(zone->per_cpu_pageset); 5314 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 5315 5316 while (nr_populated < nr_pages) { 5317 5318 /* Skip existing pages */ 5319 if (page_array && page_array[nr_populated]) { 5320 nr_populated++; 5321 continue; 5322 } 5323 5324 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5325 pcp, pcp_list); 5326 if (unlikely(!page)) { 5327 /* Try and get at least one page */ 5328 if (!nr_populated) 5329 goto failed_irq; 5330 break; 5331 } 5332 nr_account++; 5333 5334 prep_new_page(page, 0, gfp, 0); 5335 if (page_list) 5336 list_add(&page->lru, page_list); 5337 else 5338 page_array[nr_populated] = page; 5339 nr_populated++; 5340 } 5341 5342 local_unlock_irqrestore(&pagesets.lock, flags); 5343 5344 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 5345 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); 5346 5347 out: 5348 return nr_populated; 5349 5350 failed_irq: 5351 local_unlock_irqrestore(&pagesets.lock, flags); 5352 5353 failed: 5354 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); 5355 if (page) { 5356 if (page_list) 5357 list_add(&page->lru, page_list); 5358 else 5359 page_array[nr_populated] = page; 5360 nr_populated++; 5361 } 5362 5363 goto out; 5364 } 5365 EXPORT_SYMBOL_GPL(__alloc_pages_bulk); 5366 5367 /* 5368 * This is the 'heart' of the zoned buddy allocator. 5369 */ 5370 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, 5371 nodemask_t *nodemask) 5372 { 5373 struct page *page; 5374 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5375 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 5376 struct alloc_context ac = { }; 5377 5378 /* 5379 * There are several places where we assume that the order value is sane 5380 * so bail out early if the request is out of bound. 5381 */ 5382 if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp)) 5383 return NULL; 5384 5385 gfp &= gfp_allowed_mask; 5386 /* 5387 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 5388 * resp. GFP_NOIO which has to be inherited for all allocation requests 5389 * from a particular context which has been marked by 5390 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 5391 * movable zones are not used during allocation. 5392 */ 5393 gfp = current_gfp_context(gfp); 5394 alloc_gfp = gfp; 5395 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 5396 &alloc_gfp, &alloc_flags)) 5397 return NULL; 5398 5399 /* 5400 * Forbid the first pass from falling back to types that fragment 5401 * memory until all local zones are considered. 5402 */ 5403 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 5404 5405 /* First allocation attempt */ 5406 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 5407 if (likely(page)) 5408 goto out; 5409 5410 alloc_gfp = gfp; 5411 ac.spread_dirty_pages = false; 5412 5413 /* 5414 * Restore the original nodemask if it was potentially replaced with 5415 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
5416 */ 5417 ac.nodemask = nodemask; 5418 5419 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 5420 5421 out: 5422 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page && 5423 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 5424 __free_pages(page, order); 5425 page = NULL; 5426 } 5427 5428 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5429 5430 return page; 5431 } 5432 EXPORT_SYMBOL(__alloc_pages); 5433 5434 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 5435 nodemask_t *nodemask) 5436 { 5437 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 5438 preferred_nid, nodemask); 5439 5440 if (page && order > 1) 5441 prep_transhuge_page(page); 5442 return (struct folio *)page; 5443 } 5444 EXPORT_SYMBOL(__folio_alloc); 5445 5446 /* 5447 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5448 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5449 * you need to access high mem. 5450 */ 5451 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 5452 { 5453 struct page *page; 5454 5455 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 5456 if (!page) 5457 return 0; 5458 return (unsigned long) page_address(page); 5459 } 5460 EXPORT_SYMBOL(__get_free_pages); 5461 5462 unsigned long get_zeroed_page(gfp_t gfp_mask) 5463 { 5464 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 5465 } 5466 EXPORT_SYMBOL(get_zeroed_page); 5467 5468 /** 5469 * __free_pages - Free pages allocated with alloc_pages(). 5470 * @page: The page pointer returned from alloc_pages(). 5471 * @order: The order of the allocation. 5472 * 5473 * This function can free multi-page allocations that are not compound 5474 * pages. It does not check that the @order passed in matches that of 5475 * the allocation, so it is easy to leak memory. Freeing more memory 5476 * than was allocated will probably emit a warning. 5477 * 5478 * If the last reference to this page is speculative, it will be released 5479 * by put_page() which only frees the first page of a non-compound 5480 * allocation. To prevent the remaining pages from being leaked, we free 5481 * the subsequent pages here. If you want to use the page's reference 5482 * count to decide when to free the allocation, you should allocate a 5483 * compound page, and use put_page() instead of __free_pages(). 5484 * 5485 * Context: May be called in interrupt context or while holding a normal 5486 * spinlock, but not in NMI context or while holding a raw spinlock. 5487 */ 5488 void __free_pages(struct page *page, unsigned int order) 5489 { 5490 if (put_page_testzero(page)) 5491 free_the_page(page, order); 5492 else if (!PageHead(page)) 5493 while (order-- > 0) 5494 free_the_page(page + (1 << order), order); 5495 } 5496 EXPORT_SYMBOL(__free_pages); 5497 5498 void free_pages(unsigned long addr, unsigned int order) 5499 { 5500 if (addr != 0) { 5501 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5502 __free_pages(virt_to_page((void *)addr), order); 5503 } 5504 } 5505 5506 EXPORT_SYMBOL(free_pages); 5507 5508 /* 5509 * Page Fragment: 5510 * An arbitrary-length arbitrary-offset area of memory which resides 5511 * within a 0 or higher order page. Multiple fragments within that page 5512 * are individually refcounted, in the page's reference counter. 5513 * 5514 * The page_frag functions below provide a simple allocation framework for 5515 * page fragments. 
This is used by the network stack and network device 5516 * drivers to provide a backing region of memory for use as either an 5517 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 5518 */ 5519 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 5520 gfp_t gfp_mask) 5521 { 5522 struct page *page = NULL; 5523 gfp_t gfp = gfp_mask; 5524 5525 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5526 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 5527 __GFP_NOMEMALLOC; 5528 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 5529 PAGE_FRAG_CACHE_MAX_ORDER); 5530 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 5531 #endif 5532 if (unlikely(!page)) 5533 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 5534 5535 nc->va = page ? page_address(page) : NULL; 5536 5537 return page; 5538 } 5539 5540 void __page_frag_cache_drain(struct page *page, unsigned int count) 5541 { 5542 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 5543 5544 if (page_ref_sub_and_test(page, count)) 5545 free_the_page(page, compound_order(page)); 5546 } 5547 EXPORT_SYMBOL(__page_frag_cache_drain); 5548 5549 void *page_frag_alloc_align(struct page_frag_cache *nc, 5550 unsigned int fragsz, gfp_t gfp_mask, 5551 unsigned int align_mask) 5552 { 5553 unsigned int size = PAGE_SIZE; 5554 struct page *page; 5555 int offset; 5556 5557 if (unlikely(!nc->va)) { 5558 refill: 5559 page = __page_frag_cache_refill(nc, gfp_mask); 5560 if (!page) 5561 return NULL; 5562 5563 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5564 /* if size can vary use size else just use PAGE_SIZE */ 5565 size = nc->size; 5566 #endif 5567 /* Even if we own the page, we do not use atomic_set(). 5568 * This would break get_page_unless_zero() users. 5569 */ 5570 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 5571 5572 /* reset page count bias and offset to start of new frag */ 5573 nc->pfmemalloc = page_is_pfmemalloc(page); 5574 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5575 nc->offset = size; 5576 } 5577 5578 offset = nc->offset - fragsz; 5579 if (unlikely(offset < 0)) { 5580 page = virt_to_page(nc->va); 5581 5582 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 5583 goto refill; 5584 5585 if (unlikely(nc->pfmemalloc)) { 5586 free_the_page(page, compound_order(page)); 5587 goto refill; 5588 } 5589 5590 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5591 /* if size can vary use size else just use PAGE_SIZE */ 5592 size = nc->size; 5593 #endif 5594 /* OK, page count is 0, we can safely set it */ 5595 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 5596 5597 /* reset page count bias and offset to start of new frag */ 5598 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5599 offset = size - fragsz; 5600 } 5601 5602 nc->pagecnt_bias--; 5603 offset &= align_mask; 5604 nc->offset = offset; 5605 5606 return nc->va + offset; 5607 } 5608 EXPORT_SYMBOL(page_frag_alloc_align); 5609 5610 /* 5611 * Frees a page fragment allocated out of either a compound or order 0 page. 
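 * A typical pairing with the allocation side looks roughly like this
 * (illustrative sketch only; the cache and error handling belong to the
 * caller):
 *
 *	buf = page_frag_alloc(&nc, fragsz, GFP_ATOMIC);
 *	if (buf) {
 *		... fill and hand off the fragment ...
 *		page_frag_free(buf);
 *	}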
5612 */ 5613 void page_frag_free(void *addr) 5614 { 5615 struct page *page = virt_to_head_page(addr); 5616 5617 if (unlikely(put_page_testzero(page))) 5618 free_the_page(page, compound_order(page)); 5619 } 5620 EXPORT_SYMBOL(page_frag_free); 5621 5622 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5623 size_t size) 5624 { 5625 if (addr) { 5626 unsigned long alloc_end = addr + (PAGE_SIZE << order); 5627 unsigned long used = addr + PAGE_ALIGN(size); 5628 5629 split_page(virt_to_page((void *)addr), order); 5630 while (used < alloc_end) { 5631 free_page(used); 5632 used += PAGE_SIZE; 5633 } 5634 } 5635 return (void *)addr; 5636 } 5637 5638 /** 5639 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5640 * @size: the number of bytes to allocate 5641 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5642 * 5643 * This function is similar to alloc_pages(), except that it allocates the 5644 * minimum number of pages to satisfy the request. alloc_pages() can only 5645 * allocate memory in power-of-two pages. 5646 * 5647 * This function is also limited by MAX_ORDER. 5648 * 5649 * Memory allocated by this function must be released by free_pages_exact(). 5650 * 5651 * Return: pointer to the allocated area or %NULL in case of error. 5652 */ 5653 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 5654 { 5655 unsigned int order = get_order(size); 5656 unsigned long addr; 5657 5658 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5659 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5660 5661 addr = __get_free_pages(gfp_mask, order); 5662 return make_alloc_exact(addr, order, size); 5663 } 5664 EXPORT_SYMBOL(alloc_pages_exact); 5665 5666 /** 5667 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5668 * pages on a node. 5669 * @nid: the preferred node ID where memory should be allocated 5670 * @size: the number of bytes to allocate 5671 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5672 * 5673 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5674 * back. 5675 * 5676 * Return: pointer to the allocated area or %NULL in case of error. 5677 */ 5678 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 5679 { 5680 unsigned int order = get_order(size); 5681 struct page *p; 5682 5683 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5684 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5685 5686 p = alloc_pages_node(nid, gfp_mask, order); 5687 if (!p) 5688 return NULL; 5689 return make_alloc_exact((unsigned long)page_address(p), order, size); 5690 } 5691 5692 /** 5693 * free_pages_exact - release memory allocated via alloc_pages_exact() 5694 * @virt: the value returned by alloc_pages_exact. 5695 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5696 * 5697 * Release the memory allocated by a previous call to alloc_pages_exact. 5698 */ 5699 void free_pages_exact(void *virt, size_t size) 5700 { 5701 unsigned long addr = (unsigned long)virt; 5702 unsigned long end = addr + PAGE_ALIGN(size); 5703 5704 while (addr < end) { 5705 free_page(addr); 5706 addr += PAGE_SIZE; 5707 } 5708 } 5709 EXPORT_SYMBOL(free_pages_exact); 5710 5711 /** 5712 * nr_free_zone_pages - count number of pages beyond high watermark 5713 * @offset: The zone index of the highest zone 5714 * 5715 * nr_free_zone_pages() counts the number of pages which are beyond the 5716 * high watermark within all zones at or below a given zone index. 
For each 5717 * zone, the number of pages is calculated as: 5718 * 5719 * nr_free_zone_pages = managed_pages - high_pages 5720 * 5721 * Return: number of pages beyond high watermark. 5722 */ 5723 static unsigned long nr_free_zone_pages(int offset) 5724 { 5725 struct zoneref *z; 5726 struct zone *zone; 5727 5728 /* Just pick one node, since fallback list is circular */ 5729 unsigned long sum = 0; 5730 5731 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5732 5733 for_each_zone_zonelist(zone, z, zonelist, offset) { 5734 unsigned long size = zone_managed_pages(zone); 5735 unsigned long high = high_wmark_pages(zone); 5736 if (size > high) 5737 sum += size - high; 5738 } 5739 5740 return sum; 5741 } 5742 5743 /** 5744 * nr_free_buffer_pages - count number of pages beyond high watermark 5745 * 5746 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5747 * watermark within ZONE_DMA and ZONE_NORMAL. 5748 * 5749 * Return: number of pages beyond high watermark within ZONE_DMA and 5750 * ZONE_NORMAL. 5751 */ 5752 unsigned long nr_free_buffer_pages(void) 5753 { 5754 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5755 } 5756 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5757 5758 static inline void show_node(struct zone *zone) 5759 { 5760 if (IS_ENABLED(CONFIG_NUMA)) 5761 printk("Node %d ", zone_to_nid(zone)); 5762 } 5763 5764 long si_mem_available(void) 5765 { 5766 long available; 5767 unsigned long pagecache; 5768 unsigned long wmark_low = 0; 5769 unsigned long pages[NR_LRU_LISTS]; 5770 unsigned long reclaimable; 5771 struct zone *zone; 5772 int lru; 5773 5774 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 5775 pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 5776 5777 for_each_zone(zone) 5778 wmark_low += low_wmark_pages(zone); 5779 5780 /* 5781 * Estimate the amount of memory available for userspace allocations, 5782 * without causing swapping. 5783 */ 5784 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; 5785 5786 /* 5787 * Not all the page cache can be freed, otherwise the system will 5788 * start swapping. Assume at least half of the page cache, or the 5789 * low watermark worth of cache, needs to stay. 5790 */ 5791 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 5792 pagecache -= min(pagecache / 2, wmark_low); 5793 available += pagecache; 5794 5795 /* 5796 * Part of the reclaimable slab and other kernel memory consists of 5797 * items that are in use, and cannot be freed. Cap this estimate at the 5798 * low watermark. 
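 * For example (illustrative numbers): with 512 MiB of reclaimable slab
 * and a cumulative low watermark of 64 MiB, min(reclaimable / 2, wmark_low)
 * picks the watermark, so 448 MiB of the reclaimable memory still counts
 * as available.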
5799 */ 5800 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + 5801 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); 5802 available += reclaimable - min(reclaimable / 2, wmark_low); 5803 5804 if (available < 0) 5805 available = 0; 5806 return available; 5807 } 5808 EXPORT_SYMBOL_GPL(si_mem_available); 5809 5810 void si_meminfo(struct sysinfo *val) 5811 { 5812 val->totalram = totalram_pages(); 5813 val->sharedram = global_node_page_state(NR_SHMEM); 5814 val->freeram = global_zone_page_state(NR_FREE_PAGES); 5815 val->bufferram = nr_blockdev_pages(); 5816 val->totalhigh = totalhigh_pages(); 5817 val->freehigh = nr_free_highpages(); 5818 val->mem_unit = PAGE_SIZE; 5819 } 5820 5821 EXPORT_SYMBOL(si_meminfo); 5822 5823 #ifdef CONFIG_NUMA 5824 void si_meminfo_node(struct sysinfo *val, int nid) 5825 { 5826 int zone_type; /* needs to be signed */ 5827 unsigned long managed_pages = 0; 5828 unsigned long managed_highpages = 0; 5829 unsigned long free_highpages = 0; 5830 pg_data_t *pgdat = NODE_DATA(nid); 5831 5832 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 5833 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); 5834 val->totalram = managed_pages; 5835 val->sharedram = node_page_state(pgdat, NR_SHMEM); 5836 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 5837 #ifdef CONFIG_HIGHMEM 5838 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 5839 struct zone *zone = &pgdat->node_zones[zone_type]; 5840 5841 if (is_highmem(zone)) { 5842 managed_highpages += zone_managed_pages(zone); 5843 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 5844 } 5845 } 5846 val->totalhigh = managed_highpages; 5847 val->freehigh = free_highpages; 5848 #else 5849 val->totalhigh = managed_highpages; 5850 val->freehigh = free_highpages; 5851 #endif 5852 val->mem_unit = PAGE_SIZE; 5853 } 5854 #endif 5855 5856 /* 5857 * Determine whether the node should be displayed or not, depending on whether 5858 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 5859 */ 5860 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 5861 { 5862 if (!(flags & SHOW_MEM_FILTER_NODES)) 5863 return false; 5864 5865 /* 5866 * no node mask - aka implicit memory numa policy. Do not bother with 5867 * the synchronization - read_mems_allowed_begin - because we do not 5868 * have to be precise here. 5869 */ 5870 if (!nodemask) 5871 nodemask = &cpuset_current_mems_allowed; 5872 5873 return !node_isset(nid, *nodemask); 5874 } 5875 5876 #define K(x) ((x) << (PAGE_SHIFT-10)) 5877 5878 static void show_migration_types(unsigned char type) 5879 { 5880 static const char types[MIGRATE_TYPES] = { 5881 [MIGRATE_UNMOVABLE] = 'U', 5882 [MIGRATE_MOVABLE] = 'M', 5883 [MIGRATE_RECLAIMABLE] = 'E', 5884 [MIGRATE_HIGHATOMIC] = 'H', 5885 #ifdef CONFIG_CMA 5886 [MIGRATE_CMA] = 'C', 5887 #endif 5888 #ifdef CONFIG_MEMORY_ISOLATION 5889 [MIGRATE_ISOLATE] = 'I', 5890 #endif 5891 }; 5892 char tmp[MIGRATE_TYPES + 1]; 5893 char *p = tmp; 5894 int i; 5895 5896 for (i = 0; i < MIGRATE_TYPES; i++) { 5897 if (type & (1 << i)) 5898 *p++ = types[i]; 5899 } 5900 5901 *p = '\0'; 5902 printk(KERN_CONT "(%s) ", tmp); 5903 } 5904 5905 /* 5906 * Show free area list (used inside shift_scroll-lock stuff) 5907 * We also calculate the percentage fragmentation. We do this by counting the 5908 * memory on each free list with the exception of the first item on the list. 5909 * 5910 * Bits in @filter: 5911 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 5912 * cpuset. 
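 * When filtering, a NULL @nodemask falls back to the current cpuset's
 * mems_allowed (see show_mem_node_skip() above).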
5913 */ 5914 void show_free_areas(unsigned int filter, nodemask_t *nodemask) 5915 { 5916 unsigned long free_pcp = 0; 5917 int cpu; 5918 struct zone *zone; 5919 pg_data_t *pgdat; 5920 5921 for_each_populated_zone(zone) { 5922 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5923 continue; 5924 5925 for_each_online_cpu(cpu) 5926 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 5927 } 5928 5929 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 5930 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 5931 " unevictable:%lu dirty:%lu writeback:%lu\n" 5932 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 5933 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 5934 " kernel_misc_reclaimable:%lu\n" 5935 " free:%lu free_pcp:%lu free_cma:%lu\n", 5936 global_node_page_state(NR_ACTIVE_ANON), 5937 global_node_page_state(NR_INACTIVE_ANON), 5938 global_node_page_state(NR_ISOLATED_ANON), 5939 global_node_page_state(NR_ACTIVE_FILE), 5940 global_node_page_state(NR_INACTIVE_FILE), 5941 global_node_page_state(NR_ISOLATED_FILE), 5942 global_node_page_state(NR_UNEVICTABLE), 5943 global_node_page_state(NR_FILE_DIRTY), 5944 global_node_page_state(NR_WRITEBACK), 5945 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), 5946 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), 5947 global_node_page_state(NR_FILE_MAPPED), 5948 global_node_page_state(NR_SHMEM), 5949 global_node_page_state(NR_PAGETABLE), 5950 global_zone_page_state(NR_BOUNCE), 5951 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), 5952 global_zone_page_state(NR_FREE_PAGES), 5953 free_pcp, 5954 global_zone_page_state(NR_FREE_CMA_PAGES)); 5955 5956 for_each_online_pgdat(pgdat) { 5957 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 5958 continue; 5959 5960 printk("Node %d" 5961 " active_anon:%lukB" 5962 " inactive_anon:%lukB" 5963 " active_file:%lukB" 5964 " inactive_file:%lukB" 5965 " unevictable:%lukB" 5966 " isolated(anon):%lukB" 5967 " isolated(file):%lukB" 5968 " mapped:%lukB" 5969 " dirty:%lukB" 5970 " writeback:%lukB" 5971 " shmem:%lukB" 5972 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5973 " shmem_thp: %lukB" 5974 " shmem_pmdmapped: %lukB" 5975 " anon_thp: %lukB" 5976 #endif 5977 " writeback_tmp:%lukB" 5978 " kernel_stack:%lukB" 5979 #ifdef CONFIG_SHADOW_CALL_STACK 5980 " shadow_call_stack:%lukB" 5981 #endif 5982 " pagetables:%lukB" 5983 " all_unreclaimable? %s" 5984 "\n", 5985 pgdat->node_id, 5986 K(node_page_state(pgdat, NR_ACTIVE_ANON)), 5987 K(node_page_state(pgdat, NR_INACTIVE_ANON)), 5988 K(node_page_state(pgdat, NR_ACTIVE_FILE)), 5989 K(node_page_state(pgdat, NR_INACTIVE_FILE)), 5990 K(node_page_state(pgdat, NR_UNEVICTABLE)), 5991 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 5992 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 5993 K(node_page_state(pgdat, NR_FILE_MAPPED)), 5994 K(node_page_state(pgdat, NR_FILE_DIRTY)), 5995 K(node_page_state(pgdat, NR_WRITEBACK)), 5996 K(node_page_state(pgdat, NR_SHMEM)), 5997 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5998 K(node_page_state(pgdat, NR_SHMEM_THPS)), 5999 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), 6000 K(node_page_state(pgdat, NR_ANON_THPS)), 6001 #endif 6002 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 6003 node_page_state(pgdat, NR_KERNEL_STACK_KB), 6004 #ifdef CONFIG_SHADOW_CALL_STACK 6005 node_page_state(pgdat, NR_KERNEL_SCS_KB), 6006 #endif 6007 K(node_page_state(pgdat, NR_PAGETABLE)), 6008 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
6009 "yes" : "no"); 6010 } 6011 6012 for_each_populated_zone(zone) { 6013 int i; 6014 6015 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6016 continue; 6017 6018 free_pcp = 0; 6019 for_each_online_cpu(cpu) 6020 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 6021 6022 show_node(zone); 6023 printk(KERN_CONT 6024 "%s" 6025 " free:%lukB" 6026 " boost:%lukB" 6027 " min:%lukB" 6028 " low:%lukB" 6029 " high:%lukB" 6030 " reserved_highatomic:%luKB" 6031 " active_anon:%lukB" 6032 " inactive_anon:%lukB" 6033 " active_file:%lukB" 6034 " inactive_file:%lukB" 6035 " unevictable:%lukB" 6036 " writepending:%lukB" 6037 " present:%lukB" 6038 " managed:%lukB" 6039 " mlocked:%lukB" 6040 " bounce:%lukB" 6041 " free_pcp:%lukB" 6042 " local_pcp:%ukB" 6043 " free_cma:%lukB" 6044 "\n", 6045 zone->name, 6046 K(zone_page_state(zone, NR_FREE_PAGES)), 6047 K(zone->watermark_boost), 6048 K(min_wmark_pages(zone)), 6049 K(low_wmark_pages(zone)), 6050 K(high_wmark_pages(zone)), 6051 K(zone->nr_reserved_highatomic), 6052 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 6053 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 6054 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 6055 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 6056 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 6057 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 6058 K(zone->present_pages), 6059 K(zone_managed_pages(zone)), 6060 K(zone_page_state(zone, NR_MLOCK)), 6061 K(zone_page_state(zone, NR_BOUNCE)), 6062 K(free_pcp), 6063 K(this_cpu_read(zone->per_cpu_pageset->count)), 6064 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 6065 printk("lowmem_reserve[]:"); 6066 for (i = 0; i < MAX_NR_ZONES; i++) 6067 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 6068 printk(KERN_CONT "\n"); 6069 } 6070 6071 for_each_populated_zone(zone) { 6072 unsigned int order; 6073 unsigned long nr[MAX_ORDER], flags, total = 0; 6074 unsigned char types[MAX_ORDER]; 6075 6076 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6077 continue; 6078 show_node(zone); 6079 printk(KERN_CONT "%s: ", zone->name); 6080 6081 spin_lock_irqsave(&zone->lock, flags); 6082 for (order = 0; order < MAX_ORDER; order++) { 6083 struct free_area *area = &zone->free_area[order]; 6084 int type; 6085 6086 nr[order] = area->nr_free; 6087 total += nr[order] << order; 6088 6089 types[order] = 0; 6090 for (type = 0; type < MIGRATE_TYPES; type++) { 6091 if (!free_area_empty(area, type)) 6092 types[order] |= 1 << type; 6093 } 6094 } 6095 spin_unlock_irqrestore(&zone->lock, flags); 6096 for (order = 0; order < MAX_ORDER; order++) { 6097 printk(KERN_CONT "%lu*%lukB ", 6098 nr[order], K(1UL) << order); 6099 if (nr[order]) 6100 show_migration_types(types[order]); 6101 } 6102 printk(KERN_CONT "= %lukB\n", K(total)); 6103 } 6104 6105 hugetlb_show_meminfo(); 6106 6107 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 6108 6109 show_swap_cache_info(); 6110 } 6111 6112 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 6113 { 6114 zoneref->zone = zone; 6115 zoneref->zone_idx = zone_idx(zone); 6116 } 6117 6118 /* 6119 * Builds allocation fallback zone lists. 6120 * 6121 * Add all populated zones of a node to the zonelist. 
6122 */ 6123 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 6124 { 6125 struct zone *zone; 6126 enum zone_type zone_type = MAX_NR_ZONES; 6127 int nr_zones = 0; 6128 6129 do { 6130 zone_type--; 6131 zone = pgdat->node_zones + zone_type; 6132 if (populated_zone(zone)) { 6133 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 6134 check_highest_zone(zone_type); 6135 } 6136 } while (zone_type); 6137 6138 return nr_zones; 6139 } 6140 6141 #ifdef CONFIG_NUMA 6142 6143 static int __parse_numa_zonelist_order(char *s) 6144 { 6145 /* 6146 * We used to support different zonelists modes but they turned 6147 * out to be just not useful. Let's keep the warning in place 6148 * if somebody still use the cmd line parameter so that we do 6149 * not fail it silently 6150 */ 6151 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 6152 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 6153 return -EINVAL; 6154 } 6155 return 0; 6156 } 6157 6158 char numa_zonelist_order[] = "Node"; 6159 6160 /* 6161 * sysctl handler for numa_zonelist_order 6162 */ 6163 int numa_zonelist_order_handler(struct ctl_table *table, int write, 6164 void *buffer, size_t *length, loff_t *ppos) 6165 { 6166 if (write) 6167 return __parse_numa_zonelist_order(buffer); 6168 return proc_dostring(table, write, buffer, length, ppos); 6169 } 6170 6171 6172 static int node_load[MAX_NUMNODES]; 6173 6174 /** 6175 * find_next_best_node - find the next node that should appear in a given node's fallback list 6176 * @node: node whose fallback list we're appending 6177 * @used_node_mask: nodemask_t of already used nodes 6178 * 6179 * We use a number of factors to determine which is the next node that should 6180 * appear on a given node's fallback list. The node should not have appeared 6181 * already in @node's fallback list, and it should be the next closest node 6182 * according to the distance array (which contains arbitrary distance values 6183 * from each node to each node in the system), and should also prefer nodes 6184 * with no CPUs, since presumably they'll have very little allocation pressure 6185 * on them otherwise. 6186 * 6187 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 6188 */ 6189 int find_next_best_node(int node, nodemask_t *used_node_mask) 6190 { 6191 int n, val; 6192 int min_val = INT_MAX; 6193 int best_node = NUMA_NO_NODE; 6194 6195 /* Use the local node if we haven't already */ 6196 if (!node_isset(node, *used_node_mask)) { 6197 node_set(node, *used_node_mask); 6198 return node; 6199 } 6200 6201 for_each_node_state(n, N_MEMORY) { 6202 6203 /* Don't want a node to appear more than once */ 6204 if (node_isset(n, *used_node_mask)) 6205 continue; 6206 6207 /* Use the distance array to find the distance */ 6208 val = node_distance(node, n); 6209 6210 /* Penalize nodes under us ("prefer the next node") */ 6211 val += (n < node); 6212 6213 /* Give preference to headless and unused nodes */ 6214 if (!cpumask_empty(cpumask_of_node(n))) 6215 val += PENALTY_FOR_NODE_WITH_CPUS; 6216 6217 /* Slight preference for less loaded node */ 6218 val *= MAX_NUMNODES; 6219 val += node_load[n]; 6220 6221 if (val < min_val) { 6222 min_val = val; 6223 best_node = n; 6224 } 6225 } 6226 6227 if (best_node >= 0) 6228 node_set(best_node, *used_node_mask); 6229 6230 return best_node; 6231 } 6232 6233 6234 /* 6235 * Build zonelists ordered by node and zones within node. 
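 * (The node order itself is produced by find_next_best_node(), called
 * from build_zonelists() below.)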
6236 * This results in maximum locality--normal zone overflows into local 6237 * DMA zone, if any--but risks exhausting DMA zone. 6238 */ 6239 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 6240 unsigned nr_nodes) 6241 { 6242 struct zoneref *zonerefs; 6243 int i; 6244 6245 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 6246 6247 for (i = 0; i < nr_nodes; i++) { 6248 int nr_zones; 6249 6250 pg_data_t *node = NODE_DATA(node_order[i]); 6251 6252 nr_zones = build_zonerefs_node(node, zonerefs); 6253 zonerefs += nr_zones; 6254 } 6255 zonerefs->zone = NULL; 6256 zonerefs->zone_idx = 0; 6257 } 6258 6259 /* 6260 * Build gfp_thisnode zonelists 6261 */ 6262 static void build_thisnode_zonelists(pg_data_t *pgdat) 6263 { 6264 struct zoneref *zonerefs; 6265 int nr_zones; 6266 6267 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 6268 nr_zones = build_zonerefs_node(pgdat, zonerefs); 6269 zonerefs += nr_zones; 6270 zonerefs->zone = NULL; 6271 zonerefs->zone_idx = 0; 6272 } 6273 6274 /* 6275 * Build zonelists ordered by zone and nodes within zones. 6276 * This results in conserving DMA zone[s] until all Normal memory is 6277 * exhausted, but results in overflowing to remote node while memory 6278 * may still exist in local DMA zone. 6279 */ 6280 6281 static void build_zonelists(pg_data_t *pgdat) 6282 { 6283 static int node_order[MAX_NUMNODES]; 6284 int node, nr_nodes = 0; 6285 nodemask_t used_mask = NODE_MASK_NONE; 6286 int local_node, prev_node; 6287 6288 /* NUMA-aware ordering of nodes */ 6289 local_node = pgdat->node_id; 6290 prev_node = local_node; 6291 6292 memset(node_order, 0, sizeof(node_order)); 6293 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 6294 /* 6295 * We don't want to pressure a particular node. 6296 * So adding penalty to the first node in same 6297 * distance group to make it round-robin. 6298 */ 6299 if (node_distance(local_node, node) != 6300 node_distance(local_node, prev_node)) 6301 node_load[node] += 1; 6302 6303 node_order[nr_nodes++] = node; 6304 prev_node = node; 6305 } 6306 6307 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 6308 build_thisnode_zonelists(pgdat); 6309 pr_info("Fallback order for Node %d: ", local_node); 6310 for (node = 0; node < nr_nodes; node++) 6311 pr_cont("%d ", node_order[node]); 6312 pr_cont("\n"); 6313 } 6314 6315 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6316 /* 6317 * Return node id of node used for "local" allocations. 6318 * I.e., first node id of first zone in arg node's generic zonelist. 6319 * Used for initializing percpu 'numa_mem', which is used primarily 6320 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 6321 */ 6322 int local_memory_node(int node) 6323 { 6324 struct zoneref *z; 6325 6326 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 6327 gfp_zone(GFP_KERNEL), 6328 NULL); 6329 return zone_to_nid(z->zone); 6330 } 6331 #endif 6332 6333 static void setup_min_unmapped_ratio(void); 6334 static void setup_min_slab_ratio(void); 6335 #else /* CONFIG_NUMA */ 6336 6337 static void build_zonelists(pg_data_t *pgdat) 6338 { 6339 int node, local_node; 6340 struct zoneref *zonerefs; 6341 int nr_zones; 6342 6343 local_node = pgdat->node_id; 6344 6345 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 6346 nr_zones = build_zonerefs_node(pgdat, zonerefs); 6347 zonerefs += nr_zones; 6348 6349 /* 6350 * Now we build the zonelist so that it contains the zones 6351 * of all the other nodes. 
6352 * We don't want to pressure a particular node, so when 6353 * building the zones for node N, we make sure that the 6354 * zones coming right after the local ones are those from 6355 * node N+1 (modulo N) 6356 */ 6357 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 6358 if (!node_online(node)) 6359 continue; 6360 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 6361 zonerefs += nr_zones; 6362 } 6363 for (node = 0; node < local_node; node++) { 6364 if (!node_online(node)) 6365 continue; 6366 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 6367 zonerefs += nr_zones; 6368 } 6369 6370 zonerefs->zone = NULL; 6371 zonerefs->zone_idx = 0; 6372 } 6373 6374 #endif /* CONFIG_NUMA */ 6375 6376 /* 6377 * Boot pageset table. One per cpu which is going to be used for all 6378 * zones and all nodes. The parameters will be set in such a way 6379 * that an item put on a list will immediately be handed over to 6380 * the buddy list. This is safe since pageset manipulation is done 6381 * with interrupts disabled. 6382 * 6383 * The boot_pagesets must be kept even after bootup is complete for 6384 * unused processors and/or zones. They do play a role for bootstrapping 6385 * hotplugged processors. 6386 * 6387 * zoneinfo_show() and maybe other functions do 6388 * not check if the processor is online before following the pageset pointer. 6389 * Other parts of the kernel may not check if the zone is available. 6390 */ 6391 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 6392 /* These effectively disable the pcplists in the boot pageset completely */ 6393 #define BOOT_PAGESET_HIGH 0 6394 #define BOOT_PAGESET_BATCH 1 6395 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 6396 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 6397 DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 6398 6399 static void __build_all_zonelists(void *data) 6400 { 6401 int nid; 6402 int __maybe_unused cpu; 6403 pg_data_t *self = data; 6404 static DEFINE_SPINLOCK(lock); 6405 6406 spin_lock(&lock); 6407 6408 #ifdef CONFIG_NUMA 6409 memset(node_load, 0, sizeof(node_load)); 6410 #endif 6411 6412 /* 6413 * This node is hotadded and no memory is yet present. So just 6414 * building zonelists is fine - no need to touch other nodes. 6415 */ 6416 if (self && !node_online(self->node_id)) { 6417 build_zonelists(self); 6418 } else { 6419 /* 6420 * All possible nodes have pgdat preallocated 6421 * in free_area_init 6422 */ 6423 for_each_node(nid) { 6424 pg_data_t *pgdat = NODE_DATA(nid); 6425 6426 build_zonelists(pgdat); 6427 } 6428 6429 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6430 /* 6431 * We now know the "local memory node" for each node-- 6432 * i.e., the node of the first zone in the generic zonelist. 6433 * Set up numa_mem percpu variable for on-line cpus. During 6434 * boot, only the boot cpu should be on-line; we'll init the 6435 * secondary cpus' numa_mem as they come on-line. During 6436 * node/memory hotplug, we'll fixup all on-line cpus. 6437 */ 6438 for_each_online_cpu(cpu) 6439 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 6440 #endif 6441 } 6442 6443 spin_unlock(&lock); 6444 } 6445 6446 static noinline void __init 6447 build_all_zonelists_init(void) 6448 { 6449 int cpu; 6450 6451 __build_all_zonelists(NULL); 6452 6453 /* 6454 * Initialize the boot_pagesets that are going to be used 6455 * for bootstrapping processors. 
The real pagesets for 6456 * each zone will be allocated later when the per cpu 6457 * allocator is available. 6458 * 6459 * boot_pagesets are used also for bootstrapping offline 6460 * cpus if the system is already booted because the pagesets 6461 * are needed to initialize allocators on a specific cpu too. 6462 * F.e. the percpu allocator needs the page allocator which 6463 * needs the percpu allocator in order to allocate its pagesets 6464 * (a chicken-egg dilemma). 6465 */ 6466 for_each_possible_cpu(cpu) 6467 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 6468 6469 mminit_verify_zonelist(); 6470 cpuset_init_current_mems_allowed(); 6471 } 6472 6473 /* 6474 * unless system_state == SYSTEM_BOOTING. 6475 * 6476 * __ref due to call of __init annotated helper build_all_zonelists_init 6477 * [protected by SYSTEM_BOOTING]. 6478 */ 6479 void __ref build_all_zonelists(pg_data_t *pgdat) 6480 { 6481 unsigned long vm_total_pages; 6482 6483 if (system_state == SYSTEM_BOOTING) { 6484 build_all_zonelists_init(); 6485 } else { 6486 __build_all_zonelists(pgdat); 6487 /* cpuset refresh routine should be here */ 6488 } 6489 /* Get the number of free pages beyond high watermark in all zones. */ 6490 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 6491 /* 6492 * Disable grouping by mobility if the number of pages in the 6493 * system is too low to allow the mechanism to work. It would be 6494 * more accurate, but expensive to check per-zone. This check is 6495 * made on memory-hotadd so a system can start with mobility 6496 * disabled and enable it later 6497 */ 6498 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 6499 page_group_by_mobility_disabled = 1; 6500 else 6501 page_group_by_mobility_disabled = 0; 6502 6503 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 6504 nr_online_nodes, 6505 page_group_by_mobility_disabled ? "off" : "on", 6506 vm_total_pages); 6507 #ifdef CONFIG_NUMA 6508 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 6509 #endif 6510 } 6511 6512 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 6513 static bool __meminit 6514 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 6515 { 6516 static struct memblock_region *r; 6517 6518 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 6519 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 6520 for_each_mem_region(r) { 6521 if (*pfn < memblock_region_memory_end_pfn(r)) 6522 break; 6523 } 6524 } 6525 if (*pfn >= memblock_region_memory_base_pfn(r) && 6526 memblock_is_mirror(r)) { 6527 *pfn = memblock_region_memory_end_pfn(r); 6528 return true; 6529 } 6530 } 6531 return false; 6532 } 6533 6534 /* 6535 * Initially all pages are reserved - free ones are freed 6536 * up by memblock_free_all() once the early boot process is 6537 * done. Non-atomic initialization, single-pass. 6538 * 6539 * All aligned pageblocks are initialized to the specified migratetype 6540 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 6541 * zone stats (e.g., nr_isolate_pageblock) are touched. 
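 * Pages initialized in MEMINIT_HOTPLUG context are additionally marked
 * PageReserved and stay that way until the hotplugged memory is onlined.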
6542 */ 6543 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, 6544 unsigned long start_pfn, unsigned long zone_end_pfn, 6545 enum meminit_context context, 6546 struct vmem_altmap *altmap, int migratetype) 6547 { 6548 unsigned long pfn, end_pfn = start_pfn + size; 6549 struct page *page; 6550 6551 if (highest_memmap_pfn < end_pfn - 1) 6552 highest_memmap_pfn = end_pfn - 1; 6553 6554 #ifdef CONFIG_ZONE_DEVICE 6555 /* 6556 * Honor reservation requested by the driver for this ZONE_DEVICE 6557 * memory. We limit the total number of pages to initialize to just 6558 * those that might contain the memory mapping. We will defer the 6559 * ZONE_DEVICE page initialization until after we have released 6560 * the hotplug lock. 6561 */ 6562 if (zone == ZONE_DEVICE) { 6563 if (!altmap) 6564 return; 6565 6566 if (start_pfn == altmap->base_pfn) 6567 start_pfn += altmap->reserve; 6568 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6569 } 6570 #endif 6571 6572 for (pfn = start_pfn; pfn < end_pfn; ) { 6573 /* 6574 * There can be holes in boot-time mem_map[]s handed to this 6575 * function. They do not exist on hotplugged memory. 6576 */ 6577 if (context == MEMINIT_EARLY) { 6578 if (overlap_memmap_init(zone, &pfn)) 6579 continue; 6580 if (defer_init(nid, pfn, zone_end_pfn)) 6581 break; 6582 } 6583 6584 page = pfn_to_page(pfn); 6585 __init_single_page(page, pfn, zone, nid); 6586 if (context == MEMINIT_HOTPLUG) 6587 __SetPageReserved(page); 6588 6589 /* 6590 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 6591 * such that unmovable allocations won't be scattered all 6592 * over the place during system boot. 6593 */ 6594 if (IS_ALIGNED(pfn, pageblock_nr_pages)) { 6595 set_pageblock_migratetype(page, migratetype); 6596 cond_resched(); 6597 } 6598 pfn++; 6599 } 6600 } 6601 6602 #ifdef CONFIG_ZONE_DEVICE 6603 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, 6604 unsigned long zone_idx, int nid, 6605 struct dev_pagemap *pgmap) 6606 { 6607 6608 __init_single_page(page, pfn, zone_idx, nid); 6609 6610 /* 6611 * Mark page reserved as it will need to wait for onlining 6612 * phase for it to be fully associated with a zone. 6613 * 6614 * We can use the non-atomic __set_bit operation for setting 6615 * the flag as we are still initializing the pages. 6616 */ 6617 __SetPageReserved(page); 6618 6619 /* 6620 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 6621 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 6622 * ever freed or placed on a driver-private list. 6623 */ 6624 page->pgmap = pgmap; 6625 page->zone_device_data = NULL; 6626 6627 /* 6628 * Mark the block movable so that blocks are reserved for 6629 * movable at startup. This will force kernel allocations 6630 * to reserve their blocks rather than leaking throughout 6631 * the address space during boot when many long-lived 6632 * kernel allocations are made. 6633 * 6634 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap 6635 * because this is done early in section_activate() 6636 */ 6637 if (IS_ALIGNED(pfn, pageblock_nr_pages)) { 6638 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 6639 cond_resched(); 6640 } 6641 } 6642 6643 /* 6644 * With compound page geometry and when struct pages are stored in ram most 6645 * tail pages are reused. Consequently, the amount of unique struct pages to 6646 * initialize is a lot smaller that the total amount of struct pages being 6647 * mapped. 
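 * For example, assuming 4 KiB pages and a 64 byte struct page
 * (illustrative sizes), compound_nr_pages() below caps the work at
 * 2 * (PAGE_SIZE / sizeof(struct page)) = 128 struct pages per compound
 * page when no altmap is involved.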
This is a paired / mild layering violation with explicit knowledge 6648 * of how the sparse_vmemmap internals handle compound pages in the lack 6649 * of an altmap. See vmemmap_populate_compound_pages(). 6650 */ 6651 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap, 6652 unsigned long nr_pages) 6653 { 6654 return is_power_of_2(sizeof(struct page)) && 6655 !altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages; 6656 } 6657 6658 static void __ref memmap_init_compound(struct page *head, 6659 unsigned long head_pfn, 6660 unsigned long zone_idx, int nid, 6661 struct dev_pagemap *pgmap, 6662 unsigned long nr_pages) 6663 { 6664 unsigned long pfn, end_pfn = head_pfn + nr_pages; 6665 unsigned int order = pgmap->vmemmap_shift; 6666 6667 __SetPageHead(head); 6668 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { 6669 struct page *page = pfn_to_page(pfn); 6670 6671 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 6672 prep_compound_tail(head, pfn - head_pfn); 6673 set_page_count(page, 0); 6674 6675 /* 6676 * The first tail page stores compound_mapcount_ptr() and 6677 * compound_order() and the second tail page stores 6678 * compound_pincount_ptr(). Call prep_compound_head() after 6679 * the first and second tail pages have been initialized to 6680 * not have the data overwritten. 6681 */ 6682 if (pfn == head_pfn + 2) 6683 prep_compound_head(head, order); 6684 } 6685 } 6686 6687 void __ref memmap_init_zone_device(struct zone *zone, 6688 unsigned long start_pfn, 6689 unsigned long nr_pages, 6690 struct dev_pagemap *pgmap) 6691 { 6692 unsigned long pfn, end_pfn = start_pfn + nr_pages; 6693 struct pglist_data *pgdat = zone->zone_pgdat; 6694 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 6695 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); 6696 unsigned long zone_idx = zone_idx(zone); 6697 unsigned long start = jiffies; 6698 int nid = pgdat->node_id; 6699 6700 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) 6701 return; 6702 6703 /* 6704 * The call to memmap_init should have already taken care 6705 * of the pages reserved for the memmap, so we can just jump to 6706 * the end of that region and start processing the device pages. 6707 */ 6708 if (altmap) { 6709 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6710 nr_pages = end_pfn - start_pfn; 6711 } 6712 6713 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { 6714 struct page *page = pfn_to_page(pfn); 6715 6716 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 6717 6718 if (pfns_per_compound == 1) 6719 continue; 6720 6721 memmap_init_compound(page, pfn, zone_idx, nid, pgmap, 6722 compound_nr_pages(altmap, pfns_per_compound)); 6723 } 6724 6725 pr_info("%s initialised %lu pages in %ums\n", __func__, 6726 nr_pages, jiffies_to_msecs(jiffies - start)); 6727 } 6728 6729 #endif 6730 static void __meminit zone_init_free_lists(struct zone *zone) 6731 { 6732 unsigned int order, t; 6733 for_each_migratetype_order(order, t) { 6734 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 6735 zone->free_area[order].nr_free = 0; 6736 } 6737 } 6738 6739 /* 6740 * Only struct pages that correspond to ranges defined by memblock.memory 6741 * are zeroed and initialized by going through __init_single_page() during 6742 * memmap_init_zone_range(). 6743 * 6744 * But, there could be struct pages that correspond to holes in 6745 * memblock.memory. 
This can happen because of the following reasons: 6746 * - physical memory bank size is not necessarily the exact multiple of the 6747 * arbitrary section size 6748 * - early reserved memory may not be listed in memblock.memory 6749 * - memory layouts defined with memmap= kernel parameter may not align 6750 * nicely with memmap sections 6751 * 6752 * Explicitly initialize those struct pages so that: 6753 * - PG_Reserved is set 6754 * - zone and node links point to zone and node that span the page if the 6755 * hole is in the middle of a zone 6756 * - zone and node links point to adjacent zone/node if the hole falls on 6757 * the zone boundary; the pages in such holes will be prepended to the 6758 * zone/node above the hole except for the trailing pages in the last 6759 * section that will be appended to the zone/node below. 6760 */ 6761 static void __init init_unavailable_range(unsigned long spfn, 6762 unsigned long epfn, 6763 int zone, int node) 6764 { 6765 unsigned long pfn; 6766 u64 pgcnt = 0; 6767 6768 for (pfn = spfn; pfn < epfn; pfn++) { 6769 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) { 6770 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages) 6771 + pageblock_nr_pages - 1; 6772 continue; 6773 } 6774 __init_single_page(pfn_to_page(pfn), pfn, zone, node); 6775 __SetPageReserved(pfn_to_page(pfn)); 6776 pgcnt++; 6777 } 6778 6779 if (pgcnt) 6780 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", 6781 node, zone_names[zone], pgcnt); 6782 } 6783 6784 static void __init memmap_init_zone_range(struct zone *zone, 6785 unsigned long start_pfn, 6786 unsigned long end_pfn, 6787 unsigned long *hole_pfn) 6788 { 6789 unsigned long zone_start_pfn = zone->zone_start_pfn; 6790 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; 6791 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); 6792 6793 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); 6794 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); 6795 6796 if (start_pfn >= end_pfn) 6797 return; 6798 6799 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, 6800 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); 6801 6802 if (*hole_pfn < start_pfn) 6803 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); 6804 6805 *hole_pfn = end_pfn; 6806 } 6807 6808 static void __init memmap_init(void) 6809 { 6810 unsigned long start_pfn, end_pfn; 6811 unsigned long hole_pfn = 0; 6812 int i, j, zone_id = 0, nid; 6813 6814 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 6815 struct pglist_data *node = NODE_DATA(nid); 6816 6817 for (j = 0; j < MAX_NR_ZONES; j++) { 6818 struct zone *zone = node->node_zones + j; 6819 6820 if (!populated_zone(zone)) 6821 continue; 6822 6823 memmap_init_zone_range(zone, start_pfn, end_pfn, 6824 &hole_pfn); 6825 zone_id = j; 6826 } 6827 } 6828 6829 #ifdef CONFIG_SPARSEMEM 6830 /* 6831 * Initialize the memory map for hole in the range [memory_end, 6832 * section_end]. 6833 * Append the pages in this hole to the highest zone in the last 6834 * node. 
6835 * The call to init_unavailable_range() is outside the ifdef to 6836 * silence the compiler warining about zone_id set but not used; 6837 * for FLATMEM it is a nop anyway 6838 */ 6839 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); 6840 if (hole_pfn < end_pfn) 6841 #endif 6842 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); 6843 } 6844 6845 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 6846 phys_addr_t min_addr, int nid, bool exact_nid) 6847 { 6848 void *ptr; 6849 6850 if (exact_nid) 6851 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 6852 MEMBLOCK_ALLOC_ACCESSIBLE, 6853 nid); 6854 else 6855 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 6856 MEMBLOCK_ALLOC_ACCESSIBLE, 6857 nid); 6858 6859 if (ptr && size > 0) 6860 page_init_poison(ptr, size); 6861 6862 return ptr; 6863 } 6864 6865 static int zone_batchsize(struct zone *zone) 6866 { 6867 #ifdef CONFIG_MMU 6868 int batch; 6869 6870 /* 6871 * The number of pages to batch allocate is either ~0.1% 6872 * of the zone or 1MB, whichever is smaller. The batch 6873 * size is striking a balance between allocation latency 6874 * and zone lock contention. 6875 */ 6876 batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE); 6877 batch /= 4; /* We effectively *= 4 below */ 6878 if (batch < 1) 6879 batch = 1; 6880 6881 /* 6882 * Clamp the batch to a 2^n - 1 value. Having a power 6883 * of 2 value was found to be more likely to have 6884 * suboptimal cache aliasing properties in some cases. 6885 * 6886 * For example if 2 tasks are alternately allocating 6887 * batches of pages, one task can end up with a lot 6888 * of pages of one half of the possible page colors 6889 * and the other with pages of the other colors. 6890 */ 6891 batch = rounddown_pow_of_two(batch + batch/2) - 1; 6892 6893 return batch; 6894 6895 #else 6896 /* The deferral and batching of frees should be suppressed under NOMMU 6897 * conditions. 6898 * 6899 * The problem is that NOMMU needs to be able to allocate large chunks 6900 * of contiguous memory as there's no hardware page translation to 6901 * assemble apparent contiguous memory from discontiguous pages. 6902 * 6903 * Queueing large contiguous runs of pages for batching, however, 6904 * causes the pages to actually be freed in smaller chunks. As there 6905 * can be a significant delay between the individual batches being 6906 * recycled, this leads to the once large chunks of space being 6907 * fragmented and becoming unavailable for high-order allocations. 6908 */ 6909 return 0; 6910 #endif 6911 } 6912 6913 static int zone_highsize(struct zone *zone, int batch, int cpu_online) 6914 { 6915 #ifdef CONFIG_MMU 6916 int high; 6917 int nr_split_cpus; 6918 unsigned long total_pages; 6919 6920 if (!percpu_pagelist_high_fraction) { 6921 /* 6922 * By default, the high value of the pcp is based on the zone 6923 * low watermark so that if they are full then background 6924 * reclaim will not be started prematurely. 6925 */ 6926 total_pages = low_wmark_pages(zone); 6927 } else { 6928 /* 6929 * If percpu_pagelist_high_fraction is configured, the high 6930 * value is based on a fraction of the managed pages in the 6931 * zone. 6932 */ 6933 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; 6934 } 6935 6936 /* 6937 * Split the high value across all online CPUs local to the zone. Note 6938 * that early in boot that CPUs may not be online yet and that during 6939 * CPU hotplug that the cpumask is not yet updated when a CPU is being 6940 * onlined. 
For memory nodes that have no CPUs, split pcp->high across 6941 * all online CPUs to mitigate the risk that reclaim is triggered 6942 * prematurely due to pages stored on pcp lists. 6943 */ 6944 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 6945 if (!nr_split_cpus) 6946 nr_split_cpus = num_online_cpus(); 6947 high = total_pages / nr_split_cpus; 6948 6949 /* 6950 * Ensure high is at least batch*4. The multiple is based on the 6951 * historical relationship between high and batch. 6952 */ 6953 high = max(high, batch << 2); 6954 6955 return high; 6956 #else 6957 return 0; 6958 #endif 6959 } 6960 6961 /* 6962 * pcp->high and pcp->batch values are related and generally batch is lower 6963 * than high. They are also related to pcp->count such that count is lower 6964 * than high, and as soon as it reaches high, the pcplist is flushed. 6965 * 6966 * However, guaranteeing these relations at all times would require e.g. write 6967 * barriers here but also careful usage of read barriers at the read side, and 6968 * thus be prone to error and bad for performance. Thus the update only prevents 6969 * store tearing. Any new users of pcp->batch and pcp->high should ensure they 6970 * can cope with those fields changing asynchronously, and fully trust only the 6971 * pcp->count field on the local CPU with interrupts disabled. 6972 * 6973 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 6974 * outside of boot time (or some other assurance that no concurrent updaters 6975 * exist). 6976 */ 6977 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 6978 unsigned long batch) 6979 { 6980 WRITE_ONCE(pcp->batch, batch); 6981 WRITE_ONCE(pcp->high, high); 6982 } 6983 6984 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 6985 { 6986 int pindex; 6987 6988 memset(pcp, 0, sizeof(*pcp)); 6989 memset(pzstats, 0, sizeof(*pzstats)); 6990 6991 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 6992 INIT_LIST_HEAD(&pcp->lists[pindex]); 6993 6994 /* 6995 * Set batch and high values safe for a boot pageset. A true percpu 6996 * pageset's initialization will update them subsequently. Here we don't 6997 * need to be as careful as pageset_update() as nobody can access the 6998 * pageset yet. 6999 */ 7000 pcp->high = BOOT_PAGESET_HIGH; 7001 pcp->batch = BOOT_PAGESET_BATCH; 7002 pcp->free_factor = 0; 7003 } 7004 7005 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 7006 unsigned long batch) 7007 { 7008 struct per_cpu_pages *pcp; 7009 int cpu; 7010 7011 for_each_possible_cpu(cpu) { 7012 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 7013 pageset_update(pcp, high, batch); 7014 } 7015 } 7016 7017 /* 7018 * Calculate and set new high and batch values for all per-cpu pagesets of a 7019 * zone based on the zone's size. 
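 * As an illustrative example (assuming 4 KiB pages): for a zone with
 * 4 GiB of managed memory, zone_batchsize() computes
 * min(1048576 >> 10, 256) / 4 = 64 and rounds it to
 * rounddown_pow_of_two(64 + 32) - 1 = 63; zone_highsize() then splits the
 * zone's low watermark across the local CPUs, clamped to no less than
 * 4 * batch = 252 pages.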
7020 */ 7021 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 7022 { 7023 int new_high, new_batch; 7024 7025 new_batch = max(1, zone_batchsize(zone)); 7026 new_high = zone_highsize(zone, new_batch, cpu_online); 7027 7028 if (zone->pageset_high == new_high && 7029 zone->pageset_batch == new_batch) 7030 return; 7031 7032 zone->pageset_high = new_high; 7033 zone->pageset_batch = new_batch; 7034 7035 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 7036 } 7037 7038 void __meminit setup_zone_pageset(struct zone *zone) 7039 { 7040 int cpu; 7041 7042 /* Size may be 0 on !SMP && !NUMA */ 7043 if (sizeof(struct per_cpu_zonestat) > 0) 7044 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 7045 7046 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 7047 for_each_possible_cpu(cpu) { 7048 struct per_cpu_pages *pcp; 7049 struct per_cpu_zonestat *pzstats; 7050 7051 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 7052 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 7053 per_cpu_pages_init(pcp, pzstats); 7054 } 7055 7056 zone_set_pageset_high_and_batch(zone, 0); 7057 } 7058 7059 /* 7060 * Allocate per cpu pagesets and initialize them. 7061 * Before this call only boot pagesets were available. 7062 */ 7063 void __init setup_per_cpu_pageset(void) 7064 { 7065 struct pglist_data *pgdat; 7066 struct zone *zone; 7067 int __maybe_unused cpu; 7068 7069 for_each_populated_zone(zone) 7070 setup_zone_pageset(zone); 7071 7072 #ifdef CONFIG_NUMA 7073 /* 7074 * Unpopulated zones continue using the boot pagesets. 7075 * The numa stats for these pagesets need to be reset. 7076 * Otherwise, they will end up skewing the stats of 7077 * the nodes these zones are associated with. 7078 */ 7079 for_each_possible_cpu(cpu) { 7080 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 7081 memset(pzstats->vm_numa_event, 0, 7082 sizeof(pzstats->vm_numa_event)); 7083 } 7084 #endif 7085 7086 for_each_online_pgdat(pgdat) 7087 pgdat->per_cpu_nodestats = 7088 alloc_percpu(struct per_cpu_nodestat); 7089 } 7090 7091 static __meminit void zone_pcp_init(struct zone *zone) 7092 { 7093 /* 7094 * per cpu subsystem is not up at this point. The following code 7095 * relies on the ability of the linker to provide the 7096 * offset of a (static) per cpu variable into the per cpu area. 7097 */ 7098 zone->per_cpu_pageset = &boot_pageset; 7099 zone->per_cpu_zonestats = &boot_zonestats; 7100 zone->pageset_high = BOOT_PAGESET_HIGH; 7101 zone->pageset_batch = BOOT_PAGESET_BATCH; 7102 7103 if (populated_zone(zone)) 7104 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 7105 zone->present_pages, zone_batchsize(zone)); 7106 } 7107 7108 void __meminit init_currently_empty_zone(struct zone *zone, 7109 unsigned long zone_start_pfn, 7110 unsigned long size) 7111 { 7112 struct pglist_data *pgdat = zone->zone_pgdat; 7113 int zone_idx = zone_idx(zone) + 1; 7114 7115 if (zone_idx > pgdat->nr_zones) 7116 pgdat->nr_zones = zone_idx; 7117 7118 zone->zone_start_pfn = zone_start_pfn; 7119 7120 mminit_dprintk(MMINIT_TRACE, "memmap_init", 7121 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 7122 pgdat->node_id, 7123 (unsigned long)zone_idx(zone), 7124 zone_start_pfn, (zone_start_pfn + size)); 7125 7126 zone_init_free_lists(zone); 7127 zone->initialized = 1; 7128 } 7129 7130 /** 7131 * get_pfn_range_for_nid - Return the start and end page frames for a node 7132 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 
7133 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 7134 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 7135 * 7136 * It returns the start and end page frame of a node based on information 7137 * provided by memblock_set_node(). If called for a node 7138 * with no available memory, a warning is printed and the start and end 7139 * PFNs will be 0. 7140 */ 7141 void __init get_pfn_range_for_nid(unsigned int nid, 7142 unsigned long *start_pfn, unsigned long *end_pfn) 7143 { 7144 unsigned long this_start_pfn, this_end_pfn; 7145 int i; 7146 7147 *start_pfn = -1UL; 7148 *end_pfn = 0; 7149 7150 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 7151 *start_pfn = min(*start_pfn, this_start_pfn); 7152 *end_pfn = max(*end_pfn, this_end_pfn); 7153 } 7154 7155 if (*start_pfn == -1UL) 7156 *start_pfn = 0; 7157 } 7158 7159 /* 7160 * This finds a zone that can be used for ZONE_MOVABLE pages. The 7161 * assumption is made that zones within a node are ordered in monotonic 7162 * increasing memory addresses so that the "highest" populated zone is used 7163 */ 7164 static void __init find_usable_zone_for_movable(void) 7165 { 7166 int zone_index; 7167 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 7168 if (zone_index == ZONE_MOVABLE) 7169 continue; 7170 7171 if (arch_zone_highest_possible_pfn[zone_index] > 7172 arch_zone_lowest_possible_pfn[zone_index]) 7173 break; 7174 } 7175 7176 VM_BUG_ON(zone_index == -1); 7177 movable_zone = zone_index; 7178 } 7179 7180 /* 7181 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 7182 * because it is sized independent of architecture. Unlike the other zones, 7183 * the starting point for ZONE_MOVABLE is not fixed. It may be different 7184 * in each node depending on the size of each node and how evenly kernelcore 7185 * is distributed. This helper function adjusts the zone ranges 7186 * provided by the architecture for a given node by using the end of the 7187 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that 7188 * zones within a node are in order of monotonic increases memory addresses 7189 */ 7190 static void __init adjust_zone_range_for_zone_movable(int nid, 7191 unsigned long zone_type, 7192 unsigned long node_start_pfn, 7193 unsigned long node_end_pfn, 7194 unsigned long *zone_start_pfn, 7195 unsigned long *zone_end_pfn) 7196 { 7197 /* Only adjust if ZONE_MOVABLE is on this node */ 7198 if (zone_movable_pfn[nid]) { 7199 /* Size ZONE_MOVABLE */ 7200 if (zone_type == ZONE_MOVABLE) { 7201 *zone_start_pfn = zone_movable_pfn[nid]; 7202 *zone_end_pfn = min(node_end_pfn, 7203 arch_zone_highest_possible_pfn[movable_zone]); 7204 7205 /* Adjust for ZONE_MOVABLE starting within this range */ 7206 } else if (!mirrored_kernelcore && 7207 *zone_start_pfn < zone_movable_pfn[nid] && 7208 *zone_end_pfn > zone_movable_pfn[nid]) { 7209 *zone_end_pfn = zone_movable_pfn[nid]; 7210 7211 /* Check if this whole range is within ZONE_MOVABLE */ 7212 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 7213 *zone_start_pfn = *zone_end_pfn; 7214 } 7215 } 7216 7217 /* 7218 * Return the number of pages a zone spans in a node, including holes 7219 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 7220 */ 7221 static unsigned long __init zone_spanned_pages_in_node(int nid, 7222 unsigned long zone_type, 7223 unsigned long node_start_pfn, 7224 unsigned long node_end_pfn, 7225 unsigned long *zone_start_pfn, 7226 unsigned long *zone_end_pfn) 7227 { 7228 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 7229 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 7230 /* When hotadd a new node from cpu_up(), the node should be empty */ 7231 if (!node_start_pfn && !node_end_pfn) 7232 return 0; 7233 7234 /* Get the start and end of the zone */ 7235 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 7236 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 7237 adjust_zone_range_for_zone_movable(nid, zone_type, 7238 node_start_pfn, node_end_pfn, 7239 zone_start_pfn, zone_end_pfn); 7240 7241 /* Check that this node has pages within the zone's required range */ 7242 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 7243 return 0; 7244 7245 /* Move the zone boundaries inside the node if necessary */ 7246 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 7247 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 7248 7249 /* Return the spanned pages */ 7250 return *zone_end_pfn - *zone_start_pfn; 7251 } 7252 7253 /* 7254 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 7255 * then all holes in the requested range will be accounted for. 7256 */ 7257 unsigned long __init __absent_pages_in_range(int nid, 7258 unsigned long range_start_pfn, 7259 unsigned long range_end_pfn) 7260 { 7261 unsigned long nr_absent = range_end_pfn - range_start_pfn; 7262 unsigned long start_pfn, end_pfn; 7263 int i; 7264 7265 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 7266 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 7267 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 7268 nr_absent -= end_pfn - start_pfn; 7269 } 7270 return nr_absent; 7271 } 7272 7273 /** 7274 * absent_pages_in_range - Return number of page frames in holes within a range 7275 * @start_pfn: The start PFN to start searching for holes 7276 * @end_pfn: The end PFN to stop searching for holes 7277 * 7278 * Return: the number of pages frames in memory holes within a range. 
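 * Holes are PFN ranges not covered by any memblock.memory region; the
 * counting is delegated to __absent_pages_in_range() with MAX_NUMNODES so
 * that regions from every node are considered.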
7279 */ 7280 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 7281 unsigned long end_pfn) 7282 { 7283 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 7284 } 7285 7286 /* Return the number of page frames in holes in a zone on a node */ 7287 static unsigned long __init zone_absent_pages_in_node(int nid, 7288 unsigned long zone_type, 7289 unsigned long node_start_pfn, 7290 unsigned long node_end_pfn) 7291 { 7292 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 7293 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 7294 unsigned long zone_start_pfn, zone_end_pfn; 7295 unsigned long nr_absent; 7296 7297 /* When hotadd a new node from cpu_up(), the node should be empty */ 7298 if (!node_start_pfn && !node_end_pfn) 7299 return 0; 7300 7301 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 7302 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 7303 7304 adjust_zone_range_for_zone_movable(nid, zone_type, 7305 node_start_pfn, node_end_pfn, 7306 &zone_start_pfn, &zone_end_pfn); 7307 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 7308 7309 /* 7310 * ZONE_MOVABLE handling. 7311 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 7312 * and vice versa. 7313 */ 7314 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 7315 unsigned long start_pfn, end_pfn; 7316 struct memblock_region *r; 7317 7318 for_each_mem_region(r) { 7319 start_pfn = clamp(memblock_region_memory_base_pfn(r), 7320 zone_start_pfn, zone_end_pfn); 7321 end_pfn = clamp(memblock_region_memory_end_pfn(r), 7322 zone_start_pfn, zone_end_pfn); 7323 7324 if (zone_type == ZONE_MOVABLE && 7325 memblock_is_mirror(r)) 7326 nr_absent += end_pfn - start_pfn; 7327 7328 if (zone_type == ZONE_NORMAL && 7329 !memblock_is_mirror(r)) 7330 nr_absent += end_pfn - start_pfn; 7331 } 7332 } 7333 7334 return nr_absent; 7335 } 7336 7337 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 7338 unsigned long node_start_pfn, 7339 unsigned long node_end_pfn) 7340 { 7341 unsigned long realtotalpages = 0, totalpages = 0; 7342 enum zone_type i; 7343 7344 for (i = 0; i < MAX_NR_ZONES; i++) { 7345 struct zone *zone = pgdat->node_zones + i; 7346 unsigned long zone_start_pfn, zone_end_pfn; 7347 unsigned long spanned, absent; 7348 unsigned long size, real_size; 7349 7350 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 7351 node_start_pfn, 7352 node_end_pfn, 7353 &zone_start_pfn, 7354 &zone_end_pfn); 7355 absent = zone_absent_pages_in_node(pgdat->node_id, i, 7356 node_start_pfn, 7357 node_end_pfn); 7358 7359 size = spanned; 7360 real_size = size - absent; 7361 7362 if (size) 7363 zone->zone_start_pfn = zone_start_pfn; 7364 else 7365 zone->zone_start_pfn = 0; 7366 zone->spanned_pages = size; 7367 zone->present_pages = real_size; 7368 #if defined(CONFIG_MEMORY_HOTPLUG) 7369 zone->present_early_pages = real_size; 7370 #endif 7371 7372 totalpages += size; 7373 realtotalpages += real_size; 7374 } 7375 7376 pgdat->node_spanned_pages = totalpages; 7377 pgdat->node_present_pages = realtotalpages; 7378 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 7379 } 7380 7381 #ifndef CONFIG_SPARSEMEM 7382 /* 7383 * Calculate the size of the zone->blockflags rounded to an unsigned long 7384 * Start by making sure zonesize is a multiple of pageblock_order by rounding 7385 * up. 
Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 7386 * round what is now in bits to nearest long in bits, then return it in 7387 * bytes. 7388 */ 7389 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 7390 { 7391 unsigned long usemapsize; 7392 7393 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 7394 usemapsize = roundup(zonesize, pageblock_nr_pages); 7395 usemapsize = usemapsize >> pageblock_order; 7396 usemapsize *= NR_PAGEBLOCK_BITS; 7397 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 7398 7399 return usemapsize / 8; 7400 } 7401 7402 static void __ref setup_usemap(struct zone *zone) 7403 { 7404 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, 7405 zone->spanned_pages); 7406 zone->pageblock_flags = NULL; 7407 if (usemapsize) { 7408 zone->pageblock_flags = 7409 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 7410 zone_to_nid(zone)); 7411 if (!zone->pageblock_flags) 7412 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 7413 usemapsize, zone->name, zone_to_nid(zone)); 7414 } 7415 } 7416 #else 7417 static inline void setup_usemap(struct zone *zone) {} 7418 #endif /* CONFIG_SPARSEMEM */ 7419 7420 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 7421 7422 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 7423 void __init set_pageblock_order(void) 7424 { 7425 unsigned int order = MAX_ORDER - 1; 7426 7427 /* Check that pageblock_nr_pages has not already been setup */ 7428 if (pageblock_order) 7429 return; 7430 7431 /* Don't let pageblocks exceed the maximum allocation granularity. */ 7432 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) 7433 order = HUGETLB_PAGE_ORDER; 7434 7435 /* 7436 * Assume the largest contiguous order of interest is a huge page. 7437 * This value may be variable depending on boot parameters on IA64 and 7438 * powerpc. 7439 */ 7440 pageblock_order = order; 7441 } 7442 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 7443 7444 /* 7445 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 7446 * is unused as pageblock_order is set at compile-time. See 7447 * include/linux/pageblock-flags.h for the values of pageblock_order based on 7448 * the kernel config 7449 */ 7450 void __init set_pageblock_order(void) 7451 { 7452 } 7453 7454 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 7455 7456 static unsigned long __init calc_memmap_size(unsigned long spanned_pages, 7457 unsigned long present_pages) 7458 { 7459 unsigned long pages = spanned_pages; 7460 7461 /* 7462 * Provide a more accurate estimation if there are holes within 7463 * the zone and SPARSEMEM is in use. If there are holes within the 7464 * zone, each populated memory region may cost us one or two extra 7465 * memmap pages due to alignment because memmap pages for each 7466 * populated regions may not be naturally aligned on page boundary. 7467 * So the (present_pages >> 4) heuristic is a tradeoff for that. 
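 *
 * A rough worked example (made-up numbers): for a zone spanning
 * 1,048,576 PFNs of which only 983,040 are present, spanned exceeds
 * present + present/16 (983,040 + 61,440 = 1,044,480), so the estimate
 * falls back to present_pages rather than spanned_pages.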
7468 */ 7469 if (spanned_pages > present_pages + (present_pages >> 4) && 7470 IS_ENABLED(CONFIG_SPARSEMEM)) 7471 pages = present_pages; 7472 7473 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 7474 } 7475 7476 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 7477 static void pgdat_init_split_queue(struct pglist_data *pgdat) 7478 { 7479 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 7480 7481 spin_lock_init(&ds_queue->split_queue_lock); 7482 INIT_LIST_HEAD(&ds_queue->split_queue); 7483 ds_queue->split_queue_len = 0; 7484 } 7485 #else 7486 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 7487 #endif 7488 7489 #ifdef CONFIG_COMPACTION 7490 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 7491 { 7492 init_waitqueue_head(&pgdat->kcompactd_wait); 7493 } 7494 #else 7495 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 7496 #endif 7497 7498 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 7499 { 7500 int i; 7501 7502 pgdat_resize_init(pgdat); 7503 7504 pgdat_init_split_queue(pgdat); 7505 pgdat_init_kcompactd(pgdat); 7506 7507 init_waitqueue_head(&pgdat->kswapd_wait); 7508 init_waitqueue_head(&pgdat->pfmemalloc_wait); 7509 7510 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 7511 init_waitqueue_head(&pgdat->reclaim_wait[i]); 7512 7513 pgdat_page_ext_init(pgdat); 7514 lruvec_init(&pgdat->__lruvec); 7515 } 7516 7517 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 7518 unsigned long remaining_pages) 7519 { 7520 atomic_long_set(&zone->managed_pages, remaining_pages); 7521 zone_set_nid(zone, nid); 7522 zone->name = zone_names[idx]; 7523 zone->zone_pgdat = NODE_DATA(nid); 7524 spin_lock_init(&zone->lock); 7525 zone_seqlock_init(zone); 7526 zone_pcp_init(zone); 7527 } 7528 7529 /* 7530 * Set up the zone data structures 7531 * - init pgdat internals 7532 * - init all zones belonging to this node 7533 * 7534 * NOTE: this function is only called during memory hotplug 7535 */ 7536 #ifdef CONFIG_MEMORY_HOTPLUG 7537 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) 7538 { 7539 int nid = pgdat->node_id; 7540 enum zone_type z; 7541 int cpu; 7542 7543 pgdat_init_internals(pgdat); 7544 7545 if (pgdat->per_cpu_nodestats == &boot_nodestats) 7546 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 7547 7548 /* 7549 * Reset the nr_zones, order and highest_zoneidx before reuse. 7550 * Note that kswapd will init kswapd_highest_zoneidx properly 7551 * when it starts in the near future. 7552 */ 7553 pgdat->nr_zones = 0; 7554 pgdat->kswapd_order = 0; 7555 pgdat->kswapd_highest_zoneidx = 0; 7556 pgdat->node_start_pfn = 0; 7557 for_each_online_cpu(cpu) { 7558 struct per_cpu_nodestat *p; 7559 7560 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 7561 memset(p, 0, sizeof(*p)); 7562 } 7563 7564 for (z = 0; z < MAX_NR_ZONES; z++) 7565 zone_init_internals(&pgdat->node_zones[z], z, nid, 0); 7566 } 7567 #endif 7568 7569 /* 7570 * Set up the zone data structures: 7571 * - mark all pages reserved 7572 * - mark all memory queues empty 7573 * - clear the memory bitmaps 7574 * 7575 * NOTE: pgdat should get zeroed by caller. 7576 * NOTE: this function is only called during early init. 
7577 */ 7578 static void __init free_area_init_core(struct pglist_data *pgdat) 7579 { 7580 enum zone_type j; 7581 int nid = pgdat->node_id; 7582 7583 pgdat_init_internals(pgdat); 7584 pgdat->per_cpu_nodestats = &boot_nodestats; 7585 7586 for (j = 0; j < MAX_NR_ZONES; j++) { 7587 struct zone *zone = pgdat->node_zones + j; 7588 unsigned long size, freesize, memmap_pages; 7589 7590 size = zone->spanned_pages; 7591 freesize = zone->present_pages; 7592 7593 /* 7594 * Adjust freesize so that it accounts for how much memory 7595 * is used by this zone for memmap. This affects the watermark 7596 * and per-cpu initialisations 7597 */ 7598 memmap_pages = calc_memmap_size(size, freesize); 7599 if (!is_highmem_idx(j)) { 7600 if (freesize >= memmap_pages) { 7601 freesize -= memmap_pages; 7602 if (memmap_pages) 7603 pr_debug(" %s zone: %lu pages used for memmap\n", 7604 zone_names[j], memmap_pages); 7605 } else 7606 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", 7607 zone_names[j], memmap_pages, freesize); 7608 } 7609 7610 /* Account for reserved pages */ 7611 if (j == 0 && freesize > dma_reserve) { 7612 freesize -= dma_reserve; 7613 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); 7614 } 7615 7616 if (!is_highmem_idx(j)) 7617 nr_kernel_pages += freesize; 7618 /* Charge for highmem memmap if there are enough kernel pages */ 7619 else if (nr_kernel_pages > memmap_pages * 2) 7620 nr_kernel_pages -= memmap_pages; 7621 nr_all_pages += freesize; 7622 7623 /* 7624 * Set an approximate value for lowmem here, it will be adjusted 7625 * when the bootmem allocator frees pages into the buddy system. 7626 * And all highmem pages will be managed by the buddy system. 7627 */ 7628 zone_init_internals(zone, j, nid, freesize); 7629 7630 if (!size) 7631 continue; 7632 7633 set_pageblock_order(); 7634 setup_usemap(zone); 7635 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 7636 } 7637 } 7638 7639 #ifdef CONFIG_FLATMEM 7640 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 7641 { 7642 unsigned long __maybe_unused start = 0; 7643 unsigned long __maybe_unused offset = 0; 7644 7645 /* Skip empty nodes */ 7646 if (!pgdat->node_spanned_pages) 7647 return; 7648 7649 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 7650 offset = pgdat->node_start_pfn - start; 7651 /* ia64 gets its own node_mem_map, before this, without bootmem */ 7652 if (!pgdat->node_mem_map) { 7653 unsigned long size, end; 7654 struct page *map; 7655 7656 /* 7657 * The zone's endpoints aren't required to be MAX_ORDER 7658 * aligned but the node_mem_map endpoints must be in order 7659 * for the buddy allocator to function correctly. 
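 *
 * Illustrative values only, assuming MAX_ORDER_NR_PAGES is 1024 (the
 * common default): a node_start_pfn of 0x12345 is rounded down to a
 * start of 0x12000 with an offset of 0x345, and the end PFN is rounded
 * up to the next multiple of 1024, so node_mem_map covers a superset of
 * the node's own PFN range.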
7660 */ 7661 end = pgdat_end_pfn(pgdat); 7662 end = ALIGN(end, MAX_ORDER_NR_PAGES); 7663 size = (end - start) * sizeof(struct page); 7664 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 7665 pgdat->node_id, false); 7666 if (!map) 7667 panic("Failed to allocate %ld bytes for node %d memory map\n", 7668 size, pgdat->node_id); 7669 pgdat->node_mem_map = map + offset; 7670 } 7671 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 7672 __func__, pgdat->node_id, (unsigned long)pgdat, 7673 (unsigned long)pgdat->node_mem_map); 7674 #ifndef CONFIG_NUMA 7675 /* 7676 * With no DISCONTIG, the global mem_map is just set as node 0's 7677 */ 7678 if (pgdat == NODE_DATA(0)) { 7679 mem_map = NODE_DATA(0)->node_mem_map; 7680 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 7681 mem_map -= offset; 7682 } 7683 #endif 7684 } 7685 #else 7686 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 7687 #endif /* CONFIG_FLATMEM */ 7688 7689 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 7690 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 7691 { 7692 pgdat->first_deferred_pfn = ULONG_MAX; 7693 } 7694 #else 7695 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 7696 #endif 7697 7698 static void __init free_area_init_node(int nid) 7699 { 7700 pg_data_t *pgdat = NODE_DATA(nid); 7701 unsigned long start_pfn = 0; 7702 unsigned long end_pfn = 0; 7703 7704 /* pg_data_t should be reset to zero when it's allocated */ 7705 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 7706 7707 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 7708 7709 pgdat->node_id = nid; 7710 pgdat->node_start_pfn = start_pfn; 7711 pgdat->per_cpu_nodestats = NULL; 7712 7713 if (start_pfn != end_pfn) { 7714 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 7715 (u64)start_pfn << PAGE_SHIFT, 7716 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 7717 } else { 7718 pr_info("Initmem setup node %d as memoryless\n", nid); 7719 } 7720 7721 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 7722 7723 alloc_node_mem_map(pgdat); 7724 pgdat_set_deferred_range(pgdat); 7725 7726 free_area_init_core(pgdat); 7727 } 7728 7729 static void __init free_area_init_memoryless_node(int nid) 7730 { 7731 free_area_init_node(nid); 7732 } 7733 7734 #if MAX_NUMNODES > 1 7735 /* 7736 * Figure out the number of possible node ids. 7737 */ 7738 void __init setup_nr_node_ids(void) 7739 { 7740 unsigned int highest; 7741 7742 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 7743 nr_node_ids = highest + 1; 7744 } 7745 #endif 7746 7747 /** 7748 * node_map_pfn_alignment - determine the maximum internode alignment 7749 * 7750 * This function should be called after node map is populated and sorted. 7751 * It calculates the maximum power of two alignment which can distinguish 7752 * all the nodes. 7753 * 7754 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 7755 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 7756 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 7757 * shifted, 1GiB is enough and this function will indicate so. 7758 * 7759 * This is used to test whether pfn -> nid mapping of the chosen memory 7760 * model has fine enough granularity to avoid incorrect mapping for the 7761 * populated node map. 7762 * 7763 * Return: the determined alignment in pfn's. 0 if there is no alignment 7764 * requirement (single node). 
7765 */ 7766 unsigned long __init node_map_pfn_alignment(void) 7767 { 7768 unsigned long accl_mask = 0, last_end = 0; 7769 unsigned long start, end, mask; 7770 int last_nid = NUMA_NO_NODE; 7771 int i, nid; 7772 7773 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 7774 if (!start || last_nid < 0 || last_nid == nid) { 7775 last_nid = nid; 7776 last_end = end; 7777 continue; 7778 } 7779 7780 /* 7781 * Start with a mask granular enough to pin-point to the 7782 * start pfn and tick off bits one-by-one until it becomes 7783 * too coarse to separate the current node from the last. 7784 */ 7785 mask = ~((1 << __ffs(start)) - 1); 7786 while (mask && last_end <= (start & (mask << 1))) 7787 mask <<= 1; 7788 7789 /* accumulate all internode masks */ 7790 accl_mask |= mask; 7791 } 7792 7793 /* convert mask to number of pages */ 7794 return ~accl_mask + 1; 7795 } 7796 7797 /** 7798 * find_min_pfn_with_active_regions - Find the minimum PFN registered 7799 * 7800 * Return: the minimum PFN based on information provided via 7801 * memblock_set_node(). 7802 */ 7803 unsigned long __init find_min_pfn_with_active_regions(void) 7804 { 7805 return PHYS_PFN(memblock_start_of_DRAM()); 7806 } 7807 7808 /* 7809 * early_calculate_totalpages() 7810 * Sum pages in active regions for movable zone. 7811 * Populate N_MEMORY for calculating usable_nodes. 7812 */ 7813 static unsigned long __init early_calculate_totalpages(void) 7814 { 7815 unsigned long totalpages = 0; 7816 unsigned long start_pfn, end_pfn; 7817 int i, nid; 7818 7819 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 7820 unsigned long pages = end_pfn - start_pfn; 7821 7822 totalpages += pages; 7823 if (pages) 7824 node_set_state(nid, N_MEMORY); 7825 } 7826 return totalpages; 7827 } 7828 7829 /* 7830 * Find the PFN the Movable zone begins in each node. Kernel memory 7831 * is spread evenly between nodes as long as the nodes have enough 7832 * memory. When they don't, some nodes will have more kernelcore than 7833 * others 7834 */ 7835 static void __init find_zone_movable_pfns_for_nodes(void) 7836 { 7837 int i, nid; 7838 unsigned long usable_startpfn; 7839 unsigned long kernelcore_node, kernelcore_remaining; 7840 /* save the state before borrow the nodemask */ 7841 nodemask_t saved_node_state = node_states[N_MEMORY]; 7842 unsigned long totalpages = early_calculate_totalpages(); 7843 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 7844 struct memblock_region *r; 7845 7846 /* Need to find movable_zone earlier when movable_node is specified. */ 7847 find_usable_zone_for_movable(); 7848 7849 /* 7850 * If movable_node is specified, ignore kernelcore and movablecore 7851 * options. 7852 */ 7853 if (movable_node_is_enabled()) { 7854 for_each_mem_region(r) { 7855 if (!memblock_is_hotpluggable(r)) 7856 continue; 7857 7858 nid = memblock_get_region_node(r); 7859 7860 usable_startpfn = PFN_DOWN(r->base); 7861 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 
7862 min(usable_startpfn, zone_movable_pfn[nid]) : 7863 usable_startpfn; 7864 } 7865 7866 goto out2; 7867 } 7868 7869 /* 7870 * If kernelcore=mirror is specified, ignore movablecore option 7871 */ 7872 if (mirrored_kernelcore) { 7873 bool mem_below_4gb_not_mirrored = false; 7874 7875 for_each_mem_region(r) { 7876 if (memblock_is_mirror(r)) 7877 continue; 7878 7879 nid = memblock_get_region_node(r); 7880 7881 usable_startpfn = memblock_region_memory_base_pfn(r); 7882 7883 if (usable_startpfn < PHYS_PFN(SZ_4G)) { 7884 mem_below_4gb_not_mirrored = true; 7885 continue; 7886 } 7887 7888 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 7889 min(usable_startpfn, zone_movable_pfn[nid]) : 7890 usable_startpfn; 7891 } 7892 7893 if (mem_below_4gb_not_mirrored) 7894 pr_warn("This configuration results in unmirrored kernel memory.\n"); 7895 7896 goto out2; 7897 } 7898 7899 /* 7900 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 7901 * amount of necessary memory. 7902 */ 7903 if (required_kernelcore_percent) 7904 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 7905 10000UL; 7906 if (required_movablecore_percent) 7907 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 7908 10000UL; 7909 7910 /* 7911 * If movablecore= was specified, calculate what size of 7912 * kernelcore that corresponds so that memory usable for 7913 * any allocation type is evenly spread. If both kernelcore 7914 * and movablecore are specified, then the value of kernelcore 7915 * will be used for required_kernelcore if it's greater than 7916 * what movablecore would have allowed. 7917 */ 7918 if (required_movablecore) { 7919 unsigned long corepages; 7920 7921 /* 7922 * Round-up so that ZONE_MOVABLE is at least as large as what 7923 * was requested by the user 7924 */ 7925 required_movablecore = 7926 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 7927 required_movablecore = min(totalpages, required_movablecore); 7928 corepages = totalpages - required_movablecore; 7929 7930 required_kernelcore = max(required_kernelcore, corepages); 7931 } 7932 7933 /* 7934 * If kernelcore was not specified or kernelcore size is larger 7935 * than totalpages, there is no ZONE_MOVABLE. 7936 */ 7937 if (!required_kernelcore || required_kernelcore >= totalpages) 7938 goto out; 7939 7940 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 7941 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 7942 7943 restart: 7944 /* Spread kernelcore memory as evenly as possible throughout nodes */ 7945 kernelcore_node = required_kernelcore / usable_nodes; 7946 for_each_node_state(nid, N_MEMORY) { 7947 unsigned long start_pfn, end_pfn; 7948 7949 /* 7950 * Recalculate kernelcore_node if the division per node 7951 * now exceeds what is necessary to satisfy the requested 7952 * amount of memory for the kernel 7953 */ 7954 if (required_kernelcore < kernelcore_node) 7955 kernelcore_node = required_kernelcore / usable_nodes; 7956 7957 /* 7958 * As the map is walked, we track how much memory is usable 7959 * by the kernel using kernelcore_remaining. 
When it is 7960 * 0, the rest of the node is usable by ZONE_MOVABLE 7961 */ 7962 kernelcore_remaining = kernelcore_node; 7963 7964 /* Go through each range of PFNs within this node */ 7965 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 7966 unsigned long size_pages; 7967 7968 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 7969 if (start_pfn >= end_pfn) 7970 continue; 7971 7972 /* Account for what is only usable for kernelcore */ 7973 if (start_pfn < usable_startpfn) { 7974 unsigned long kernel_pages; 7975 kernel_pages = min(end_pfn, usable_startpfn) 7976 - start_pfn; 7977 7978 kernelcore_remaining -= min(kernel_pages, 7979 kernelcore_remaining); 7980 required_kernelcore -= min(kernel_pages, 7981 required_kernelcore); 7982 7983 /* Continue if range is now fully accounted */ 7984 if (end_pfn <= usable_startpfn) { 7985 7986 /* 7987 * Push zone_movable_pfn to the end so 7988 * that if we have to rebalance 7989 * kernelcore across nodes, we will 7990 * not double account here 7991 */ 7992 zone_movable_pfn[nid] = end_pfn; 7993 continue; 7994 } 7995 start_pfn = usable_startpfn; 7996 } 7997 7998 /* 7999 * The usable PFN range for ZONE_MOVABLE is from 8000 * start_pfn->end_pfn. Calculate size_pages as the 8001 * number of pages used as kernelcore 8002 */ 8003 size_pages = end_pfn - start_pfn; 8004 if (size_pages > kernelcore_remaining) 8005 size_pages = kernelcore_remaining; 8006 zone_movable_pfn[nid] = start_pfn + size_pages; 8007 8008 /* 8009 * Some kernelcore has been met, update counts and 8010 * break if the kernelcore for this node has been 8011 * satisfied 8012 */ 8013 required_kernelcore -= min(required_kernelcore, 8014 size_pages); 8015 kernelcore_remaining -= size_pages; 8016 if (!kernelcore_remaining) 8017 break; 8018 } 8019 } 8020 8021 /* 8022 * If there is still required_kernelcore, we do another pass with one 8023 * less node in the count. This will push zone_movable_pfn[nid] further 8024 * along on the nodes that still have memory until kernelcore is 8025 * satisfied 8026 */ 8027 usable_nodes--; 8028 if (usable_nodes && required_kernelcore > usable_nodes) 8029 goto restart; 8030 8031 out2: 8032 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 8033 for (nid = 0; nid < MAX_NUMNODES; nid++) { 8034 unsigned long start_pfn, end_pfn; 8035 8036 zone_movable_pfn[nid] = 8037 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 8038 8039 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 8040 if (zone_movable_pfn[nid] >= end_pfn) 8041 zone_movable_pfn[nid] = 0; 8042 } 8043 8044 out: 8045 /* restore the node_state */ 8046 node_states[N_MEMORY] = saved_node_state; 8047 } 8048 8049 /* Any regular or high memory on that node ? */ 8050 static void check_for_memory(pg_data_t *pgdat, int nid) 8051 { 8052 enum zone_type zone_type; 8053 8054 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 8055 struct zone *zone = &pgdat->node_zones[zone_type]; 8056 if (populated_zone(zone)) { 8057 if (IS_ENABLED(CONFIG_HIGHMEM)) 8058 node_set_state(nid, N_HIGH_MEMORY); 8059 if (zone_type <= ZONE_NORMAL) 8060 node_set_state(nid, N_NORMAL_MEMORY); 8061 break; 8062 } 8063 } 8064 } 8065 8066 /* 8067 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. 
For 8068 * such cases we allow max_zone_pfn sorted in the descending order 8069 */ 8070 bool __weak arch_has_descending_max_zone_pfns(void) 8071 { 8072 return false; 8073 } 8074 8075 /** 8076 * free_area_init - Initialise all pg_data_t and zone data 8077 * @max_zone_pfn: an array of max PFNs for each zone 8078 * 8079 * This will call free_area_init_node() for each active node in the system. 8080 * Using the page ranges provided by memblock_set_node(), the size of each 8081 * zone in each node and their holes is calculated. If the maximum PFN 8082 * between two adjacent zones match, it is assumed that the zone is empty. 8083 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 8084 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 8085 * starts where the previous one ended. For example, ZONE_DMA32 starts 8086 * at arch_max_dma_pfn. 8087 */ 8088 void __init free_area_init(unsigned long *max_zone_pfn) 8089 { 8090 unsigned long start_pfn, end_pfn; 8091 int i, nid, zone; 8092 bool descending; 8093 8094 /* Record where the zone boundaries are */ 8095 memset(arch_zone_lowest_possible_pfn, 0, 8096 sizeof(arch_zone_lowest_possible_pfn)); 8097 memset(arch_zone_highest_possible_pfn, 0, 8098 sizeof(arch_zone_highest_possible_pfn)); 8099 8100 start_pfn = find_min_pfn_with_active_regions(); 8101 descending = arch_has_descending_max_zone_pfns(); 8102 8103 for (i = 0; i < MAX_NR_ZONES; i++) { 8104 if (descending) 8105 zone = MAX_NR_ZONES - i - 1; 8106 else 8107 zone = i; 8108 8109 if (zone == ZONE_MOVABLE) 8110 continue; 8111 8112 end_pfn = max(max_zone_pfn[zone], start_pfn); 8113 arch_zone_lowest_possible_pfn[zone] = start_pfn; 8114 arch_zone_highest_possible_pfn[zone] = end_pfn; 8115 8116 start_pfn = end_pfn; 8117 } 8118 8119 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 8120 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 8121 find_zone_movable_pfns_for_nodes(); 8122 8123 /* Print out the zone ranges */ 8124 pr_info("Zone ranges:\n"); 8125 for (i = 0; i < MAX_NR_ZONES; i++) { 8126 if (i == ZONE_MOVABLE) 8127 continue; 8128 pr_info(" %-8s ", zone_names[i]); 8129 if (arch_zone_lowest_possible_pfn[i] == 8130 arch_zone_highest_possible_pfn[i]) 8131 pr_cont("empty\n"); 8132 else 8133 pr_cont("[mem %#018Lx-%#018Lx]\n", 8134 (u64)arch_zone_lowest_possible_pfn[i] 8135 << PAGE_SHIFT, 8136 ((u64)arch_zone_highest_possible_pfn[i] 8137 << PAGE_SHIFT) - 1); 8138 } 8139 8140 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 8141 pr_info("Movable zone start for each node\n"); 8142 for (i = 0; i < MAX_NUMNODES; i++) { 8143 if (zone_movable_pfn[i]) 8144 pr_info(" Node %d: %#018Lx\n", i, 8145 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 8146 } 8147 8148 /* 8149 * Print out the early node map, and initialize the 8150 * subsection-map relative to active online memory ranges to 8151 * enable future "sub-section" extensions of the memory map. 
8152 */ 8153 pr_info("Early memory node ranges\n"); 8154 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 8155 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 8156 (u64)start_pfn << PAGE_SHIFT, 8157 ((u64)end_pfn << PAGE_SHIFT) - 1); 8158 subsection_map_init(start_pfn, end_pfn - start_pfn); 8159 } 8160 8161 /* Initialise every node */ 8162 mminit_verify_pageflags_layout(); 8163 setup_nr_node_ids(); 8164 for_each_node(nid) { 8165 pg_data_t *pgdat; 8166 8167 if (!node_online(nid)) { 8168 pr_info("Initializing node %d as memoryless\n", nid); 8169 8170 /* Allocator not initialized yet */ 8171 pgdat = arch_alloc_nodedata(nid); 8172 if (!pgdat) { 8173 pr_err("Cannot allocate %zuB for node %d.\n", 8174 sizeof(*pgdat), nid); 8175 continue; 8176 } 8177 arch_refresh_nodedata(nid, pgdat); 8178 free_area_init_memoryless_node(nid); 8179 8180 /* 8181 * We do not want to confuse userspace by sysfs 8182 * files/directories for node without any memory 8183 * attached to it, so this node is not marked as 8184 * N_MEMORY and not marked online so that no sysfs 8185 * hierarchy will be created via register_one_node for 8186 * it. The pgdat will get fully initialized by 8187 * hotadd_init_pgdat() when memory is hotplugged into 8188 * this node. 8189 */ 8190 continue; 8191 } 8192 8193 pgdat = NODE_DATA(nid); 8194 free_area_init_node(nid); 8195 8196 /* Any memory on that node */ 8197 if (pgdat->node_present_pages) 8198 node_set_state(nid, N_MEMORY); 8199 check_for_memory(pgdat, nid); 8200 } 8201 8202 memmap_init(); 8203 } 8204 8205 static int __init cmdline_parse_core(char *p, unsigned long *core, 8206 unsigned long *percent) 8207 { 8208 unsigned long long coremem; 8209 char *endptr; 8210 8211 if (!p) 8212 return -EINVAL; 8213 8214 /* Value may be a percentage of total memory, otherwise bytes */ 8215 coremem = simple_strtoull(p, &endptr, 0); 8216 if (*endptr == '%') { 8217 /* Paranoid check for percent values greater than 100 */ 8218 WARN_ON(coremem > 100); 8219 8220 *percent = coremem; 8221 } else { 8222 coremem = memparse(p, &p); 8223 /* Paranoid check that UL is enough for the coremem value */ 8224 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 8225 8226 *core = coremem >> PAGE_SHIFT; 8227 *percent = 0UL; 8228 } 8229 return 0; 8230 } 8231 8232 /* 8233 * kernelcore=size sets the amount of memory for use for allocations that 8234 * cannot be reclaimed or migrated. 8235 */ 8236 static int __init cmdline_parse_kernelcore(char *p) 8237 { 8238 /* parse kernelcore=mirror */ 8239 if (parse_option_str(p, "mirror")) { 8240 mirrored_kernelcore = true; 8241 return 0; 8242 } 8243 8244 return cmdline_parse_core(p, &required_kernelcore, 8245 &required_kernelcore_percent); 8246 } 8247 8248 /* 8249 * movablecore=size sets the amount of memory for use for allocations that 8250 * can be reclaimed or migrated. 
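 *
 * Example usage on the kernel command line (illustrative values):
 * "kernelcore=512M", "kernelcore=30%" or "kernelcore=mirror", and
 * correspondingly "movablecore=2G" or "movablecore=25%". Byte sizes are
 * parsed with memparse(); percentages are taken relative to total
 * memory.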
8251 */ 8252 static int __init cmdline_parse_movablecore(char *p) 8253 { 8254 return cmdline_parse_core(p, &required_movablecore, 8255 &required_movablecore_percent); 8256 } 8257 8258 early_param("kernelcore", cmdline_parse_kernelcore); 8259 early_param("movablecore", cmdline_parse_movablecore); 8260 8261 void adjust_managed_page_count(struct page *page, long count) 8262 { 8263 atomic_long_add(count, &page_zone(page)->managed_pages); 8264 totalram_pages_add(count); 8265 #ifdef CONFIG_HIGHMEM 8266 if (PageHighMem(page)) 8267 totalhigh_pages_add(count); 8268 #endif 8269 } 8270 EXPORT_SYMBOL(adjust_managed_page_count); 8271 8272 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 8273 { 8274 void *pos; 8275 unsigned long pages = 0; 8276 8277 start = (void *)PAGE_ALIGN((unsigned long)start); 8278 end = (void *)((unsigned long)end & PAGE_MASK); 8279 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 8280 struct page *page = virt_to_page(pos); 8281 void *direct_map_addr; 8282 8283 /* 8284 * 'direct_map_addr' might be different from 'pos' 8285 * because some architectures' virt_to_page() 8286 * work with aliases. Getting the direct map 8287 * address ensures that we get a _writeable_ 8288 * alias for the memset(). 8289 */ 8290 direct_map_addr = page_address(page); 8291 /* 8292 * Perform a kasan-unchecked memset() since this memory 8293 * has not been initialized. 8294 */ 8295 direct_map_addr = kasan_reset_tag(direct_map_addr); 8296 if ((unsigned int)poison <= 0xFF) 8297 memset(direct_map_addr, poison, PAGE_SIZE); 8298 8299 free_reserved_page(page); 8300 } 8301 8302 if (pages && s) 8303 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 8304 8305 return pages; 8306 } 8307 8308 void __init mem_init_print_info(void) 8309 { 8310 unsigned long physpages, codesize, datasize, rosize, bss_size; 8311 unsigned long init_code_size, init_data_size; 8312 8313 physpages = get_num_physpages(); 8314 codesize = _etext - _stext; 8315 datasize = _edata - _sdata; 8316 rosize = __end_rodata - __start_rodata; 8317 bss_size = __bss_stop - __bss_start; 8318 init_data_size = __init_end - __init_begin; 8319 init_code_size = _einittext - _sinittext; 8320 8321 /* 8322 * Detect special cases and adjust section sizes accordingly: 8323 * 1) .init.* may be embedded into .data sections 8324 * 2) .init.text.* may be out of [__init_begin, __init_end], 8325 * please refer to arch/tile/kernel/vmlinux.lds.S. 8326 * 3) .rodata.* may be embedded into .text or .data sections. 
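 *
 * The adj_init_size() helper below handles these overlaps generically:
 * it subtracts 'adj' from 'size' whenever the symbol 'pos' falls inside
 * [start, end) and 'size' is large enough to stay positive, so a
 * section accounted twice is only counted once.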
8327 */ 8328 #define adj_init_size(start, end, size, pos, adj) \ 8329 do { \ 8330 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ 8331 size -= adj; \ 8332 } while (0) 8333 8334 adj_init_size(__init_begin, __init_end, init_data_size, 8335 _sinittext, init_code_size); 8336 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 8337 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 8338 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 8339 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 8340 8341 #undef adj_init_size 8342 8343 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 8344 #ifdef CONFIG_HIGHMEM 8345 ", %luK highmem" 8346 #endif 8347 ")\n", 8348 K(nr_free_pages()), K(physpages), 8349 codesize >> 10, datasize >> 10, rosize >> 10, 8350 (init_data_size + init_code_size) >> 10, bss_size >> 10, 8351 K(physpages - totalram_pages() - totalcma_pages), 8352 K(totalcma_pages) 8353 #ifdef CONFIG_HIGHMEM 8354 , K(totalhigh_pages()) 8355 #endif 8356 ); 8357 } 8358 8359 /** 8360 * set_dma_reserve - set the specified number of pages reserved in the first zone 8361 * @new_dma_reserve: The number of pages to mark reserved 8362 * 8363 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 8364 * In the DMA zone, a significant percentage may be consumed by kernel image 8365 * and other unfreeable allocations which can skew the watermarks badly. This 8366 * function may optionally be used to account for unfreeable pages in the 8367 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 8368 * smaller per-cpu batchsize. 8369 */ 8370 void __init set_dma_reserve(unsigned long new_dma_reserve) 8371 { 8372 dma_reserve = new_dma_reserve; 8373 } 8374 8375 static int page_alloc_cpu_dead(unsigned int cpu) 8376 { 8377 struct zone *zone; 8378 8379 lru_add_drain_cpu(cpu); 8380 mlock_page_drain_remote(cpu); 8381 drain_pages(cpu); 8382 8383 /* 8384 * Spill the event counters of the dead processor 8385 * into the current processors event counters. 8386 * This artificially elevates the count of the current 8387 * processor. 8388 */ 8389 vm_events_fold_cpu(cpu); 8390 8391 /* 8392 * Zero the differential counters of the dead processor 8393 * so that the vm statistics are consistent. 8394 * 8395 * This is only okay since the processor is dead and cannot 8396 * race with what we are doing. 
8397 */ 8398 cpu_vm_stats_fold(cpu); 8399 8400 for_each_populated_zone(zone) 8401 zone_pcp_update(zone, 0); 8402 8403 return 0; 8404 } 8405 8406 static int page_alloc_cpu_online(unsigned int cpu) 8407 { 8408 struct zone *zone; 8409 8410 for_each_populated_zone(zone) 8411 zone_pcp_update(zone, 1); 8412 return 0; 8413 } 8414 8415 #ifdef CONFIG_NUMA 8416 int hashdist = HASHDIST_DEFAULT; 8417 8418 static int __init set_hashdist(char *str) 8419 { 8420 if (!str) 8421 return 0; 8422 hashdist = simple_strtoul(str, &str, 0); 8423 return 1; 8424 } 8425 __setup("hashdist=", set_hashdist); 8426 #endif 8427 8428 void __init page_alloc_init(void) 8429 { 8430 int ret; 8431 8432 #ifdef CONFIG_NUMA 8433 if (num_node_state(N_MEMORY) == 1) 8434 hashdist = 0; 8435 #endif 8436 8437 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 8438 "mm/page_alloc:pcp", 8439 page_alloc_cpu_online, 8440 page_alloc_cpu_dead); 8441 WARN_ON(ret < 0); 8442 } 8443 8444 /* 8445 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 8446 * or min_free_kbytes changes. 8447 */ 8448 static void calculate_totalreserve_pages(void) 8449 { 8450 struct pglist_data *pgdat; 8451 unsigned long reserve_pages = 0; 8452 enum zone_type i, j; 8453 8454 for_each_online_pgdat(pgdat) { 8455 8456 pgdat->totalreserve_pages = 0; 8457 8458 for (i = 0; i < MAX_NR_ZONES; i++) { 8459 struct zone *zone = pgdat->node_zones + i; 8460 long max = 0; 8461 unsigned long managed_pages = zone_managed_pages(zone); 8462 8463 /* Find valid and maximum lowmem_reserve in the zone */ 8464 for (j = i; j < MAX_NR_ZONES; j++) { 8465 if (zone->lowmem_reserve[j] > max) 8466 max = zone->lowmem_reserve[j]; 8467 } 8468 8469 /* we treat the high watermark as reserved pages. */ 8470 max += high_wmark_pages(zone); 8471 8472 if (max > managed_pages) 8473 max = managed_pages; 8474 8475 pgdat->totalreserve_pages += max; 8476 8477 reserve_pages += max; 8478 } 8479 } 8480 totalreserve_pages = reserve_pages; 8481 } 8482 8483 /* 8484 * setup_per_zone_lowmem_reserve - called whenever 8485 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 8486 * has a correct pages reserved value, so an adequate number of 8487 * pages are left in the zone after a successful __alloc_pages(). 
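 *
 * Rough worked example (hypothetical sizes): with a ratio of 256 for a
 * lower zone and 4,194,304 managed pages sitting in the zones above it,
 * that lower zone reserves roughly 16,384 pages (managed_pages / ratio)
 * which allocations targeting the higher zones may not dip into.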
8488 */ 8489 static void setup_per_zone_lowmem_reserve(void) 8490 { 8491 struct pglist_data *pgdat; 8492 enum zone_type i, j; 8493 8494 for_each_online_pgdat(pgdat) { 8495 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 8496 struct zone *zone = &pgdat->node_zones[i]; 8497 int ratio = sysctl_lowmem_reserve_ratio[i]; 8498 bool clear = !ratio || !zone_managed_pages(zone); 8499 unsigned long managed_pages = 0; 8500 8501 for (j = i + 1; j < MAX_NR_ZONES; j++) { 8502 struct zone *upper_zone = &pgdat->node_zones[j]; 8503 8504 managed_pages += zone_managed_pages(upper_zone); 8505 8506 if (clear) 8507 zone->lowmem_reserve[j] = 0; 8508 else 8509 zone->lowmem_reserve[j] = managed_pages / ratio; 8510 } 8511 } 8512 } 8513 8514 /* update totalreserve_pages */ 8515 calculate_totalreserve_pages(); 8516 } 8517 8518 static void __setup_per_zone_wmarks(void) 8519 { 8520 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 8521 unsigned long lowmem_pages = 0; 8522 struct zone *zone; 8523 unsigned long flags; 8524 8525 /* Calculate total number of !ZONE_HIGHMEM pages */ 8526 for_each_zone(zone) { 8527 if (!is_highmem(zone)) 8528 lowmem_pages += zone_managed_pages(zone); 8529 } 8530 8531 for_each_zone(zone) { 8532 u64 tmp; 8533 8534 spin_lock_irqsave(&zone->lock, flags); 8535 tmp = (u64)pages_min * zone_managed_pages(zone); 8536 do_div(tmp, lowmem_pages); 8537 if (is_highmem(zone)) { 8538 /* 8539 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 8540 * need highmem pages, so cap pages_min to a small 8541 * value here. 8542 * 8543 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 8544 * deltas control async page reclaim, and so should 8545 * not be capped for highmem. 8546 */ 8547 unsigned long min_pages; 8548 8549 min_pages = zone_managed_pages(zone) / 1024; 8550 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 8551 zone->_watermark[WMARK_MIN] = min_pages; 8552 } else { 8553 /* 8554 * If it's a lowmem zone, reserve a number of pages 8555 * proportionate to the zone's size. 8556 */ 8557 zone->_watermark[WMARK_MIN] = tmp; 8558 } 8559 8560 /* 8561 * Set the kswapd watermarks distance according to the 8562 * scale factor in proportion to available memory, but 8563 * ensure a minimum size on small systems. 8564 */ 8565 tmp = max_t(u64, tmp >> 2, 8566 mult_frac(zone_managed_pages(zone), 8567 watermark_scale_factor, 10000)); 8568 8569 zone->watermark_boost = 0; 8570 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 8571 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 8572 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 8573 8574 spin_unlock_irqrestore(&zone->lock, flags); 8575 } 8576 8577 /* update totalreserve_pages */ 8578 calculate_totalreserve_pages(); 8579 } 8580 8581 /** 8582 * setup_per_zone_wmarks - called when min_free_kbytes changes 8583 * or when memory is hot-{added|removed} 8584 * 8585 * Ensures that the watermark[min,low,high] values for each zone are set 8586 * correctly with respect to min_free_kbytes. 8587 */ 8588 void setup_per_zone_wmarks(void) 8589 { 8590 struct zone *zone; 8591 static DEFINE_SPINLOCK(lock); 8592 8593 spin_lock(&lock); 8594 __setup_per_zone_wmarks(); 8595 spin_unlock(&lock); 8596 8597 /* 8598 * The watermark size have changed so update the pcpu batch 8599 * and high limits or the limits may be inappropriate. 8600 */ 8601 for_each_zone(zone) 8602 zone_pcp_update(zone, 0); 8603 } 8604 8605 /* 8606 * Initialise min_free_kbytes. 8607 * 8608 * For small machines we want it small (128k min). 
For large machines 8609 * we want it large (256MB max). But it is not linear, because network 8610 * bandwidth does not increase linearly with machine size. We use 8611 * 8612 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 8613 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 8614 * 8615 * which yields 8616 * 8617 * 16MB: 512k 8618 * 32MB: 724k 8619 * 64MB: 1024k 8620 * 128MB: 1448k 8621 * 256MB: 2048k 8622 * 512MB: 2896k 8623 * 1024MB: 4096k 8624 * 2048MB: 5792k 8625 * 4096MB: 8192k 8626 * 8192MB: 11584k 8627 * 16384MB: 16384k 8628 */ 8629 void calculate_min_free_kbytes(void) 8630 { 8631 unsigned long lowmem_kbytes; 8632 int new_min_free_kbytes; 8633 8634 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 8635 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 8636 8637 if (new_min_free_kbytes > user_min_free_kbytes) 8638 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 8639 else 8640 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 8641 new_min_free_kbytes, user_min_free_kbytes); 8642 8643 } 8644 8645 int __meminit init_per_zone_wmark_min(void) 8646 { 8647 calculate_min_free_kbytes(); 8648 setup_per_zone_wmarks(); 8649 refresh_zone_stat_thresholds(); 8650 setup_per_zone_lowmem_reserve(); 8651 8652 #ifdef CONFIG_NUMA 8653 setup_min_unmapped_ratio(); 8654 setup_min_slab_ratio(); 8655 #endif 8656 8657 khugepaged_min_free_kbytes_update(); 8658 8659 return 0; 8660 } 8661 postcore_initcall(init_per_zone_wmark_min) 8662 8663 /* 8664 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 8665 * that we can call two helper functions whenever min_free_kbytes 8666 * changes. 8667 */ 8668 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 8669 void *buffer, size_t *length, loff_t *ppos) 8670 { 8671 int rc; 8672 8673 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8674 if (rc) 8675 return rc; 8676 8677 if (write) { 8678 user_min_free_kbytes = min_free_kbytes; 8679 setup_per_zone_wmarks(); 8680 } 8681 return 0; 8682 } 8683 8684 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 8685 void *buffer, size_t *length, loff_t *ppos) 8686 { 8687 int rc; 8688 8689 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8690 if (rc) 8691 return rc; 8692 8693 if (write) 8694 setup_per_zone_wmarks(); 8695 8696 return 0; 8697 } 8698 8699 #ifdef CONFIG_NUMA 8700 static void setup_min_unmapped_ratio(void) 8701 { 8702 pg_data_t *pgdat; 8703 struct zone *zone; 8704 8705 for_each_online_pgdat(pgdat) 8706 pgdat->min_unmapped_pages = 0; 8707 8708 for_each_zone(zone) 8709 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 8710 sysctl_min_unmapped_ratio) / 100; 8711 } 8712 8713 8714 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 8715 void *buffer, size_t *length, loff_t *ppos) 8716 { 8717 int rc; 8718 8719 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8720 if (rc) 8721 return rc; 8722 8723 setup_min_unmapped_ratio(); 8724 8725 return 0; 8726 } 8727 8728 static void setup_min_slab_ratio(void) 8729 { 8730 pg_data_t *pgdat; 8731 struct zone *zone; 8732 8733 for_each_online_pgdat(pgdat) 8734 pgdat->min_slab_pages = 0; 8735 8736 for_each_zone(zone) 8737 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 8738 sysctl_min_slab_ratio) / 100; 8739 } 8740 8741 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 8742 void *buffer, size_t *length, loff_t *ppos) 8743 
{ 8744 int rc; 8745 8746 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8747 if (rc) 8748 return rc; 8749 8750 setup_min_slab_ratio(); 8751 8752 return 0; 8753 } 8754 #endif 8755 8756 /* 8757 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 8758 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 8759 * whenever sysctl_lowmem_reserve_ratio changes. 8760 * 8761 * The reserve ratio obviously has absolutely no relation with the 8762 * minimum watermarks. The lowmem reserve ratio can only make sense 8763 * if in function of the boot time zone sizes. 8764 */ 8765 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, 8766 void *buffer, size_t *length, loff_t *ppos) 8767 { 8768 int i; 8769 8770 proc_dointvec_minmax(table, write, buffer, length, ppos); 8771 8772 for (i = 0; i < MAX_NR_ZONES; i++) { 8773 if (sysctl_lowmem_reserve_ratio[i] < 1) 8774 sysctl_lowmem_reserve_ratio[i] = 0; 8775 } 8776 8777 setup_per_zone_lowmem_reserve(); 8778 return 0; 8779 } 8780 8781 /* 8782 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 8783 * cpu. It is the fraction of total pages in each zone that a hot per cpu 8784 * pagelist can have before it gets flushed back to buddy allocator. 8785 */ 8786 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, 8787 int write, void *buffer, size_t *length, loff_t *ppos) 8788 { 8789 struct zone *zone; 8790 int old_percpu_pagelist_high_fraction; 8791 int ret; 8792 8793 mutex_lock(&pcp_batch_high_lock); 8794 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 8795 8796 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 8797 if (!write || ret < 0) 8798 goto out; 8799 8800 /* Sanity checking to avoid pcp imbalance */ 8801 if (percpu_pagelist_high_fraction && 8802 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 8803 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 8804 ret = -EINVAL; 8805 goto out; 8806 } 8807 8808 /* No change? */ 8809 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 8810 goto out; 8811 8812 for_each_populated_zone(zone) 8813 zone_set_pageset_high_and_batch(zone, 0); 8814 out: 8815 mutex_unlock(&pcp_batch_high_lock); 8816 return ret; 8817 } 8818 8819 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES 8820 /* 8821 * Returns the number of pages that arch has reserved but 8822 * is not known to alloc_large_system_hash(). 8823 */ 8824 static unsigned long __init arch_reserved_kernel_pages(void) 8825 { 8826 return 0; 8827 } 8828 #endif 8829 8830 /* 8831 * Adaptive scale is meant to reduce sizes of hash tables on large memory 8832 * machines. As memory size is increased the scale is also increased but at 8833 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory 8834 * quadruples the scale is increased by one, which means the size of hash table 8835 * only doubles, instead of quadrupling as well. 8836 * Because 32-bit systems cannot have large physical memory, where this scaling 8837 * makes sense, it is disabled on such platforms. 
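 *
 * A rough illustration (approximate figures): relative to a machine
 * with less than 64G, a 128G machine gets the scale bumped by one and a
 * 512G machine by two, so each quadrupling of memory only doubles the
 * hash table instead of quadrupling it.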
8838 */ 8839 #if __BITS_PER_LONG > 32 8840 #define ADAPT_SCALE_BASE (64ul << 30) 8841 #define ADAPT_SCALE_SHIFT 2 8842 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 8843 #endif 8844 8845 /* 8846 * allocate a large system hash table from bootmem 8847 * - it is assumed that the hash table must contain an exact power-of-2 8848 * quantity of entries 8849 * - limit is the number of hash buckets, not the total allocation size 8850 */ 8851 void *__init alloc_large_system_hash(const char *tablename, 8852 unsigned long bucketsize, 8853 unsigned long numentries, 8854 int scale, 8855 int flags, 8856 unsigned int *_hash_shift, 8857 unsigned int *_hash_mask, 8858 unsigned long low_limit, 8859 unsigned long high_limit) 8860 { 8861 unsigned long long max = high_limit; 8862 unsigned long log2qty, size; 8863 void *table = NULL; 8864 gfp_t gfp_flags; 8865 bool virt; 8866 bool huge; 8867 8868 /* allow the kernel cmdline to have a say */ 8869 if (!numentries) { 8870 /* round applicable memory size up to nearest megabyte */ 8871 numentries = nr_kernel_pages; 8872 numentries -= arch_reserved_kernel_pages(); 8873 8874 /* It isn't necessary when PAGE_SIZE >= 1MB */ 8875 if (PAGE_SHIFT < 20) 8876 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 8877 8878 #if __BITS_PER_LONG > 32 8879 if (!high_limit) { 8880 unsigned long adapt; 8881 8882 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 8883 adapt <<= ADAPT_SCALE_SHIFT) 8884 scale++; 8885 } 8886 #endif 8887 8888 /* limit to 1 bucket per 2^scale bytes of low memory */ 8889 if (scale > PAGE_SHIFT) 8890 numentries >>= (scale - PAGE_SHIFT); 8891 else 8892 numentries <<= (PAGE_SHIFT - scale); 8893 8894 /* Make sure we've got at least a 0-order allocation.. */ 8895 if (unlikely(flags & HASH_SMALL)) { 8896 /* Makes no sense without HASH_EARLY */ 8897 WARN_ON(!(flags & HASH_EARLY)); 8898 if (!(numentries >> *_hash_shift)) { 8899 numentries = 1UL << *_hash_shift; 8900 BUG_ON(!numentries); 8901 } 8902 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 8903 numentries = PAGE_SIZE / bucketsize; 8904 } 8905 numentries = roundup_pow_of_two(numentries); 8906 8907 /* limit allocation size to 1/16 total memory by default */ 8908 if (max == 0) { 8909 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 8910 do_div(max, bucketsize); 8911 } 8912 max = min(max, 0x80000000ULL); 8913 8914 if (numentries < low_limit) 8915 numentries = low_limit; 8916 if (numentries > max) 8917 numentries = max; 8918 8919 log2qty = ilog2(numentries); 8920 8921 gfp_flags = (flags & HASH_ZERO) ? 
GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 8922 do { 8923 virt = false; 8924 size = bucketsize << log2qty; 8925 if (flags & HASH_EARLY) { 8926 if (flags & HASH_ZERO) 8927 table = memblock_alloc(size, SMP_CACHE_BYTES); 8928 else 8929 table = memblock_alloc_raw(size, 8930 SMP_CACHE_BYTES); 8931 } else if (get_order(size) >= MAX_ORDER || hashdist) { 8932 table = vmalloc_huge(size, gfp_flags); 8933 virt = true; 8934 if (table) 8935 huge = is_vm_area_hugepages(table); 8936 } else { 8937 /* 8938 * If bucketsize is not a power-of-two, we may free 8939 * some pages at the end of hash table which 8940 * alloc_pages_exact() automatically does 8941 */ 8942 table = alloc_pages_exact(size, gfp_flags); 8943 kmemleak_alloc(table, size, 1, gfp_flags); 8944 } 8945 } while (!table && size > PAGE_SIZE && --log2qty); 8946 8947 if (!table) 8948 panic("Failed to allocate %s hash table\n", tablename); 8949 8950 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 8951 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, 8952 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); 8953 8954 if (_hash_shift) 8955 *_hash_shift = log2qty; 8956 if (_hash_mask) 8957 *_hash_mask = (1 << log2qty) - 1; 8958 8959 return table; 8960 } 8961 8962 #ifdef CONFIG_CONTIG_ALLOC 8963 #if defined(CONFIG_DYNAMIC_DEBUG) || \ 8964 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) 8965 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 8966 static void alloc_contig_dump_pages(struct list_head *page_list) 8967 { 8968 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 8969 8970 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 8971 struct page *page; 8972 8973 dump_stack(); 8974 list_for_each_entry(page, page_list, lru) 8975 dump_page(page, "migration failure"); 8976 } 8977 } 8978 #else 8979 static inline void alloc_contig_dump_pages(struct list_head *page_list) 8980 { 8981 } 8982 #endif 8983 8984 /* [start, end) must belong to a single zone. */ 8985 int __alloc_contig_migrate_range(struct compact_control *cc, 8986 unsigned long start, unsigned long end) 8987 { 8988 /* This function is based on compact_zone() from compaction.c. */ 8989 unsigned int nr_reclaimed; 8990 unsigned long pfn = start; 8991 unsigned int tries = 0; 8992 int ret = 0; 8993 struct migration_target_control mtc = { 8994 .nid = zone_to_nid(cc->zone), 8995 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 8996 }; 8997 8998 lru_cache_disable(); 8999 9000 while (pfn < end || !list_empty(&cc->migratepages)) { 9001 if (fatal_signal_pending(current)) { 9002 ret = -EINTR; 9003 break; 9004 } 9005 9006 if (list_empty(&cc->migratepages)) { 9007 cc->nr_migratepages = 0; 9008 ret = isolate_migratepages_range(cc, pfn, end); 9009 if (ret && ret != -EAGAIN) 9010 break; 9011 pfn = cc->migrate_pfn; 9012 tries = 0; 9013 } else if (++tries == 5) { 9014 ret = -EBUSY; 9015 break; 9016 } 9017 9018 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 9019 &cc->migratepages); 9020 cc->nr_migratepages -= nr_reclaimed; 9021 9022 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 9023 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 9024 9025 /* 9026 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 9027 * to retry again over this error, so do the same here. 
9028 */ 9029 if (ret == -ENOMEM) 9030 break; 9031 } 9032 9033 lru_cache_enable(); 9034 if (ret < 0) { 9035 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 9036 alloc_contig_dump_pages(&cc->migratepages); 9037 putback_movable_pages(&cc->migratepages); 9038 return ret; 9039 } 9040 return 0; 9041 } 9042 9043 /** 9044 * alloc_contig_range() -- tries to allocate given range of pages 9045 * @start: start PFN to allocate 9046 * @end: one-past-the-last PFN to allocate 9047 * @migratetype: migratetype of the underlying pageblocks (either 9048 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 9049 * in range must have the same migratetype and it must 9050 * be either of the two. 9051 * @gfp_mask: GFP mask to use during compaction 9052 * 9053 * The PFN range does not have to be pageblock aligned. The PFN range must 9054 * belong to a single zone. 9055 * 9056 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 9057 * pageblocks in the range. Once isolated, the pageblocks should not 9058 * be modified by others. 9059 * 9060 * Return: zero on success or negative error code. On success all 9061 * pages which PFN is in [start, end) are allocated for the caller and 9062 * need to be freed with free_contig_range(). 9063 */ 9064 int alloc_contig_range(unsigned long start, unsigned long end, 9065 unsigned migratetype, gfp_t gfp_mask) 9066 { 9067 unsigned long outer_start, outer_end; 9068 int order; 9069 int ret = 0; 9070 9071 struct compact_control cc = { 9072 .nr_migratepages = 0, 9073 .order = -1, 9074 .zone = page_zone(pfn_to_page(start)), 9075 .mode = MIGRATE_SYNC, 9076 .ignore_skip_hint = true, 9077 .no_set_skip_hint = true, 9078 .gfp_mask = current_gfp_context(gfp_mask), 9079 .alloc_contig = true, 9080 }; 9081 INIT_LIST_HEAD(&cc.migratepages); 9082 9083 /* 9084 * What we do here is we mark all pageblocks in range as 9085 * MIGRATE_ISOLATE. Because pageblock and max order pages may 9086 * have different sizes, and due to the way page allocator 9087 * work, start_isolate_page_range() has special handlings for this. 9088 * 9089 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 9090 * migrate the pages from an unaligned range (ie. pages that 9091 * we are interested in). This will put all the pages in 9092 * range back to page allocator as MIGRATE_ISOLATE. 9093 * 9094 * When this is done, we take the pages in range from page 9095 * allocator removing them from the buddy system. This way 9096 * page allocator will never consider using them. 9097 * 9098 * This lets us mark the pageblocks back as 9099 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 9100 * aligned range but not in the unaligned, original range are 9101 * put back to page allocator so that buddy can use them. 9102 */ 9103 9104 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); 9105 if (ret) 9106 goto done; 9107 9108 drain_all_pages(cc.zone); 9109 9110 /* 9111 * In case of -EBUSY, we'd like to know which page causes problem. 9112 * So, just fall through. test_pages_isolated() has a tracepoint 9113 * which will report the busy page. 9114 * 9115 * It is possible that busy pages could become available before 9116 * the call to test_pages_isolated, and the range will actually be 9117 * allocated. So, if we fall through be sure to clear ret so that 9118 * -EBUSY is not accidentally used or returned to caller. 
/**
 * alloc_contig_range() -- tries to allocate the given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock aligned.  The PFN range must
 * belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range.  Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is mark all pageblocks in the range as
	 * MIGRATE_ISOLATE.  Because pageblock and max-order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, start_isolate_page_range() has special handling for this.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (i.e. the pages that
	 * we are interested in).  This will put all the pages in the
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in the range from the page
	 * allocator, removing them from the buddy system.  This way the
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that the buddy allocator
	 * can use them.
	 */

	ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
	if (ret)
		goto done;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes the
	 * problem.  So, just fall through.  test_pages_isolated() has a
	 * tracepoint which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated(), and the range will actually
	 * be allocated.  So, if we fall through, be sure to clear ret so
	 * that -EBUSY is not accidentally used or returned to the caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within pageblock_nr_pages-aligned
	 * blocks that are marked as MIGRATE_ISOLATE.  What's more, all
	 * pages in [start, end) are free in the page allocator.  What we
	 * are going to do is allocate all pages from [start, end)
	 * (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with the pages
	 * that the page allocator holds, i.e. they can be part of
	 * higher-order pages.  Because of this, we reserve the bigger
	 * range and, once this is done, free the pages we are not
	 * interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated and thus won't get removed from the buddy system.
	 */

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = buddy_order(pfn_to_page(outer_start));

		/*
		 * The outer_start page could be a small-order buddy page
		 * that doesn't include the start page.  In that case,
		 * adjust outer_start so that the failing page is reported
		 * properly by the tracepoint in test_pages_isolated().
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(start, end, migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range);
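/*
 * Example (illustrative sketch, not part of the kernel sources): the
 * best-known in-tree user of alloc_contig_range() is the CMA allocator,
 * which already knows a PFN range whose pageblocks are MIGRATE_CMA.  A
 * hypothetical, simplified caller with a bounded retry on transient -EBUSY
 * could look like this (names and retry policy are made up for
 * illustration):
 *
 *	static int example_claim_range(unsigned long pfn, unsigned long count)
 *	{
 *		int retries = 5;
 *		int ret;
 *
 *		do {
 *			ret = alloc_contig_range(pfn, pfn + count,
 *						 MIGRATE_CMA, GFP_KERNEL);
 *		} while (ret == -EBUSY && --retries);
 *
 *		return ret;
 *	}
 *
 * On success, every page with a PFN in [pfn, pfn + count) belongs to the
 * caller and must eventually be returned with free_contig_range(pfn, count).
 */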
static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}

/**
 * alloc_contig_pages() -- tries to find and allocate a contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask to limit search and used during compaction
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range().  It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range().  This routine is intended
 * for allocation requests which cannot be fulfilled with the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary.  If nr_pages is
 * a power of two, then the allocated range is also guaranteed to be aligned
 * to nr_pages (e.g. a 1GB request would be 1GB-aligned).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point.  If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							   gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%lu pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);
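/*
 * Example (illustrative sketch, not part of the kernel sources): a
 * hypothetical driver that needs a large physically contiguous buffer could
 * pair alloc_contig_pages() (CONFIG_CONTIG_ALLOC only) with
 * free_contig_range().  All names and sizes below are made up:
 *
 *	#define EXAMPLE_NR_PAGES	4096	// 16 MiB of 4 KiB pages
 *
 *	static struct page *example_buf;
 *
 *	static int example_acquire(int nid)
 *	{
 *		example_buf = alloc_contig_pages(EXAMPLE_NR_PAGES, GFP_KERNEL,
 *						 nid, NULL);
 *		return example_buf ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_release(void)
 *	{
 *		if (example_buf)
 *			free_contig_range(page_to_pfn(example_buf),
 *					  EXAMPLE_NR_PAGES);
 *	}
 *
 * Because EXAMPLE_NR_PAGES is a power of two, the returned range would also
 * be aligned to that many pages, as documented above.
 */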
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void zone_pcp_update(struct zone *zone, int cpu_online)
{
	mutex_lock(&pcp_batch_high_lock);
	zone_set_pageset_high_and_batch(zone, cpu_online);
	mutex_unlock(&pcp_batch_high_lock);
}

/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all CPUs.  A concurrent page freeing on another CPU that's
 * about to put the page on the pcplist will either finish before the drain
 * and the page will be drained, or observe the new high limit and skip the
 * pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}
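/*
 * Example (illustrative sketch, not part of the kernel sources): as the
 * comment above says, zone_pcp_disable() and zone_pcp_enable() bracket an
 * operation that must not race with pages cached on pcplists.  A
 * hypothetical caller would do:
 *
 *	zone_pcp_disable(zone);
 *	ret = example_operate_on_isolated_pages(zone);
 *	zone_pcp_enable(zone);
 *
 * Note that zone_pcp_disable() leaves pcp_batch_high_lock held and only
 * zone_pcp_enable() releases it, so the two calls must be made from the
 * same (sleepable) context.
 */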
void zone_pcp_reset(struct zone *zone)
{
	int cpu;
	struct per_cpu_zonestat *pzstats;

	if (zone->per_cpu_pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
			drain_zonestat(zone, pzstats);
		}
		free_percpu(zone->per_cpu_pageset);
		free_percpu(zone->per_cpu_zonestats);
		zone->per_cpu_pageset = &boot_pageset;
		zone->per_cpu_zonestats = &boot_zonestats;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone, must not contain holes,
 * must span full sections, and must be isolated before calling this function.
 */
void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;
	unsigned long flags;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * An HWPoisoned page may not be in the buddy system, and its
		 * page_count() may not be 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

/*
 * This function returns a stable result only if called under zone lock.
 */
bool is_free_buddy_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned int order;

	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) &&
		    buddy_order_unsafe(page_head) >= order)
			break;
	}

	return order < MAX_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Break down a higher-order page into sub-pages, and keep our target page
 * out of the buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy, *next_page;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			next_page = page + size;
			current_buddy = page;
		} else {
			next_page = page;
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high, migratetype))
			continue;

		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high,
					 migratetype);
			set_buddy_order(current_buddy, high);
			page = next_page;
		}
	}
}

/*
 * Take a page that will be marked as poisoned off the buddy allocator.
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
					       page_order, migratetype);
			SetPageHWPoisonTakenOff(page);
			if (!is_migrate_isolate(migratetype))
				__mod_zone_freepage_state(zone, -1, migratetype);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}

/*
 * Cancel takeoff done by take_page_off_buddy().
 */
bool put_page_back_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int migratetype = get_pfnblock_migratetype(page, pfn);
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	if (put_page_testzero(page)) {
		ClearPageHWPoisonTakenOff(page);
		__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
		if (TestClearPageHWPoison(page)) {
			ret = true;
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return ret;
}
#endif

#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat) {
		struct zone *zone = &pgdat->node_zones[ZONE_DMA];

		if (managed_zone(zone))
			return true;
	}
	return false;
}
#endif /* CONFIG_ZONE_DMA */
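/*
 * Example (illustrative sketch, not part of the kernel sources): a
 * hypothetical caller can use has_managed_dma() to decide whether falling
 * back to ZONE_DMA is worth attempting at all (with !CONFIG_ZONE_DMA a
 * header stub returns false):
 *
 *	static void *example_alloc_low(size_t size, gfp_t gfp)
 *	{
 *		if (has_managed_dma())
 *			gfp |= GFP_DMA;
 *		return kmalloc(size, gfp);
 *	}
 */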