// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>
#include <linux/delayacct.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
#include "swap.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 * to allow for optimizations when handing back either fresh pages
 * (memory onlining) or untouched pages (page isolation, free page
 * reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/*
 * Don't poison memory with KASAN (only for the tag-based modes).
 * During boot, all non-reserved memblock memory is exposed to page_alloc.
 * Poisoning all that memory lengthens boot time, especially on systems with
 * large amounts of RAM. This flag is used to skip that poisoning.
 * This is only done for the tag-based KASAN modes, as those are able to
 * detect memory corruptions with the memory tags assigned by default.
 * All memory allocated normally after boot gets poisoned as usual.
 */
#define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

struct pagesets {
	local_lock_t lock;
};
static DEFINE_PER_CPU(struct pagesets, pagesets) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/* work_structs for global per-cpu drains */
struct pcpu_drain {
	struct zone *zone;
	struct work_struct work;
};
static DEFINE_MUTEX(pcpu_drain_mutex);
static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_high_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);

static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{

	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);
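
/*
 * Illustrative example (not part of the original source): the defaults
 * chosen by the Kconfig options above can be overridden on the kernel
 * command line via the two early_param() hooks just registered, e.g.
 *
 *	init_on_alloc=1 init_on_free=0
 *
 * kstrtobool() accepts the usual boolean spellings ("0"/"1", "y"/"n",
 * "on"/"off"), so "init_on_free=on" would be parsed the same way.
 */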

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
#ifdef CONFIG_ZONE_DEVICE
	"Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
static bool mirrored_kernelcore __meminitdata;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * prev_end_pfn is a static that contains the end of the previous zone.
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

static __always_inline
unsigned long __get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, mask);
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}
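
/*
 * Illustrative example (not part of the original source; pageblock_order == 9
 * and a 64-bit kernel with SPARSEMEM are assumed): for a pfn at offset 0x2345
 * into its section,
 *
 *	pfn_to_bitidx()	= (0x2345 >> 9) * NR_PAGEBLOCK_BITS = 17 * 4 = 68
 *	word_bitidx	= 68 / BITS_PER_LONG = 1
 *	bitidx		= 68 % BITS_PER_LONG = 4
 *
 * so the four pageblock bits for that block live in bits 4..7 of the second
 * word of the section's usemap; __get_pfnblock_flags_mask() extracts them
 * with a single READ_ONCE() plus shift and mask, and set_pfnblock_flags_mask()
 * updates the same word with a cmpxchg() loop.
 */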

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	int base = order;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		base = PAGE_ALLOC_COSTLY_ORDER + 1;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * base) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}
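
/*
 * Illustrative example (not part of the original source; PAGE_ALLOC_COSTLY_ORDER
 * == 3 and MIGRATE_PCPTYPES == 3 assumed): an order-2 MIGRATE_MOVABLE page maps
 * to
 *
 *	order_to_pindex(MIGRATE_MOVABLE, 2) = 3 * 2 + 1 = 7
 *
 * and pindex_to_order(7) = 7 / 3 = 2 recovers the order. Orders above the
 * costly order are folded into the single "costly + 1" slot per migratetype,
 * which is why pcp_allowed_order() only admits orders up to
 * PAGE_ALLOC_COSTLY_ORDER plus the one THP-sized pageblock_order.
 */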

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page_folio(page));
	free_the_page(page, compound_order(page));
}

static void prep_compound_head(struct page *page, unsigned int order)
{
	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	atomic_set(compound_mapcount_ptr(page), -1);
	atomic_set(compound_pincount_ptr(page), 0);
}

static void prep_compound_tail(struct page *head, int tail_idx)
{
	struct page *p = head + tail_idx;

	p->mapping = TAIL_MAPPING;
	set_compound_head(p, head);
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static int __init early_debug_pagealloc(char *buf)
{
	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	__SetPageGuard(page);
	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return;

	__ClearPageGuard(page);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
void init_mem_debugging_and_hardening(void)
{
	bool page_poisoning_requested = false;

#ifdef CONFIG_PAGE_POISONING
	/*
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options are enabled, enable poisoning.
	 */
	if (page_poisoning_enabled() ||
	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
	      debug_pagealloc_enabled())) {
		static_branch_enable(&_page_poisoning_enabled);
		page_poisoning_requested = true;
	}
#endif

	if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) &&
	    page_poisoning_requested) {
		pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
			"will take precedence over init_on_alloc and init_on_free\n");
		_init_on_alloc_enabled_early = false;
		_init_on_free_enabled_early = false;
	}

	if (_init_on_alloc_enabled_early)
		static_branch_enable(&init_on_alloc);
	else
		static_branch_disable(&init_on_alloc);

	if (_init_on_free_enabled_early)
		static_branch_enable(&init_on_free);
	else
		static_branch_disable(&init_on_free);

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (!debug_pagealloc_enabled())
		return;

	static_branch_enable(&_debug_pagealloc_enabled);

	if (!debug_guardpage_minorder())
		return;

	static_branch_enable(&_debug_guardpage_enabled);
#endif
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->lru, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case
 * that is happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 2)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER - 1) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}
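
/*
 * Illustrative example (not part of the original source; pfn values chosen
 * for clarity): freeing an order-2 page at pfn 0x1008. Its buddy is the other
 * half of the enclosing order-3 pair, i.e. pfn 0x100c (0x1008 ^ (1 << 2)).
 * If that buddy is free, the merge step above computes
 *
 *	combined_pfn = buddy_pfn & pfn = 0x100c & 0x1008 = 0x1008
 *
 * so the merged order-3 block starts at pfn 0x1008, and the loop retries one
 * order higher until no free buddy is found or MAX_ORDER - 1 is reached.
 */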

/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page is changed, otherwise 0
 *
 * It is used when the free page crosses two pageblocks with different migratetypes
 * at split_pfn_offset within the page. The split free page will be put into
 * separate migratetype lists afterwards. Otherwise, the function achieves
 * nothing.
 */
int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset)
{
	struct zone *zone = page_zone(free_page);
	unsigned long free_page_pfn = page_to_pfn(free_page);
	unsigned long pfn;
	unsigned long flags;
	int free_page_order;
	int mt;
	int ret = 0;

	if (split_pfn_offset == 0)
		return ret;

	spin_lock_irqsave(&zone->lock, flags);

	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
		ret = -ENOENT;
		goto out;
	}

	mt = get_pageblock_migratetype(free_page);
	if (likely(!is_migrate_isolate(mt)))
		__mod_zone_freepage_state(zone, -(1UL << order), mt);

	del_page_from_free_list(free_page, zone, order);
	for (pfn = free_page_pfn;
	     pfn < free_page_pfn + (1UL << order);) {
		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

		free_page_order = min_t(unsigned int,
					pfn ? __ffs(pfn) : order,
					__fls(split_pfn_offset));
		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
				mt, FPI_NONE);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= (1UL << free_page_order);
		/* we have done the first part, now switch to second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void check_free_page_bad(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline int check_free_page(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	check_free_page_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. Deferred memory initialization has not yet completed,
 *    see the explanation below.
 * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON,
 *    see the comment next to it.
 * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
 *    see the comment next to it.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	return deferred_pages_enabled() ||
	       (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
		(fpi_flags & FPI_SKIP_KASAN_POISON)) ||
	       PageSkipKASanPoison(page);
}

static void kernel_init_free_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++) {
		u8 tag = page_kasan_tag(page + i);
		page_kasan_tag_reset(page + i);
		clear_highpage(page + i);
		page_kasan_tag_set(page + i, tag);
	}
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, bool check_free, fpi_t fpi_flags)
{
	int bad = 0;
	bool init = want_init_on_free();

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy
		 * Untie memcg state and reset page's owner
		 */
		if (memcg_kmem_enabled() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound) {
			ClearPageDoubleMap(page);
			ClearPageHasHWPoisoned(page);
		}
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(check_free_page(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (check_free)
		bad += check_free_page(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!should_skip_kasan_poison(page, fpi_flags)) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_free_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
 * moved from pcp lists to free lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	return free_pages_prepare(page, order, true, FPI_NONE);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return check_free_page(page);
	else
		return false;
}
#else
/*
 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
 * moving from pcp lists to free list in order to reduce overhead. With
 * debug_pagealloc enabled, they are checked also immediately when being freed
 * to the pcp lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return free_pages_prepare(page, order, true, FPI_NONE);
	else
		return free_pages_prepare(page, order, false, FPI_NONE);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return check_free_page(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	int min_pindex = 0;
	int max_pindex = NR_PCP_LISTS - 1;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure a proper count is passed which otherwise would get stuck in
	 * the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	/*
	 * local_lock_irq held so equivalent to spin_lock_irqsave for
	 * both PREEMPT_RT and non-PREEMPT_RT configurations.
	 */
	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > max_pindex)
				pindex = min_pindex;
			list = &pcp->lists[pindex];
			if (!list_empty(list))
				break;

			if (pindex == max_pindex)
				max_pindex--;
			if (pindex == min_pindex)
				min_pindex++;
		} while (1);

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		BUILD_BUG_ON(MAX_ORDER >= (1 << NR_PCP_ORDER_WIDTH));
		do {
			int mt;

			page = list_last_entry(list, struct page, lru);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->lru);
			count -= nr_pages;
			pcp->count -= nr_pages;

			if (bulkfree_pcp_prepare(page))
				continue;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock(&zone->lock);
}
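
/*
 * Illustrative example (not part of the original source): the "count"
 * argument above is measured in pages, not list entries. Draining a pcp
 * list that only holds order-2 entries with count == 10 frees three
 * entries (count drops 10 -> 6 -> 2 -> -2), i.e. the loop may overshoot
 * the requested number of pages by up to (1 << order) - 1 before it stops.
 */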

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		     is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_spans_pfn(zone, pfn))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order, true, fpi_flags))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		     is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON);
}

#ifdef CONFIG_NUMA

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif /* CONFIG_NUMA */

void __init memblock_free_pages(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	__free_pages_core(page, order);
}

/*
 * Check that the whole (or a subset of a) pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
		cond_resched();
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(unsigned long pfn,
				       unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	if (!nr_pages)
		return;

	page = pfn_to_page(pfn);

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == pageblock_nr_pages &&
	    (pfn & (pageblock_nr_pages - 1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, pageblock_order);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if ((pfn & (pageblock_nr_pages - 1)) == 0)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, 0);
	}
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/*
 * Returns true if page needs to be initialized or freed to buddy allocator.
 *
 * First we check if pfn is valid on architectures where it is possible to have
 * holes within pageblock_nr_pages. On systems where it is not possible, this
 * function is optimized out.
 *
 * Then, we check if a current large page is valid by only checking the validity
 * of the head pfn.
 */
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
		return false;
	return true;
}
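
/*
 * Illustrative example (not part of the original source; pageblock_nr_pages
 * assumed to be 512): deferred_free_pages() below accumulates contiguous
 * valid pfns in "nr_free" and hands them to deferred_free_range() at each
 * pageblock boundary. A fully valid, naturally aligned run of pfns
 * 0x80000..0x801ff is therefore freed as one pageblock_order chunk via
 * __free_pages_core(), while a run interrupted by an invalid pfn falls back
 * to freeing the accumulated pages one order-0 page at a time.
 */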
1862 */ 1863 static void __init deferred_free_pages(unsigned long pfn, 1864 unsigned long end_pfn) 1865 { 1866 unsigned long nr_pgmask = pageblock_nr_pages - 1; 1867 unsigned long nr_free = 0; 1868 1869 for (; pfn < end_pfn; pfn++) { 1870 if (!deferred_pfn_valid(pfn)) { 1871 deferred_free_range(pfn - nr_free, nr_free); 1872 nr_free = 0; 1873 } else if (!(pfn & nr_pgmask)) { 1874 deferred_free_range(pfn - nr_free, nr_free); 1875 nr_free = 1; 1876 } else { 1877 nr_free++; 1878 } 1879 } 1880 /* Free the last block of pages to allocator */ 1881 deferred_free_range(pfn - nr_free, nr_free); 1882 } 1883 1884 /* 1885 * Initialize struct pages. We minimize pfn page lookups and scheduler checks 1886 * by performing it only once every pageblock_nr_pages. 1887 * Return number of pages initialized. 1888 */ 1889 static unsigned long __init deferred_init_pages(struct zone *zone, 1890 unsigned long pfn, 1891 unsigned long end_pfn) 1892 { 1893 unsigned long nr_pgmask = pageblock_nr_pages - 1; 1894 int nid = zone_to_nid(zone); 1895 unsigned long nr_pages = 0; 1896 int zid = zone_idx(zone); 1897 struct page *page = NULL; 1898 1899 for (; pfn < end_pfn; pfn++) { 1900 if (!deferred_pfn_valid(pfn)) { 1901 page = NULL; 1902 continue; 1903 } else if (!page || !(pfn & nr_pgmask)) { 1904 page = pfn_to_page(pfn); 1905 } else { 1906 page++; 1907 } 1908 __init_single_page(page, pfn, zid, nid); 1909 nr_pages++; 1910 } 1911 return (nr_pages); 1912 } 1913 1914 /* 1915 * This function is meant to pre-load the iterator for the zone init. 1916 * Specifically it walks through the ranges until we are caught up to the 1917 * first_init_pfn value and exits there. If we never encounter the value we 1918 * return false indicating there are no valid ranges left. 1919 */ 1920 static bool __init 1921 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, 1922 unsigned long *spfn, unsigned long *epfn, 1923 unsigned long first_init_pfn) 1924 { 1925 u64 j; 1926 1927 /* 1928 * Start out by walking through the ranges in this zone that have 1929 * already been initialized. We don't need to do anything with them 1930 * so we just need to flush them out of the system. 1931 */ 1932 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { 1933 if (*epfn <= first_init_pfn) 1934 continue; 1935 if (*spfn < first_init_pfn) 1936 *spfn = first_init_pfn; 1937 *i = j; 1938 return true; 1939 } 1940 1941 return false; 1942 } 1943 1944 /* 1945 * Initialize and free pages. We do it in two loops: first we initialize 1946 * struct page, then free to buddy allocator, because while we are 1947 * freeing pages we can access pages that are ahead (computing buddy 1948 * page in __free_one_page()). 1949 * 1950 * In order to try and keep some memory in the cache we have the loop 1951 * broken along max page order boundaries. This way we will not cause 1952 * any issues with the buddy page computation. 
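 *
 * For example, with the default MAX_ORDER of 11 (MAX_ORDER_NR_PAGES == 1024),
 * each call initializes and then frees at most one 1024-page aligned chunk,
 * so the struct pages initialized in the first loop are still cache-hot when
 * their buddies are computed in the second.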
1953 */ 1954 static unsigned long __init 1955 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, 1956 unsigned long *end_pfn) 1957 { 1958 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); 1959 unsigned long spfn = *start_pfn, epfn = *end_pfn; 1960 unsigned long nr_pages = 0; 1961 u64 j = *i; 1962 1963 /* First we loop through and initialize the page values */ 1964 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { 1965 unsigned long t; 1966 1967 if (mo_pfn <= *start_pfn) 1968 break; 1969 1970 t = min(mo_pfn, *end_pfn); 1971 nr_pages += deferred_init_pages(zone, *start_pfn, t); 1972 1973 if (mo_pfn < *end_pfn) { 1974 *start_pfn = mo_pfn; 1975 break; 1976 } 1977 } 1978 1979 /* Reset values and now loop through freeing pages as needed */ 1980 swap(j, *i); 1981 1982 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { 1983 unsigned long t; 1984 1985 if (mo_pfn <= spfn) 1986 break; 1987 1988 t = min(mo_pfn, epfn); 1989 deferred_free_pages(spfn, t); 1990 1991 if (mo_pfn <= epfn) 1992 break; 1993 } 1994 1995 return nr_pages; 1996 } 1997 1998 static void __init 1999 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 2000 void *arg) 2001 { 2002 unsigned long spfn, epfn; 2003 struct zone *zone = arg; 2004 u64 i; 2005 2006 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); 2007 2008 /* 2009 * Initialize and free pages in MAX_ORDER sized increments so that we 2010 * can avoid introducing any issues with the buddy allocator. 2011 */ 2012 while (spfn < end_pfn) { 2013 deferred_init_maxorder(&i, zone, &spfn, &epfn); 2014 cond_resched(); 2015 } 2016 } 2017 2018 /* An arch may override for more concurrency. */ 2019 __weak int __init 2020 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 2021 { 2022 return 1; 2023 } 2024 2025 /* Initialise remaining memory on a node */ 2026 static int __init deferred_init_memmap(void *data) 2027 { 2028 pg_data_t *pgdat = data; 2029 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2030 unsigned long spfn = 0, epfn = 0; 2031 unsigned long first_init_pfn, flags; 2032 unsigned long start = jiffies; 2033 struct zone *zone; 2034 int zid, max_threads; 2035 u64 i; 2036 2037 /* Bind memory initialisation thread to a local node if possible */ 2038 if (!cpumask_empty(cpumask)) 2039 set_cpus_allowed_ptr(current, cpumask); 2040 2041 pgdat_resize_lock(pgdat, &flags); 2042 first_init_pfn = pgdat->first_deferred_pfn; 2043 if (first_init_pfn == ULONG_MAX) { 2044 pgdat_resize_unlock(pgdat, &flags); 2045 pgdat_init_report_one_done(); 2046 return 0; 2047 } 2048 2049 /* Sanity check boundaries */ 2050 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 2051 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 2052 pgdat->first_deferred_pfn = ULONG_MAX; 2053 2054 /* 2055 * Once we unlock here, the zone cannot be grown anymore, thus if an 2056 * interrupt thread must allocate this early in boot, zone must be 2057 * pre-grown prior to start of deferred page initialization. 
2058 */ 2059 pgdat_resize_unlock(pgdat, &flags); 2060 2061 /* Only the highest zone is deferred so find it */ 2062 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 2063 zone = pgdat->node_zones + zid; 2064 if (first_init_pfn < zone_end_pfn(zone)) 2065 break; 2066 } 2067 2068 /* If the zone is empty somebody else may have cleared out the zone */ 2069 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2070 first_init_pfn)) 2071 goto zone_empty; 2072 2073 max_threads = deferred_page_init_max_threads(cpumask); 2074 2075 while (spfn < epfn) { 2076 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); 2077 struct padata_mt_job job = { 2078 .thread_fn = deferred_init_memmap_chunk, 2079 .fn_arg = zone, 2080 .start = spfn, 2081 .size = epfn_align - spfn, 2082 .align = PAGES_PER_SECTION, 2083 .min_chunk = PAGES_PER_SECTION, 2084 .max_threads = max_threads, 2085 }; 2086 2087 padata_do_multithreaded(&job); 2088 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2089 epfn_align); 2090 } 2091 zone_empty: 2092 /* Sanity check that the next zone really is unpopulated */ 2093 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); 2094 2095 pr_info("node %d deferred pages initialised in %ums\n", 2096 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 2097 2098 pgdat_init_report_one_done(); 2099 return 0; 2100 } 2101 2102 /* 2103 * If this zone has deferred pages, try to grow it by initializing enough 2104 * deferred pages to satisfy the allocation specified by order, rounded up to 2105 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2106 * of SECTION_SIZE bytes by initializing struct pages in increments of 2107 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2108 * 2109 * Return true when zone was grown, otherwise return false. We return true even 2110 * when we grow less than requested, to let the caller decide if there are 2111 * enough pages to satisfy the allocation. 2112 * 2113 * Note: We use noinline because this function is needed only during boot, and 2114 * it is called from a __ref function _deferred_grow_zone. This way we are 2115 * making sure that it is not inlined into permanent text section. 2116 */ 2117 static noinline bool __init 2118 deferred_grow_zone(struct zone *zone, unsigned int order) 2119 { 2120 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); 2121 pg_data_t *pgdat = zone->zone_pgdat; 2122 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2123 unsigned long spfn, epfn, flags; 2124 unsigned long nr_pages = 0; 2125 u64 i; 2126 2127 /* Only the last zone may have deferred pages */ 2128 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2129 return false; 2130 2131 pgdat_resize_lock(pgdat, &flags); 2132 2133 /* 2134 * If someone grew this zone while we were waiting for spinlock, return 2135 * true, as there might be enough pages already. 2136 */ 2137 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2138 pgdat_resize_unlock(pgdat, &flags); 2139 return true; 2140 } 2141 2142 /* If the zone is empty somebody else may have cleared out the zone */ 2143 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2144 first_deferred_pfn)) { 2145 pgdat->first_deferred_pfn = ULONG_MAX; 2146 pgdat_resize_unlock(pgdat, &flags); 2147 /* Retry only once. */ 2148 return first_deferred_pfn != ULONG_MAX; 2149 } 2150 2151 /* 2152 * Initialize and free pages in MAX_ORDER sized increments so 2153 * that we can avoid introducing any issues with the buddy 2154 * allocator. 
2155 */ 2156 while (spfn < epfn) { 2157 /* update our first deferred PFN for this section */ 2158 first_deferred_pfn = spfn; 2159 2160 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); 2161 touch_nmi_watchdog(); 2162 2163 /* We should only stop along section boundaries */ 2164 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) 2165 continue; 2166 2167 /* If our quota has been met we can stop here */ 2168 if (nr_pages >= nr_pages_needed) 2169 break; 2170 } 2171 2172 pgdat->first_deferred_pfn = spfn; 2173 pgdat_resize_unlock(pgdat, &flags); 2174 2175 return nr_pages > 0; 2176 } 2177 2178 /* 2179 * deferred_grow_zone() is __init, but it is called from 2180 * get_page_from_freelist() during early boot until deferred_pages permanently 2181 * disables this call. This is why we have refdata wrapper to avoid warning, 2182 * and to ensure that the function body gets unloaded. 2183 */ 2184 static bool __ref 2185 _deferred_grow_zone(struct zone *zone, unsigned int order) 2186 { 2187 return deferred_grow_zone(zone, order); 2188 } 2189 2190 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2191 2192 void __init page_alloc_init_late(void) 2193 { 2194 struct zone *zone; 2195 int nid; 2196 2197 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2198 2199 /* There will be num_node_state(N_MEMORY) threads */ 2200 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2201 for_each_node_state(nid, N_MEMORY) { 2202 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2203 } 2204 2205 /* Block until all are initialised */ 2206 wait_for_completion(&pgdat_init_all_done_comp); 2207 2208 /* 2209 * We initialized the rest of the deferred pages. Permanently disable 2210 * on-demand struct page initialization. 2211 */ 2212 static_branch_disable(&deferred_pages); 2213 2214 /* Reinit limits that are based on free pages after the kernel is up */ 2215 files_maxfiles_init(); 2216 #endif 2217 2218 buffer_init(); 2219 2220 /* Discard memblock private memory */ 2221 memblock_discard(); 2222 2223 for_each_node_state(nid, N_MEMORY) 2224 shuffle_free_memory(NODE_DATA(nid)); 2225 2226 for_each_populated_zone(zone) 2227 set_zone_contiguous(zone); 2228 } 2229 2230 #ifdef CONFIG_CMA 2231 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ 2232 void __init init_cma_reserved_pageblock(struct page *page) 2233 { 2234 unsigned i = pageblock_nr_pages; 2235 struct page *p = page; 2236 2237 do { 2238 __ClearPageReserved(p); 2239 set_page_count(p, 0); 2240 } while (++p, --i); 2241 2242 set_pageblock_migratetype(page, MIGRATE_CMA); 2243 set_page_refcounted(page); 2244 __free_pages(page, pageblock_order); 2245 2246 adjust_managed_page_count(page, pageblock_nr_pages); 2247 page_zone(page)->cma_pages += pageblock_nr_pages; 2248 } 2249 #endif 2250 2251 /* 2252 * The order of subdivision here is critical for the IO subsystem. 2253 * Please do not alter this order without good reasons and regression 2254 * testing. Specifically, as large blocks of memory are subdivided, 2255 * the order in which smaller blocks are delivered depends on the order 2256 * they're subdivided in this function. This is the primary factor 2257 * influencing the order in which pages are delivered to the IO 2258 * subsystem according to empirical testing, and this is also justified 2259 * by considering the behavior of a buddy system containing a single 2260 * large block of memory acted on by a series of small allocations. 2261 * This behavior is a critical factor in sglist merging's success. 
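 *
 * For example, expanding an order-3 block to satisfy an order-0 request
 * returns one order-2, one order-1 and one order-0 buddy to the free lists
 * and hands the remaining first order-0 page to the caller.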
2262 * 2263 * -- nyc 2264 */ 2265 static inline void expand(struct zone *zone, struct page *page, 2266 int low, int high, int migratetype) 2267 { 2268 unsigned long size = 1 << high; 2269 2270 while (high > low) { 2271 high--; 2272 size >>= 1; 2273 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 2274 2275 /* 2276 * Mark as guard pages (or page), that will allow to 2277 * merge back to allocator when buddy will be freed. 2278 * Corresponding page table entries will not be touched, 2279 * pages will stay not present in virtual address space 2280 */ 2281 if (set_page_guard(zone, &page[size], high, migratetype)) 2282 continue; 2283 2284 add_to_free_list(&page[size], zone, high, migratetype); 2285 set_buddy_order(&page[size], high); 2286 } 2287 } 2288 2289 static void check_new_page_bad(struct page *page) 2290 { 2291 if (unlikely(page->flags & __PG_HWPOISON)) { 2292 /* Don't complain about hwpoisoned pages */ 2293 page_mapcount_reset(page); /* remove PageBuddy */ 2294 return; 2295 } 2296 2297 bad_page(page, 2298 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 2299 } 2300 2301 /* 2302 * This page is about to be returned from the page allocator 2303 */ 2304 static inline int check_new_page(struct page *page) 2305 { 2306 if (likely(page_expected_state(page, 2307 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 2308 return 0; 2309 2310 check_new_page_bad(page); 2311 return 1; 2312 } 2313 2314 static bool check_new_pages(struct page *page, unsigned int order) 2315 { 2316 int i; 2317 for (i = 0; i < (1 << order); i++) { 2318 struct page *p = page + i; 2319 2320 if (unlikely(check_new_page(p))) 2321 return true; 2322 } 2323 2324 return false; 2325 } 2326 2327 #ifdef CONFIG_DEBUG_VM 2328 /* 2329 * With DEBUG_VM enabled, order-0 pages are checked for expected state when 2330 * being allocated from pcp lists. With debug_pagealloc also enabled, they are 2331 * also checked when pcp lists are refilled from the free lists. 2332 */ 2333 static inline bool check_pcp_refill(struct page *page, unsigned int order) 2334 { 2335 if (debug_pagealloc_enabled_static()) 2336 return check_new_pages(page, order); 2337 else 2338 return false; 2339 } 2340 2341 static inline bool check_new_pcp(struct page *page, unsigned int order) 2342 { 2343 return check_new_pages(page, order); 2344 } 2345 #else 2346 /* 2347 * With DEBUG_VM disabled, free order-0 pages are checked for expected state 2348 * when pcp lists are being refilled from the free lists. With debug_pagealloc 2349 * enabled, they are also checked when being allocated from the pcp lists. 2350 */ 2351 static inline bool check_pcp_refill(struct page *page, unsigned int order) 2352 { 2353 return check_new_pages(page, order); 2354 } 2355 static inline bool check_new_pcp(struct page *page, unsigned int order) 2356 { 2357 if (debug_pagealloc_enabled_static()) 2358 return check_new_pages(page, order); 2359 else 2360 return false; 2361 } 2362 #endif /* CONFIG_DEBUG_VM */ 2363 2364 static inline bool should_skip_kasan_unpoison(gfp_t flags, bool init_tags) 2365 { 2366 /* Don't skip if a software KASAN mode is enabled. */ 2367 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 2368 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 2369 return false; 2370 2371 /* Skip, if hardware tag-based KASAN is not enabled. */ 2372 if (!kasan_hw_tags_enabled()) 2373 return true; 2374 2375 /* 2376 * With hardware tag-based KASAN enabled, skip if either: 2377 * 2378 * 1. Memory tags have already been cleared via tag_clear_highpage(). 2379 * 2. Skipping has been requested via __GFP_SKIP_KASAN_UNPOISON. 
2380 */ 2381 return init_tags || (flags & __GFP_SKIP_KASAN_UNPOISON); 2382 } 2383 2384 static inline bool should_skip_init(gfp_t flags) 2385 { 2386 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 2387 if (!kasan_hw_tags_enabled()) 2388 return false; 2389 2390 /* For hardware tag-based KASAN, skip if requested. */ 2391 return (flags & __GFP_SKIP_ZERO); 2392 } 2393 2394 inline void post_alloc_hook(struct page *page, unsigned int order, 2395 gfp_t gfp_flags) 2396 { 2397 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 2398 !should_skip_init(gfp_flags); 2399 bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS); 2400 2401 set_page_private(page, 0); 2402 set_page_refcounted(page); 2403 2404 arch_alloc_page(page, order); 2405 debug_pagealloc_map_pages(page, 1 << order); 2406 2407 /* 2408 * Page unpoisoning must happen before memory initialization. 2409 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 2410 * allocations and the page unpoisoning code will complain. 2411 */ 2412 kernel_unpoison_pages(page, 1 << order); 2413 2414 /* 2415 * As memory initialization might be integrated into KASAN, 2416 * KASAN unpoisoning and memory initializion code must be 2417 * kept together to avoid discrepancies in behavior. 2418 */ 2419 2420 /* 2421 * If memory tags should be zeroed (which happens only when memory 2422 * should be initialized as well). 2423 */ 2424 if (init_tags) { 2425 int i; 2426 2427 /* Initialize both memory and tags. */ 2428 for (i = 0; i != 1 << order; ++i) 2429 tag_clear_highpage(page + i); 2430 2431 /* Note that memory is already initialized by the loop above. */ 2432 init = false; 2433 } 2434 if (!should_skip_kasan_unpoison(gfp_flags, init_tags)) { 2435 /* Unpoison shadow memory or set memory tags. */ 2436 kasan_unpoison_pages(page, order, init); 2437 2438 /* Note that memory is already initialized by KASAN. */ 2439 if (kasan_has_integrated_init()) 2440 init = false; 2441 } 2442 /* If memory is still not initialized, do it now. */ 2443 if (init) 2444 kernel_init_free_pages(page, 1 << order); 2445 /* Propagate __GFP_SKIP_KASAN_POISON to page flags. */ 2446 if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON)) 2447 SetPageSkipKASanPoison(page); 2448 2449 set_page_owner(page, order, gfp_flags); 2450 page_table_check_alloc(page, order); 2451 } 2452 2453 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 2454 unsigned int alloc_flags) 2455 { 2456 post_alloc_hook(page, order, gfp_flags); 2457 2458 if (order && (gfp_flags & __GFP_COMP)) 2459 prep_compound_page(page, order); 2460 2461 /* 2462 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 2463 * allocate the page. The expectation is that the caller is taking 2464 * steps that will free more memory. The caller should avoid the page 2465 * being used for !PFMEMALLOC purposes. 
2466 */ 2467 if (alloc_flags & ALLOC_NO_WATERMARKS) 2468 set_page_pfmemalloc(page); 2469 else 2470 clear_page_pfmemalloc(page); 2471 } 2472 2473 /* 2474 * Go through the free lists for the given migratetype and remove 2475 * the smallest available page from the freelists 2476 */ 2477 static __always_inline 2478 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 2479 int migratetype) 2480 { 2481 unsigned int current_order; 2482 struct free_area *area; 2483 struct page *page; 2484 2485 /* Find a page of the appropriate size in the preferred list */ 2486 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 2487 area = &(zone->free_area[current_order]); 2488 page = get_page_from_free_area(area, migratetype); 2489 if (!page) 2490 continue; 2491 del_page_from_free_list(page, zone, current_order); 2492 expand(zone, page, order, current_order, migratetype); 2493 set_pcppage_migratetype(page, migratetype); 2494 trace_mm_page_alloc_zone_locked(page, order, migratetype, 2495 pcp_allowed_order(order) && 2496 migratetype < MIGRATE_PCPTYPES); 2497 return page; 2498 } 2499 2500 return NULL; 2501 } 2502 2503 2504 /* 2505 * This array describes the order lists are fallen back to when 2506 * the free lists for the desirable migrate type are depleted 2507 * 2508 * The other migratetypes do not have fallbacks. 2509 */ 2510 static int fallbacks[MIGRATE_TYPES][3] = { 2511 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2512 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, 2513 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2514 }; 2515 2516 #ifdef CONFIG_CMA 2517 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2518 unsigned int order) 2519 { 2520 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 2521 } 2522 #else 2523 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2524 unsigned int order) { return NULL; } 2525 #endif 2526 2527 /* 2528 * Move the free pages in a range to the freelist tail of the requested type. 2529 * Note that start_page and end_pages are not aligned on a pageblock 2530 * boundary. If alignment is required, use move_freepages_block() 2531 */ 2532 static int move_freepages(struct zone *zone, 2533 unsigned long start_pfn, unsigned long end_pfn, 2534 int migratetype, int *num_movable) 2535 { 2536 struct page *page; 2537 unsigned long pfn; 2538 unsigned int order; 2539 int pages_moved = 0; 2540 2541 for (pfn = start_pfn; pfn <= end_pfn;) { 2542 page = pfn_to_page(pfn); 2543 if (!PageBuddy(page)) { 2544 /* 2545 * We assume that pages that could be isolated for 2546 * migration are movable. But we don't actually try 2547 * isolating, as that would be expensive. 
2548 */ 2549 if (num_movable && 2550 (PageLRU(page) || __PageMovable(page))) 2551 (*num_movable)++; 2552 pfn++; 2553 continue; 2554 } 2555 2556 /* Make sure we are not inadvertently changing nodes */ 2557 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 2558 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 2559 2560 order = buddy_order(page); 2561 move_to_free_list(page, zone, order, migratetype); 2562 pfn += 1 << order; 2563 pages_moved += 1 << order; 2564 } 2565 2566 return pages_moved; 2567 } 2568 2569 int move_freepages_block(struct zone *zone, struct page *page, 2570 int migratetype, int *num_movable) 2571 { 2572 unsigned long start_pfn, end_pfn, pfn; 2573 2574 if (num_movable) 2575 *num_movable = 0; 2576 2577 pfn = page_to_pfn(page); 2578 start_pfn = pfn & ~(pageblock_nr_pages - 1); 2579 end_pfn = start_pfn + pageblock_nr_pages - 1; 2580 2581 /* Do not cross zone boundaries */ 2582 if (!zone_spans_pfn(zone, start_pfn)) 2583 start_pfn = pfn; 2584 if (!zone_spans_pfn(zone, end_pfn)) 2585 return 0; 2586 2587 return move_freepages(zone, start_pfn, end_pfn, migratetype, 2588 num_movable); 2589 } 2590 2591 static void change_pageblock_range(struct page *pageblock_page, 2592 int start_order, int migratetype) 2593 { 2594 int nr_pageblocks = 1 << (start_order - pageblock_order); 2595 2596 while (nr_pageblocks--) { 2597 set_pageblock_migratetype(pageblock_page, migratetype); 2598 pageblock_page += pageblock_nr_pages; 2599 } 2600 } 2601 2602 /* 2603 * When we are falling back to another migratetype during allocation, try to 2604 * steal extra free pages from the same pageblocks to satisfy further 2605 * allocations, instead of polluting multiple pageblocks. 2606 * 2607 * If we are stealing a relatively large buddy page, it is likely there will 2608 * be more free pages in the pageblock, so try to steal them all. For 2609 * reclaimable and unmovable allocations, we steal regardless of page size, 2610 * as fragmentation caused by those allocations polluting movable pageblocks 2611 * is worse than movable allocations stealing from unmovable and reclaimable 2612 * pageblocks. 2613 */ 2614 static bool can_steal_fallback(unsigned int order, int start_mt) 2615 { 2616 /* 2617 * Leaving this order check is intended, although there is 2618 * relaxed order check in next check. The reason is that 2619 * we can actually steal whole pageblock if this condition met, 2620 * but, below check doesn't guarantee it and that is just heuristic 2621 * so could be changed anytime. 2622 */ 2623 if (order >= pageblock_order) 2624 return true; 2625 2626 if (order >= pageblock_order / 2 || 2627 start_mt == MIGRATE_RECLAIMABLE || 2628 start_mt == MIGRATE_UNMOVABLE || 2629 page_group_by_mobility_disabled) 2630 return true; 2631 2632 return false; 2633 } 2634 2635 static inline bool boost_watermark(struct zone *zone) 2636 { 2637 unsigned long max_boost; 2638 2639 if (!watermark_boost_factor) 2640 return false; 2641 /* 2642 * Don't bother in zones that are unlikely to produce results. 2643 * On small machines, including kdump capture kernels running 2644 * in a small area, boosting the watermark can cause an out of 2645 * memory situation immediately. 2646 */ 2647 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2648 return false; 2649 2650 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2651 watermark_boost_factor, 10000); 2652 2653 /* 2654 * high watermark may be uninitialised if fragmentation occurs 2655 * very early in boot so do not boost. 
We do not fall 2656 * through and boost by pageblock_nr_pages as failing 2657 * allocations that early means that reclaim is not going 2658 * to help and it may even be impossible to reclaim the 2659 * boosted watermark resulting in a hang. 2660 */ 2661 if (!max_boost) 2662 return false; 2663 2664 max_boost = max(pageblock_nr_pages, max_boost); 2665 2666 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2667 max_boost); 2668 2669 return true; 2670 } 2671 2672 /* 2673 * This function implements actual steal behaviour. If order is large enough, 2674 * we can steal whole pageblock. If not, we first move freepages in this 2675 * pageblock to our migratetype and determine how many already-allocated pages 2676 * are there in the pageblock with a compatible migratetype. If at least half 2677 * of pages are free or compatible, we can change migratetype of the pageblock 2678 * itself, so pages freed in the future will be put on the correct free list. 2679 */ 2680 static void steal_suitable_fallback(struct zone *zone, struct page *page, 2681 unsigned int alloc_flags, int start_type, bool whole_block) 2682 { 2683 unsigned int current_order = buddy_order(page); 2684 int free_pages, movable_pages, alike_pages; 2685 int old_block_type; 2686 2687 old_block_type = get_pageblock_migratetype(page); 2688 2689 /* 2690 * This can happen due to races and we want to prevent broken 2691 * highatomic accounting. 2692 */ 2693 if (is_migrate_highatomic(old_block_type)) 2694 goto single_page; 2695 2696 /* Take ownership for orders >= pageblock_order */ 2697 if (current_order >= pageblock_order) { 2698 change_pageblock_range(page, current_order, start_type); 2699 goto single_page; 2700 } 2701 2702 /* 2703 * Boost watermarks to increase reclaim pressure to reduce the 2704 * likelihood of future fallbacks. Wake kswapd now as the node 2705 * may be balanced overall and kswapd will not wake naturally. 2706 */ 2707 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2708 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2709 2710 /* We are not allowed to try stealing from the whole block */ 2711 if (!whole_block) 2712 goto single_page; 2713 2714 free_pages = move_freepages_block(zone, page, start_type, 2715 &movable_pages); 2716 /* 2717 * Determine how many pages are compatible with our allocation. 2718 * For movable allocation, it's the number of movable pages which 2719 * we just obtained. For other types it's a bit more tricky. 2720 */ 2721 if (start_type == MIGRATE_MOVABLE) { 2722 alike_pages = movable_pages; 2723 } else { 2724 /* 2725 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2726 * to MOVABLE pageblock, consider all non-movable pages as 2727 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2728 * vice versa, be conservative since we can't distinguish the 2729 * exact migratetype of non-movable pages. 2730 */ 2731 if (old_block_type == MIGRATE_MOVABLE) 2732 alike_pages = pageblock_nr_pages 2733 - (free_pages + movable_pages); 2734 else 2735 alike_pages = 0; 2736 } 2737 2738 /* moving whole block can fail due to zone boundary conditions */ 2739 if (!free_pages) 2740 goto single_page; 2741 2742 /* 2743 * If a sufficient number of pages in the block are either free or of 2744 * comparable migratability as our allocation, claim the whole block. 
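 *
 * For instance, on a configuration with 512-page pageblocks, at least 256
 * pages must be free or alike before the pageblock's migratetype is changed.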
2745 */ 2746 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2747 page_group_by_mobility_disabled) 2748 set_pageblock_migratetype(page, start_type); 2749 2750 return; 2751 2752 single_page: 2753 move_to_free_list(page, zone, current_order, start_type); 2754 } 2755 2756 /* 2757 * Check whether there is a suitable fallback freepage with requested order. 2758 * If only_stealable is true, this function returns fallback_mt only if 2759 * we can steal other freepages all together. This would help to reduce 2760 * fragmentation due to mixed migratetype pages in one pageblock. 2761 */ 2762 int find_suitable_fallback(struct free_area *area, unsigned int order, 2763 int migratetype, bool only_stealable, bool *can_steal) 2764 { 2765 int i; 2766 int fallback_mt; 2767 2768 if (area->nr_free == 0) 2769 return -1; 2770 2771 *can_steal = false; 2772 for (i = 0;; i++) { 2773 fallback_mt = fallbacks[migratetype][i]; 2774 if (fallback_mt == MIGRATE_TYPES) 2775 break; 2776 2777 if (free_area_empty(area, fallback_mt)) 2778 continue; 2779 2780 if (can_steal_fallback(order, migratetype)) 2781 *can_steal = true; 2782 2783 if (!only_stealable) 2784 return fallback_mt; 2785 2786 if (*can_steal) 2787 return fallback_mt; 2788 } 2789 2790 return -1; 2791 } 2792 2793 /* 2794 * Reserve a pageblock for exclusive use of high-order atomic allocations if 2795 * there are no empty page blocks that contain a page with a suitable order 2796 */ 2797 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2798 unsigned int alloc_order) 2799 { 2800 int mt; 2801 unsigned long max_managed, flags; 2802 2803 /* 2804 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2805 * Check is race-prone but harmless. 2806 */ 2807 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 2808 if (zone->nr_reserved_highatomic >= max_managed) 2809 return; 2810 2811 spin_lock_irqsave(&zone->lock, flags); 2812 2813 /* Recheck the nr_reserved_highatomic limit under the lock */ 2814 if (zone->nr_reserved_highatomic >= max_managed) 2815 goto out_unlock; 2816 2817 /* Yoink! */ 2818 mt = get_pageblock_migratetype(page); 2819 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 2820 if (migratetype_is_mergeable(mt)) { 2821 zone->nr_reserved_highatomic += pageblock_nr_pages; 2822 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2823 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 2824 } 2825 2826 out_unlock: 2827 spin_unlock_irqrestore(&zone->lock, flags); 2828 } 2829 2830 /* 2831 * Used when an allocation is about to fail under memory pressure. This 2832 * potentially hurts the reliability of high-order allocations when under 2833 * intense memory pressure but failed atomic allocations should be easier 2834 * to recover from than an OOM. 2835 * 2836 * If @force is true, try to unreserve a pageblock even though highatomic 2837 * pageblock is exhausted. 2838 */ 2839 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2840 bool force) 2841 { 2842 struct zonelist *zonelist = ac->zonelist; 2843 unsigned long flags; 2844 struct zoneref *z; 2845 struct zone *zone; 2846 struct page *page; 2847 int order; 2848 bool ret; 2849 2850 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2851 ac->nodemask) { 2852 /* 2853 * Preserve at least one pageblock unless memory pressure 2854 * is really high. 
2855 */ 2856 if (!force && zone->nr_reserved_highatomic <= 2857 pageblock_nr_pages) 2858 continue; 2859 2860 spin_lock_irqsave(&zone->lock, flags); 2861 for (order = 0; order < MAX_ORDER; order++) { 2862 struct free_area *area = &(zone->free_area[order]); 2863 2864 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 2865 if (!page) 2866 continue; 2867 2868 /* 2869 * In page freeing path, migratetype change is racy so 2870 * we can counter several free pages in a pageblock 2871 * in this loop although we changed the pageblock type 2872 * from highatomic to ac->migratetype. So we should 2873 * adjust the count once. 2874 */ 2875 if (is_migrate_highatomic_page(page)) { 2876 /* 2877 * It should never happen but changes to 2878 * locking could inadvertently allow a per-cpu 2879 * drain to add pages to MIGRATE_HIGHATOMIC 2880 * while unreserving so be safe and watch for 2881 * underflows. 2882 */ 2883 zone->nr_reserved_highatomic -= min( 2884 pageblock_nr_pages, 2885 zone->nr_reserved_highatomic); 2886 } 2887 2888 /* 2889 * Convert to ac->migratetype and avoid the normal 2890 * pageblock stealing heuristics. Minimally, the caller 2891 * is doing the work and needs the pages. More 2892 * importantly, if the block was always converted to 2893 * MIGRATE_UNMOVABLE or another type then the number 2894 * of pageblocks that cannot be completely freed 2895 * may increase. 2896 */ 2897 set_pageblock_migratetype(page, ac->migratetype); 2898 ret = move_freepages_block(zone, page, ac->migratetype, 2899 NULL); 2900 if (ret) { 2901 spin_unlock_irqrestore(&zone->lock, flags); 2902 return ret; 2903 } 2904 } 2905 spin_unlock_irqrestore(&zone->lock, flags); 2906 } 2907 2908 return false; 2909 } 2910 2911 /* 2912 * Try finding a free buddy page on the fallback list and put it on the free 2913 * list of requested migratetype, possibly along with other pages from the same 2914 * block, depending on fragmentation avoidance heuristics. Returns true if 2915 * fallback was found so that __rmqueue_smallest() can grab it. 2916 * 2917 * The use of signed ints for order and current_order is a deliberate 2918 * deviation from the rest of this file, to make the for loop 2919 * condition simpler. 2920 */ 2921 static __always_inline bool 2922 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2923 unsigned int alloc_flags) 2924 { 2925 struct free_area *area; 2926 int current_order; 2927 int min_order = order; 2928 struct page *page; 2929 int fallback_mt; 2930 bool can_steal; 2931 2932 /* 2933 * Do not steal pages from freelists belonging to other pageblocks 2934 * i.e. orders < pageblock_order. If there are no local zones free, 2935 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2936 */ 2937 if (alloc_flags & ALLOC_NOFRAGMENT) 2938 min_order = pageblock_order; 2939 2940 /* 2941 * Find the largest available free page in the other list. This roughly 2942 * approximates finding the pageblock with the most free pages, which 2943 * would be too costly to do exactly. 2944 */ 2945 for (current_order = MAX_ORDER - 1; current_order >= min_order; 2946 --current_order) { 2947 area = &(zone->free_area[current_order]); 2948 fallback_mt = find_suitable_fallback(area, current_order, 2949 start_migratetype, false, &can_steal); 2950 if (fallback_mt == -1) 2951 continue; 2952 2953 /* 2954 * We cannot steal all free pages from the pageblock and the 2955 * requested migratetype is movable. 
In that case it's better to 2956 * steal and split the smallest available page instead of the 2957 * largest available page, because even if the next movable 2958 * allocation falls back into a different pageblock than this 2959 * one, it won't cause permanent fragmentation. 2960 */ 2961 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2962 && current_order > order) 2963 goto find_smallest; 2964 2965 goto do_steal; 2966 } 2967 2968 return false; 2969 2970 find_smallest: 2971 for (current_order = order; current_order < MAX_ORDER; 2972 current_order++) { 2973 area = &(zone->free_area[current_order]); 2974 fallback_mt = find_suitable_fallback(area, current_order, 2975 start_migratetype, false, &can_steal); 2976 if (fallback_mt != -1) 2977 break; 2978 } 2979 2980 /* 2981 * This should not happen - we already found a suitable fallback 2982 * when looking for the largest page. 2983 */ 2984 VM_BUG_ON(current_order == MAX_ORDER); 2985 2986 do_steal: 2987 page = get_page_from_free_area(area, fallback_mt); 2988 2989 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2990 can_steal); 2991 2992 trace_mm_page_alloc_extfrag(page, order, current_order, 2993 start_migratetype, fallback_mt); 2994 2995 return true; 2996 2997 } 2998 2999 /* 3000 * Do the hard work of removing an element from the buddy allocator. 3001 * Call me with the zone->lock already held. 3002 */ 3003 static __always_inline struct page * 3004 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 3005 unsigned int alloc_flags) 3006 { 3007 struct page *page; 3008 3009 if (IS_ENABLED(CONFIG_CMA)) { 3010 /* 3011 * Balance movable allocations between regular and CMA areas by 3012 * allocating from CMA when over half of the zone's free memory 3013 * is in the CMA area. 3014 */ 3015 if (alloc_flags & ALLOC_CMA && 3016 zone_page_state(zone, NR_FREE_CMA_PAGES) > 3017 zone_page_state(zone, NR_FREE_PAGES) / 2) { 3018 page = __rmqueue_cma_fallback(zone, order); 3019 if (page) 3020 return page; 3021 } 3022 } 3023 retry: 3024 page = __rmqueue_smallest(zone, order, migratetype); 3025 if (unlikely(!page)) { 3026 if (alloc_flags & ALLOC_CMA) 3027 page = __rmqueue_cma_fallback(zone, order); 3028 3029 if (!page && __rmqueue_fallback(zone, order, migratetype, 3030 alloc_flags)) 3031 goto retry; 3032 } 3033 return page; 3034 } 3035 3036 /* 3037 * Obtain a specified number of elements from the buddy allocator, all under 3038 * a single hold of the lock, for efficiency. Add them to the supplied list. 3039 * Returns the number of new pages which were placed at *list. 3040 */ 3041 static int rmqueue_bulk(struct zone *zone, unsigned int order, 3042 unsigned long count, struct list_head *list, 3043 int migratetype, unsigned int alloc_flags) 3044 { 3045 int i, allocated = 0; 3046 3047 /* 3048 * local_lock_irq held so equivalent to spin_lock_irqsave for 3049 * both PREEMPT_RT and non-PREEMPT_RT configurations. 3050 */ 3051 spin_lock(&zone->lock); 3052 for (i = 0; i < count; ++i) { 3053 struct page *page = __rmqueue(zone, order, migratetype, 3054 alloc_flags); 3055 if (unlikely(page == NULL)) 3056 break; 3057 3058 if (unlikely(check_pcp_refill(page, order))) 3059 continue; 3060 3061 /* 3062 * Split buddy pages returned by expand() are received here in 3063 * physical page order. The page is added to the tail of 3064 * caller's list. From the callers perspective, the linked list 3065 * is ordered by page number under some conditions. 
This is 3066 * useful for IO devices that can forward direction from the 3067 * head, thus also in the physical page order. This is useful 3068 * for IO devices that can merge IO requests if the physical 3069 * pages are ordered properly. 3070 */ 3071 list_add_tail(&page->lru, list); 3072 allocated++; 3073 if (is_migrate_cma(get_pcppage_migratetype(page))) 3074 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 3075 -(1 << order)); 3076 } 3077 3078 /* 3079 * i pages were removed from the buddy list even if some leak due 3080 * to check_pcp_refill failing so adjust NR_FREE_PAGES based 3081 * on i. Do not confuse with 'allocated' which is the number of 3082 * pages added to the pcp list. 3083 */ 3084 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 3085 spin_unlock(&zone->lock); 3086 return allocated; 3087 } 3088 3089 #ifdef CONFIG_NUMA 3090 /* 3091 * Called from the vmstat counter updater to drain pagesets of this 3092 * currently executing processor on remote nodes after they have 3093 * expired. 3094 * 3095 * Note that this function must be called with the thread pinned to 3096 * a single processor. 3097 */ 3098 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 3099 { 3100 unsigned long flags; 3101 int to_drain, batch; 3102 3103 local_lock_irqsave(&pagesets.lock, flags); 3104 batch = READ_ONCE(pcp->batch); 3105 to_drain = min(pcp->count, batch); 3106 if (to_drain > 0) 3107 free_pcppages_bulk(zone, to_drain, pcp, 0); 3108 local_unlock_irqrestore(&pagesets.lock, flags); 3109 } 3110 #endif 3111 3112 /* 3113 * Drain pcplists of the indicated processor and zone. 3114 * 3115 * The processor must either be the current processor and the 3116 * thread pinned to the current processor or a processor that 3117 * is not online. 3118 */ 3119 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 3120 { 3121 unsigned long flags; 3122 struct per_cpu_pages *pcp; 3123 3124 local_lock_irqsave(&pagesets.lock, flags); 3125 3126 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 3127 if (pcp->count) 3128 free_pcppages_bulk(zone, pcp->count, pcp, 0); 3129 3130 local_unlock_irqrestore(&pagesets.lock, flags); 3131 } 3132 3133 /* 3134 * Drain pcplists of all zones on the indicated processor. 3135 * 3136 * The processor must either be the current processor and the 3137 * thread pinned to the current processor or a processor that 3138 * is not online. 3139 */ 3140 static void drain_pages(unsigned int cpu) 3141 { 3142 struct zone *zone; 3143 3144 for_each_populated_zone(zone) { 3145 drain_pages_zone(cpu, zone); 3146 } 3147 } 3148 3149 /* 3150 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 3151 * 3152 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 3153 * the single zone's pages. 3154 */ 3155 void drain_local_pages(struct zone *zone) 3156 { 3157 int cpu = smp_processor_id(); 3158 3159 if (zone) 3160 drain_pages_zone(cpu, zone); 3161 else 3162 drain_pages(cpu); 3163 } 3164 3165 static void drain_local_pages_wq(struct work_struct *work) 3166 { 3167 struct pcpu_drain *drain; 3168 3169 drain = container_of(work, struct pcpu_drain, work); 3170 3171 /* 3172 * drain_all_pages doesn't use proper cpu hotplug protection so 3173 * we can race with cpu offline when the WQ can move this from 3174 * a cpu pinned worker to an unbound one. We can operate on a different 3175 * cpu which is alright but we also have to make sure to not move to 3176 * a different one. 
3177 */ 3178 migrate_disable(); 3179 drain_local_pages(drain->zone); 3180 migrate_enable(); 3181 } 3182 3183 /* 3184 * The implementation of drain_all_pages(), exposing an extra parameter to 3185 * drain on all cpus. 3186 * 3187 * drain_all_pages() is optimized to only execute on cpus where pcplists are 3188 * not empty. The check for non-emptiness can however race with a free to 3189 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 3190 * that need the guarantee that every CPU has drained can disable the 3191 * optimizing racy check. 3192 */ 3193 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 3194 { 3195 int cpu; 3196 3197 /* 3198 * Allocate in the BSS so we won't require allocation in 3199 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 3200 */ 3201 static cpumask_t cpus_with_pcps; 3202 3203 /* 3204 * Make sure nobody triggers this path before mm_percpu_wq is fully 3205 * initialized. 3206 */ 3207 if (WARN_ON_ONCE(!mm_percpu_wq)) 3208 return; 3209 3210 /* 3211 * Do not drain if one is already in progress unless it's specific to 3212 * a zone. Such callers are primarily CMA and memory hotplug and need 3213 * the drain to be complete when the call returns. 3214 */ 3215 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 3216 if (!zone) 3217 return; 3218 mutex_lock(&pcpu_drain_mutex); 3219 } 3220 3221 /* 3222 * We don't care about racing with CPU hotplug event 3223 * as offline notification will cause the notified 3224 * cpu to drain that CPU pcps and on_each_cpu_mask 3225 * disables preemption as part of its processing 3226 */ 3227 for_each_online_cpu(cpu) { 3228 struct per_cpu_pages *pcp; 3229 struct zone *z; 3230 bool has_pcps = false; 3231 3232 if (force_all_cpus) { 3233 /* 3234 * The pcp.count check is racy, some callers need a 3235 * guarantee that no cpu is missed. 3236 */ 3237 has_pcps = true; 3238 } else if (zone) { 3239 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 3240 if (pcp->count) 3241 has_pcps = true; 3242 } else { 3243 for_each_populated_zone(z) { 3244 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 3245 if (pcp->count) { 3246 has_pcps = true; 3247 break; 3248 } 3249 } 3250 } 3251 3252 if (has_pcps) 3253 cpumask_set_cpu(cpu, &cpus_with_pcps); 3254 else 3255 cpumask_clear_cpu(cpu, &cpus_with_pcps); 3256 } 3257 3258 for_each_cpu(cpu, &cpus_with_pcps) { 3259 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu); 3260 3261 drain->zone = zone; 3262 INIT_WORK(&drain->work, drain_local_pages_wq); 3263 queue_work_on(cpu, mm_percpu_wq, &drain->work); 3264 } 3265 for_each_cpu(cpu, &cpus_with_pcps) 3266 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); 3267 3268 mutex_unlock(&pcpu_drain_mutex); 3269 } 3270 3271 /* 3272 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 3273 * 3274 * When zone parameter is non-NULL, spill just the single zone's pages. 3275 * 3276 * Note that this can be extremely slow as the draining happens in a workqueue. 3277 */ 3278 void drain_all_pages(struct zone *zone) 3279 { 3280 __drain_all_pages(zone, false); 3281 } 3282 3283 #ifdef CONFIG_HIBERNATION 3284 3285 /* 3286 * Touch the watchdog for every WD_PAGE_COUNT pages. 
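 *
 * With 4KB pages, 128K pages corresponds to 512MB, so the watchdog is
 * touched roughly once per 512MB scanned.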
3287 */ 3288 #define WD_PAGE_COUNT (128*1024) 3289 3290 void mark_free_pages(struct zone *zone) 3291 { 3292 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; 3293 unsigned long flags; 3294 unsigned int order, t; 3295 struct page *page; 3296 3297 if (zone_is_empty(zone)) 3298 return; 3299 3300 spin_lock_irqsave(&zone->lock, flags); 3301 3302 max_zone_pfn = zone_end_pfn(zone); 3303 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 3304 if (pfn_valid(pfn)) { 3305 page = pfn_to_page(pfn); 3306 3307 if (!--page_count) { 3308 touch_nmi_watchdog(); 3309 page_count = WD_PAGE_COUNT; 3310 } 3311 3312 if (page_zone(page) != zone) 3313 continue; 3314 3315 if (!swsusp_page_is_forbidden(page)) 3316 swsusp_unset_page_free(page); 3317 } 3318 3319 for_each_migratetype_order(order, t) { 3320 list_for_each_entry(page, 3321 &zone->free_area[order].free_list[t], lru) { 3322 unsigned long i; 3323 3324 pfn = page_to_pfn(page); 3325 for (i = 0; i < (1UL << order); i++) { 3326 if (!--page_count) { 3327 touch_nmi_watchdog(); 3328 page_count = WD_PAGE_COUNT; 3329 } 3330 swsusp_set_page_free(pfn_to_page(pfn + i)); 3331 } 3332 } 3333 } 3334 spin_unlock_irqrestore(&zone->lock, flags); 3335 } 3336 #endif /* CONFIG_PM */ 3337 3338 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 3339 unsigned int order) 3340 { 3341 int migratetype; 3342 3343 if (!free_pcp_prepare(page, order)) 3344 return false; 3345 3346 migratetype = get_pfnblock_migratetype(page, pfn); 3347 set_pcppage_migratetype(page, migratetype); 3348 return true; 3349 } 3350 3351 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch, 3352 bool free_high) 3353 { 3354 int min_nr_free, max_nr_free; 3355 3356 /* Free everything if batch freeing high-order pages. */ 3357 if (unlikely(free_high)) 3358 return pcp->count; 3359 3360 /* Check for PCP disabled or boot pageset */ 3361 if (unlikely(high < batch)) 3362 return 1; 3363 3364 /* Leave at least pcp->batch pages on the list */ 3365 min_nr_free = batch; 3366 max_nr_free = high - batch; 3367 3368 /* 3369 * Double the number of pages freed each time there is subsequent 3370 * freeing of pages without any allocation. 3371 */ 3372 batch <<= pcp->free_factor; 3373 if (batch < max_nr_free) 3374 pcp->free_factor++; 3375 batch = clamp(batch, min_nr_free, max_nr_free); 3376 3377 return batch; 3378 } 3379 3380 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 3381 bool free_high) 3382 { 3383 int high = READ_ONCE(pcp->high); 3384 3385 if (unlikely(!high || free_high)) 3386 return 0; 3387 3388 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 3389 return high; 3390 3391 /* 3392 * If reclaim is active, limit the number of pages that can be 3393 * stored on pcp lists 3394 */ 3395 return min(READ_ONCE(pcp->batch) << 2, high); 3396 } 3397 3398 static void free_unref_page_commit(struct page *page, int migratetype, 3399 unsigned int order) 3400 { 3401 struct zone *zone = page_zone(page); 3402 struct per_cpu_pages *pcp; 3403 int high; 3404 int pindex; 3405 bool free_high; 3406 3407 __count_vm_event(PGFREE); 3408 pcp = this_cpu_ptr(zone->per_cpu_pageset); 3409 pindex = order_to_pindex(migratetype, order); 3410 list_add(&page->lru, &pcp->lists[pindex]); 3411 pcp->count += 1 << order; 3412 3413 /* 3414 * As high-order pages other than THP's stored on PCP can contribute 3415 * to fragmentation, limit the number stored when PCP is heavily 3416 * freeing without allocation. 
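 * (Here "heavily freeing" means pcp->free_factor is non-zero because recent
 * frees were not interleaved with allocations, and the order is between 1
 * and PAGE_ALLOC_COSTLY_ORDER.)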
The remainder after bulk freeing 3417 * stops will be drained from vmstat refresh context. 3418 */ 3419 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); 3420 3421 high = nr_pcp_high(pcp, zone, free_high); 3422 if (pcp->count >= high) { 3423 int batch = READ_ONCE(pcp->batch); 3424 3425 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex); 3426 } 3427 } 3428 3429 /* 3430 * Free a pcp page 3431 */ 3432 void free_unref_page(struct page *page, unsigned int order) 3433 { 3434 unsigned long flags; 3435 unsigned long pfn = page_to_pfn(page); 3436 int migratetype; 3437 3438 if (!free_unref_page_prepare(page, pfn, order)) 3439 return; 3440 3441 /* 3442 * We only track unmovable, reclaimable and movable on pcp lists. 3443 * Place ISOLATE pages on the isolated list because they are being 3444 * offlined but treat HIGHATOMIC as movable pages so we can get those 3445 * areas back if necessary. Otherwise, we may have to free 3446 * excessively into the page allocator 3447 */ 3448 migratetype = get_pcppage_migratetype(page); 3449 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 3450 if (unlikely(is_migrate_isolate(migratetype))) { 3451 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 3452 return; 3453 } 3454 migratetype = MIGRATE_MOVABLE; 3455 } 3456 3457 local_lock_irqsave(&pagesets.lock, flags); 3458 free_unref_page_commit(page, migratetype, order); 3459 local_unlock_irqrestore(&pagesets.lock, flags); 3460 } 3461 3462 /* 3463 * Free a list of 0-order pages 3464 */ 3465 void free_unref_page_list(struct list_head *list) 3466 { 3467 struct page *page, *next; 3468 unsigned long flags; 3469 int batch_count = 0; 3470 int migratetype; 3471 3472 /* Prepare pages for freeing */ 3473 list_for_each_entry_safe(page, next, list, lru) { 3474 unsigned long pfn = page_to_pfn(page); 3475 if (!free_unref_page_prepare(page, pfn, 0)) { 3476 list_del(&page->lru); 3477 continue; 3478 } 3479 3480 /* 3481 * Free isolated pages directly to the allocator, see 3482 * comment in free_unref_page. 3483 */ 3484 migratetype = get_pcppage_migratetype(page); 3485 if (unlikely(is_migrate_isolate(migratetype))) { 3486 list_del(&page->lru); 3487 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 3488 continue; 3489 } 3490 } 3491 3492 local_lock_irqsave(&pagesets.lock, flags); 3493 list_for_each_entry_safe(page, next, list, lru) { 3494 /* 3495 * Non-isolated types over MIGRATE_PCPTYPES get added 3496 * to the MIGRATE_MOVABLE pcp list. 3497 */ 3498 migratetype = get_pcppage_migratetype(page); 3499 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 3500 migratetype = MIGRATE_MOVABLE; 3501 3502 trace_mm_page_free_batched(page); 3503 free_unref_page_commit(page, migratetype, 0); 3504 3505 /* 3506 * Guard against excessive IRQ disabled times when we get 3507 * a large list of pages to free. 3508 */ 3509 if (++batch_count == SWAP_CLUSTER_MAX) { 3510 local_unlock_irqrestore(&pagesets.lock, flags); 3511 batch_count = 0; 3512 local_lock_irqsave(&pagesets.lock, flags); 3513 } 3514 } 3515 local_unlock_irqrestore(&pagesets.lock, flags); 3516 } 3517 3518 /* 3519 * split_page takes a non-compound higher-order page, and splits it into 3520 * n (1<<order) sub-pages: page[0..n] 3521 * Each sub-page must be freed individually. 3522 * 3523 * Note: this is probably too low level an operation for use in drivers. 3524 * Please consult with lkml before using this in your driver. 
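 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page)
 *		split_page(page, 2);
 *
 * On success, page, page + 1, page + 2 and page + 3 are then independent
 * order-0 pages and each must eventually be released with __free_page().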
3525 */ 3526 void split_page(struct page *page, unsigned int order) 3527 { 3528 int i; 3529 3530 VM_BUG_ON_PAGE(PageCompound(page), page); 3531 VM_BUG_ON_PAGE(!page_count(page), page); 3532 3533 for (i = 1; i < (1 << order); i++) 3534 set_page_refcounted(page + i); 3535 split_page_owner(page, 1 << order); 3536 split_page_memcg(page, 1 << order); 3537 } 3538 EXPORT_SYMBOL_GPL(split_page); 3539 3540 int __isolate_free_page(struct page *page, unsigned int order) 3541 { 3542 unsigned long watermark; 3543 struct zone *zone; 3544 int mt; 3545 3546 BUG_ON(!PageBuddy(page)); 3547 3548 zone = page_zone(page); 3549 mt = get_pageblock_migratetype(page); 3550 3551 if (!is_migrate_isolate(mt)) { 3552 /* 3553 * Obey watermarks as if the page was being allocated. We can 3554 * emulate a high-order watermark check with a raised order-0 3555 * watermark, because we already know our high-order page 3556 * exists. 3557 */ 3558 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3559 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3560 return 0; 3561 3562 __mod_zone_freepage_state(zone, -(1UL << order), mt); 3563 } 3564 3565 /* Remove page from free list */ 3566 3567 del_page_from_free_list(page, zone, order); 3568 3569 /* 3570 * Set the pageblock if the isolated page is at least half of a 3571 * pageblock 3572 */ 3573 if (order >= pageblock_order - 1) { 3574 struct page *endpage = page + (1 << order) - 1; 3575 for (; page < endpage; page += pageblock_nr_pages) { 3576 int mt = get_pageblock_migratetype(page); 3577 /* 3578 * Only change normal pageblocks (i.e., they can merge 3579 * with others) 3580 */ 3581 if (migratetype_is_mergeable(mt)) 3582 set_pageblock_migratetype(page, 3583 MIGRATE_MOVABLE); 3584 } 3585 } 3586 3587 3588 return 1UL << order; 3589 } 3590 3591 /** 3592 * __putback_isolated_page - Return a now-isolated page back where we got it 3593 * @page: Page that was isolated 3594 * @order: Order of the isolated page 3595 * @mt: The page's pageblock's migratetype 3596 * 3597 * This function is meant to return a page pulled from the free lists via 3598 * __isolate_free_page back to the free lists they were pulled from. 3599 */ 3600 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3601 { 3602 struct zone *zone = page_zone(page); 3603 3604 /* zone lock should be held when this function is called */ 3605 lockdep_assert_held(&zone->lock); 3606 3607 /* Return isolated page to tail of freelist. */ 3608 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3609 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3610 } 3611 3612 /* 3613 * Update NUMA hit/miss statistics 3614 * 3615 * Must be called with interrupts disabled. 
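 *
 * A NUMA_HIT is counted on the zone the page came from when that zone is on
 * the preferred zone's node; otherwise that zone gets a NUMA_MISS and the
 * preferred zone a NUMA_FOREIGN. NUMA_LOCAL or NUMA_OTHER is additionally
 * counted on the allocating zone depending on whether it is on the current
 * CPU's node.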
3616 */ 3617 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 3618 long nr_account) 3619 { 3620 #ifdef CONFIG_NUMA 3621 enum numa_stat_item local_stat = NUMA_LOCAL; 3622 3623 /* skip numa counters update if numa stats is disabled */ 3624 if (!static_branch_likely(&vm_numa_stat_key)) 3625 return; 3626 3627 if (zone_to_nid(z) != numa_node_id()) 3628 local_stat = NUMA_OTHER; 3629 3630 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3631 __count_numa_events(z, NUMA_HIT, nr_account); 3632 else { 3633 __count_numa_events(z, NUMA_MISS, nr_account); 3634 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 3635 } 3636 __count_numa_events(z, local_stat, nr_account); 3637 #endif 3638 } 3639 3640 /* Remove page from the per-cpu list, caller must protect the list */ 3641 static inline 3642 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3643 int migratetype, 3644 unsigned int alloc_flags, 3645 struct per_cpu_pages *pcp, 3646 struct list_head *list) 3647 { 3648 struct page *page; 3649 3650 do { 3651 if (list_empty(list)) { 3652 int batch = READ_ONCE(pcp->batch); 3653 int alloced; 3654 3655 /* 3656 * Scale batch relative to order if batch implies 3657 * free pages can be stored on the PCP. Batch can 3658 * be 1 for small zones or for boot pagesets which 3659 * should never store free pages as the pages may 3660 * belong to arbitrary zones. 3661 */ 3662 if (batch > 1) 3663 batch = max(batch >> order, 2); 3664 alloced = rmqueue_bulk(zone, order, 3665 batch, list, 3666 migratetype, alloc_flags); 3667 3668 pcp->count += alloced << order; 3669 if (unlikely(list_empty(list))) 3670 return NULL; 3671 } 3672 3673 page = list_first_entry(list, struct page, lru); 3674 list_del(&page->lru); 3675 pcp->count -= 1 << order; 3676 } while (check_new_pcp(page, order)); 3677 3678 return page; 3679 } 3680 3681 /* Lock and remove page from the per-cpu list */ 3682 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3683 struct zone *zone, unsigned int order, 3684 gfp_t gfp_flags, int migratetype, 3685 unsigned int alloc_flags) 3686 { 3687 struct per_cpu_pages *pcp; 3688 struct list_head *list; 3689 struct page *page; 3690 unsigned long flags; 3691 3692 local_lock_irqsave(&pagesets.lock, flags); 3693 3694 /* 3695 * On allocation, reduce the number of pages that are batch freed. 3696 * See nr_pcp_free() where free_factor is increased for subsequent 3697 * frees. 3698 */ 3699 pcp = this_cpu_ptr(zone->per_cpu_pageset); 3700 pcp->free_factor >>= 1; 3701 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3702 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3703 local_unlock_irqrestore(&pagesets.lock, flags); 3704 if (page) { 3705 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); 3706 zone_statistics(preferred_zone, zone, 1); 3707 } 3708 return page; 3709 } 3710 3711 /* 3712 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 3713 */ 3714 static inline 3715 struct page *rmqueue(struct zone *preferred_zone, 3716 struct zone *zone, unsigned int order, 3717 gfp_t gfp_flags, unsigned int alloc_flags, 3718 int migratetype) 3719 { 3720 unsigned long flags; 3721 struct page *page; 3722 3723 if (likely(pcp_allowed_order(order))) { 3724 /* 3725 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and 3726 * we need to skip it when CMA area isn't allowed. 
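 * When that happens the pcplist fast path is bypassed and the page is taken
 * from the buddy free lists below under zone->lock.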
3727 */ 3728 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || 3729 migratetype != MIGRATE_MOVABLE) { 3730 page = rmqueue_pcplist(preferred_zone, zone, order, 3731 gfp_flags, migratetype, alloc_flags); 3732 goto out; 3733 } 3734 } 3735 3736 /* 3737 * We most definitely don't want callers attempting to 3738 * allocate greater than order-1 page units with __GFP_NOFAIL. 3739 */ 3740 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 3741 3742 do { 3743 page = NULL; 3744 spin_lock_irqsave(&zone->lock, flags); 3745 /* 3746 * order-0 request can reach here when the pcplist is skipped 3747 * due to non-CMA allocation context. HIGHATOMIC area is 3748 * reserved for high-order atomic allocation, so order-0 3749 * request should skip it. 3750 */ 3751 if (order > 0 && alloc_flags & ALLOC_HARDER) 3752 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3753 if (!page) { 3754 page = __rmqueue(zone, order, migratetype, alloc_flags); 3755 if (!page) 3756 goto failed; 3757 } 3758 __mod_zone_freepage_state(zone, -(1 << order), 3759 get_pcppage_migratetype(page)); 3760 spin_unlock_irqrestore(&zone->lock, flags); 3761 } while (check_new_pages(page, order)); 3762 3763 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3764 zone_statistics(preferred_zone, zone, 1); 3765 3766 out: 3767 /* Separate test+clear to avoid unnecessary atomics */ 3768 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { 3769 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3770 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3771 } 3772 3773 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3774 return page; 3775 3776 failed: 3777 spin_unlock_irqrestore(&zone->lock, flags); 3778 return NULL; 3779 } 3780 3781 #ifdef CONFIG_FAIL_PAGE_ALLOC 3782 3783 static struct { 3784 struct fault_attr attr; 3785 3786 bool ignore_gfp_highmem; 3787 bool ignore_gfp_reclaim; 3788 u32 min_order; 3789 } fail_page_alloc = { 3790 .attr = FAULT_ATTR_INITIALIZER, 3791 .ignore_gfp_reclaim = true, 3792 .ignore_gfp_highmem = true, 3793 .min_order = 1, 3794 }; 3795 3796 static int __init setup_fail_page_alloc(char *str) 3797 { 3798 return setup_fault_attr(&fail_page_alloc.attr, str); 3799 } 3800 __setup("fail_page_alloc=", setup_fail_page_alloc); 3801 3802 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3803 { 3804 if (order < fail_page_alloc.min_order) 3805 return false; 3806 if (gfp_mask & __GFP_NOFAIL) 3807 return false; 3808 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 3809 return false; 3810 if (fail_page_alloc.ignore_gfp_reclaim && 3811 (gfp_mask & __GFP_DIRECT_RECLAIM)) 3812 return false; 3813 3814 if (gfp_mask & __GFP_NOWARN) 3815 fail_page_alloc.attr.no_warn = true; 3816 3817 return should_fail(&fail_page_alloc.attr, 1 << order); 3818 } 3819 3820 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3821 3822 static int __init fail_page_alloc_debugfs(void) 3823 { 3824 umode_t mode = S_IFREG | 0600; 3825 struct dentry *dir; 3826 3827 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 3828 &fail_page_alloc.attr); 3829 3830 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3831 &fail_page_alloc.ignore_gfp_reclaim); 3832 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 3833 &fail_page_alloc.ignore_gfp_highmem); 3834 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); 3835 3836 return 0; 3837 } 3838 3839 late_initcall(fail_page_alloc_debugfs); 3840 3841 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3842 3843 #else /* CONFIG_FAIL_PAGE_ALLOC */ 3844 3845 static 
inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3846 { 3847 return false; 3848 } 3849 3850 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 3851 3852 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3853 { 3854 return __should_fail_alloc_page(gfp_mask, order); 3855 } 3856 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 3857 3858 static inline long __zone_watermark_unusable_free(struct zone *z, 3859 unsigned int order, unsigned int alloc_flags) 3860 { 3861 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); 3862 long unusable_free = (1 << order) - 1; 3863 3864 /* 3865 * If the caller does not have rights to ALLOC_HARDER then subtract 3866 * the high-atomic reserves. This will over-estimate the size of the 3867 * atomic reserve but it avoids a search. 3868 */ 3869 if (likely(!alloc_harder)) 3870 unusable_free += z->nr_reserved_highatomic; 3871 3872 #ifdef CONFIG_CMA 3873 /* If allocation can't use CMA areas don't use free CMA pages */ 3874 if (!(alloc_flags & ALLOC_CMA)) 3875 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3876 #endif 3877 3878 return unusable_free; 3879 } 3880 3881 /* 3882 * Return true if free base pages are above 'mark'. For high-order checks it 3883 * will return true if the order-0 watermark is reached and there is at least 3884 * one free page of a suitable size. Checking now avoids taking the zone lock 3885 * to check in the allocation paths if no pages are free. 3886 */ 3887 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3888 int highest_zoneidx, unsigned int alloc_flags, 3889 long free_pages) 3890 { 3891 long min = mark; 3892 int o; 3893 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); 3894 3895 /* free_pages may go negative - that's OK */ 3896 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3897 3898 if (alloc_flags & ALLOC_HIGH) 3899 min -= min / 2; 3900 3901 if (unlikely(alloc_harder)) { 3902 /* 3903 * OOM victims can try even harder than normal ALLOC_HARDER 3904 * users on the grounds that it's definitely going to be in 3905 * the exit path shortly and free memory. Any allocation it 3906 * makes during the free path will be small and short-lived. 3907 */ 3908 if (alloc_flags & ALLOC_OOM) 3909 min -= min / 2; 3910 else 3911 min -= min / 4; 3912 } 3913 3914 /* 3915 * Check watermarks for an order-0 allocation request. If these 3916 * are not met, then a high-order request also cannot go ahead 3917 * even if a suitable page happened to be free.
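 *
 * Worked example of the adjustments above (illustrative): with both
 * ALLOC_HIGH and non-OOM ALLOC_HARDER set, 'min' is first halved and
 * then reduced by a further quarter, so such a request only needs
 * free_pages to stay above roughly 3/8 of 'mark' plus this zone's
 * lowmem reserve.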
3918 */ 3919 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3920 return false; 3921 3922 /* If this is an order-0 request then the watermark is fine */ 3923 if (!order) 3924 return true; 3925 3926 /* For a high-order request, check at least one suitable page is free */ 3927 for (o = order; o < MAX_ORDER; o++) { 3928 struct free_area *area = &z->free_area[o]; 3929 int mt; 3930 3931 if (!area->nr_free) 3932 continue; 3933 3934 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3935 if (!free_area_empty(area, mt)) 3936 return true; 3937 } 3938 3939 #ifdef CONFIG_CMA 3940 if ((alloc_flags & ALLOC_CMA) && 3941 !free_area_empty(area, MIGRATE_CMA)) { 3942 return true; 3943 } 3944 #endif 3945 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC)) 3946 return true; 3947 } 3948 return false; 3949 } 3950 3951 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3952 int highest_zoneidx, unsigned int alloc_flags) 3953 { 3954 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3955 zone_page_state(z, NR_FREE_PAGES)); 3956 } 3957 3958 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3959 unsigned long mark, int highest_zoneidx, 3960 unsigned int alloc_flags, gfp_t gfp_mask) 3961 { 3962 long free_pages; 3963 3964 free_pages = zone_page_state(z, NR_FREE_PAGES); 3965 3966 /* 3967 * Fast check for order-0 only. If this fails then the reserves 3968 * need to be calculated. 3969 */ 3970 if (!order) { 3971 long usable_free; 3972 long reserved; 3973 3974 usable_free = free_pages; 3975 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3976 3977 /* reserved may over estimate high-atomic reserves. */ 3978 usable_free -= min(usable_free, reserved); 3979 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3980 return true; 3981 } 3982 3983 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3984 free_pages)) 3985 return true; 3986 /* 3987 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations 3988 * when checking the min watermark. The min watermark is the 3989 * point where boosting is ignored so that kswapd is woken up 3990 * when below the low watermark. 3991 */ 3992 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost 3993 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3994 mark = z->_watermark[WMARK_MIN]; 3995 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3996 alloc_flags, free_pages); 3997 } 3998 3999 return false; 4000 } 4001 4002 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 4003 unsigned long mark, int highest_zoneidx) 4004 { 4005 long free_pages = zone_page_state(z, NR_FREE_PAGES); 4006 4007 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 4008 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 4009 4010 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 4011 free_pages); 4012 } 4013 4014 #ifdef CONFIG_NUMA 4015 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 4016 4017 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 4018 { 4019 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 4020 node_reclaim_distance; 4021 } 4022 #else /* CONFIG_NUMA */ 4023 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 4024 { 4025 return true; 4026 } 4027 #endif /* CONFIG_NUMA */ 4028 4029 /* 4030 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 4031 * fragmentation is subtle. 
If the preferred zone was HIGHMEM then 4032 * premature use of a lower zone may cause lowmem pressure problems that 4033 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 4034 * probably too small. It only makes sense to spread allocations to avoid 4035 * fragmentation between the Normal and DMA32 zones. 4036 */ 4037 static inline unsigned int 4038 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 4039 { 4040 unsigned int alloc_flags; 4041 4042 /* 4043 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4044 * to save a branch. 4045 */ 4046 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 4047 4048 #ifdef CONFIG_ZONE_DMA32 4049 if (!zone) 4050 return alloc_flags; 4051 4052 if (zone_idx(zone) != ZONE_NORMAL) 4053 return alloc_flags; 4054 4055 /* 4056 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 4057 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 4058 * on UMA that if Normal is populated then so is DMA32. 4059 */ 4060 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 4061 if (nr_online_nodes > 1 && !populated_zone(--zone)) 4062 return alloc_flags; 4063 4064 alloc_flags |= ALLOC_NOFRAGMENT; 4065 #endif /* CONFIG_ZONE_DMA32 */ 4066 return alloc_flags; 4067 } 4068 4069 /* Must be called after current_gfp_context() which can change gfp_mask */ 4070 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 4071 unsigned int alloc_flags) 4072 { 4073 #ifdef CONFIG_CMA 4074 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 4075 alloc_flags |= ALLOC_CMA; 4076 #endif 4077 return alloc_flags; 4078 } 4079 4080 /* 4081 * get_page_from_freelist goes through the zonelist trying to allocate 4082 * a page. 4083 */ 4084 static struct page * 4085 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 4086 const struct alloc_context *ac) 4087 { 4088 struct zoneref *z; 4089 struct zone *zone; 4090 struct pglist_data *last_pgdat = NULL; 4091 bool last_pgdat_dirty_ok = false; 4092 bool no_fallback; 4093 4094 retry: 4095 /* 4096 * Scan zonelist, looking for a zone with enough free. 4097 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 4098 */ 4099 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 4100 z = ac->preferred_zoneref; 4101 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 4102 ac->nodemask) { 4103 struct page *page; 4104 unsigned long mark; 4105 4106 if (cpusets_enabled() && 4107 (alloc_flags & ALLOC_CPUSET) && 4108 !__cpuset_zone_allowed(zone, gfp_mask)) 4109 continue; 4110 /* 4111 * When allocating a page cache page for writing, we 4112 * want to get it from a node that is within its dirty 4113 * limit, such that no single node holds more than its 4114 * proportional share of globally allowed dirty pages. 4115 * The dirty limits take into account the node's 4116 * lowmem reserves and high watermark so that kswapd 4117 * should be able to balance it without having to 4118 * write pages from its LRU list. 4119 * 4120 * XXX: For now, allow allocations to potentially 4121 * exceed the per-node dirty limit in the slowpath 4122 * (spread_dirty_pages unset) before going into reclaim, 4123 * which is important when on a NUMA setup the allowed 4124 * nodes are together not big enough to reach the 4125 * global limit. The proper fix for these situations 4126 * will require awareness of nodes in the 4127 * dirty-throttling and the flusher threads. 
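 *
 * The check below is cached per pgdat: consecutive zones that belong
 * to the same node reuse the previous node_dirty_ok() result rather
 * than recomputing it for every zone.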
4128 */ 4129 if (ac->spread_dirty_pages) { 4130 if (last_pgdat != zone->zone_pgdat) { 4131 last_pgdat = zone->zone_pgdat; 4132 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 4133 } 4134 4135 if (!last_pgdat_dirty_ok) 4136 continue; 4137 } 4138 4139 if (no_fallback && nr_online_nodes > 1 && 4140 zone != ac->preferred_zoneref->zone) { 4141 int local_nid; 4142 4143 /* 4144 * If moving to a remote node, retry but allow 4145 * fragmenting fallbacks. Locality is more important 4146 * than fragmentation avoidance. 4147 */ 4148 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 4149 if (zone_to_nid(zone) != local_nid) { 4150 alloc_flags &= ~ALLOC_NOFRAGMENT; 4151 goto retry; 4152 } 4153 } 4154 4155 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 4156 if (!zone_watermark_fast(zone, order, mark, 4157 ac->highest_zoneidx, alloc_flags, 4158 gfp_mask)) { 4159 int ret; 4160 4161 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 4162 /* 4163 * Watermark failed for this zone, but see if we can 4164 * grow this zone if it contains deferred pages. 4165 */ 4166 if (static_branch_unlikely(&deferred_pages)) { 4167 if (_deferred_grow_zone(zone, order)) 4168 goto try_this_zone; 4169 } 4170 #endif 4171 /* Checked here to keep the fast path fast */ 4172 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 4173 if (alloc_flags & ALLOC_NO_WATERMARKS) 4174 goto try_this_zone; 4175 4176 if (!node_reclaim_enabled() || 4177 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 4178 continue; 4179 4180 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 4181 switch (ret) { 4182 case NODE_RECLAIM_NOSCAN: 4183 /* did not scan */ 4184 continue; 4185 case NODE_RECLAIM_FULL: 4186 /* scanned but unreclaimable */ 4187 continue; 4188 default: 4189 /* did we reclaim enough */ 4190 if (zone_watermark_ok(zone, order, mark, 4191 ac->highest_zoneidx, alloc_flags)) 4192 goto try_this_zone; 4193 4194 continue; 4195 } 4196 } 4197 4198 try_this_zone: 4199 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 4200 gfp_mask, alloc_flags, ac->migratetype); 4201 if (page) { 4202 prep_new_page(page, order, gfp_mask, alloc_flags); 4203 4204 /* 4205 * If this is a high-order atomic allocation then check 4206 * if the pageblock should be reserved for the future 4207 */ 4208 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 4209 reserve_highatomic_pageblock(page, zone, order); 4210 4211 return page; 4212 } else { 4213 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 4214 /* Try again if zone has deferred pages */ 4215 if (static_branch_unlikely(&deferred_pages)) { 4216 if (_deferred_grow_zone(zone, order)) 4217 goto try_this_zone; 4218 } 4219 #endif 4220 } 4221 } 4222 4223 /* 4224 * It's possible on a UMA machine to get through all zones that are 4225 * fragmented. If avoiding fragmentation, reset and try again. 4226 */ 4227 if (no_fallback) { 4228 alloc_flags &= ~ALLOC_NOFRAGMENT; 4229 goto retry; 4230 } 4231 4232 return NULL; 4233 } 4234 4235 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 4236 { 4237 unsigned int filter = SHOW_MEM_FILTER_NODES; 4238 4239 /* 4240 * This documents exceptions given to allocations in certain 4241 * contexts that are allowed to allocate outside current's set 4242 * of allowed nodes. 
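 *
 * Concretely: OOM victims and tasks with PF_MEMALLOC or PF_EXITING
 * (unless the allocation is __GFP_NOMEMALLOC), as well as allocations
 * from non-task context or without __GFP_DIRECT_RECLAIM, may allocate
 * off-node, so the node filter is dropped for them below.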
4243 */ 4244 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4245 if (tsk_is_oom_victim(current) || 4246 (current->flags & (PF_MEMALLOC | PF_EXITING))) 4247 filter &= ~SHOW_MEM_FILTER_NODES; 4248 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 4249 filter &= ~SHOW_MEM_FILTER_NODES; 4250 4251 show_mem(filter, nodemask); 4252 } 4253 4254 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 4255 { 4256 struct va_format vaf; 4257 va_list args; 4258 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 4259 4260 if ((gfp_mask & __GFP_NOWARN) || 4261 !__ratelimit(&nopage_rs) || 4262 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 4263 return; 4264 4265 va_start(args, fmt); 4266 vaf.fmt = fmt; 4267 vaf.va = &args; 4268 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 4269 current->comm, &vaf, gfp_mask, &gfp_mask, 4270 nodemask_pr_args(nodemask)); 4271 va_end(args); 4272 4273 cpuset_print_current_mems_allowed(); 4274 pr_cont("\n"); 4275 dump_stack(); 4276 warn_alloc_show_mem(gfp_mask, nodemask); 4277 } 4278 4279 static inline struct page * 4280 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 4281 unsigned int alloc_flags, 4282 const struct alloc_context *ac) 4283 { 4284 struct page *page; 4285 4286 page = get_page_from_freelist(gfp_mask, order, 4287 alloc_flags|ALLOC_CPUSET, ac); 4288 /* 4289 * fallback to ignore cpuset restriction if our nodes 4290 * are depleted 4291 */ 4292 if (!page) 4293 page = get_page_from_freelist(gfp_mask, order, 4294 alloc_flags, ac); 4295 4296 return page; 4297 } 4298 4299 static inline struct page * 4300 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 4301 const struct alloc_context *ac, unsigned long *did_some_progress) 4302 { 4303 struct oom_control oc = { 4304 .zonelist = ac->zonelist, 4305 .nodemask = ac->nodemask, 4306 .memcg = NULL, 4307 .gfp_mask = gfp_mask, 4308 .order = order, 4309 }; 4310 struct page *page; 4311 4312 *did_some_progress = 0; 4313 4314 /* 4315 * Acquire the oom lock. If that fails, somebody else is 4316 * making progress for us. 4317 */ 4318 if (!mutex_trylock(&oom_lock)) { 4319 *did_some_progress = 1; 4320 schedule_timeout_uninterruptible(1); 4321 return NULL; 4322 } 4323 4324 /* 4325 * Go through the zonelist yet one more time, keep very high watermark 4326 * here, this is only to catch a parallel oom killing, we must fail if 4327 * we're still under heavy pressure. But make sure that this reclaim 4328 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 4329 * allocation which will never fail due to oom_lock already held. 4330 */ 4331 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 4332 ~__GFP_DIRECT_RECLAIM, order, 4333 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 4334 if (page) 4335 goto out; 4336 4337 /* Coredumps can quickly deplete all memory reserves */ 4338 if (current->flags & PF_DUMPCORE) 4339 goto out; 4340 /* The OOM killer will not help higher order allocs */ 4341 if (order > PAGE_ALLOC_COSTLY_ORDER) 4342 goto out; 4343 /* 4344 * We have already exhausted all our reclaim opportunities without any 4345 * success so it is time to admit defeat. We will skip the OOM killer 4346 * because it is very likely that the caller has a more reasonable 4347 * fallback than shooting a random task. 4348 * 4349 * The OOM killer may not free memory on a specific node. 
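 *
 * Hence requests carrying __GFP_RETRY_MAYFAIL or __GFP_THISNODE skip
 * the OOM killer below and are allowed to fail instead.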
4350 */ 4351 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 4352 goto out; 4353 /* The OOM killer does not needlessly kill tasks for lowmem */ 4354 if (ac->highest_zoneidx < ZONE_NORMAL) 4355 goto out; 4356 if (pm_suspended_storage()) 4357 goto out; 4358 /* 4359 * XXX: GFP_NOFS allocations should rather fail than rely on 4360 * other request to make a forward progress. 4361 * We are in an unfortunate situation where out_of_memory cannot 4362 * do much for this context but let's try it to at least get 4363 * access to memory reserved if the current task is killed (see 4364 * out_of_memory). Once filesystems are ready to handle allocation 4365 * failures more gracefully we should just bail out here. 4366 */ 4367 4368 /* Exhausted what can be done so it's blame time */ 4369 if (out_of_memory(&oc) || 4370 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 4371 *did_some_progress = 1; 4372 4373 /* 4374 * Help non-failing allocations by giving them access to memory 4375 * reserves 4376 */ 4377 if (gfp_mask & __GFP_NOFAIL) 4378 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 4379 ALLOC_NO_WATERMARKS, ac); 4380 } 4381 out: 4382 mutex_unlock(&oom_lock); 4383 return page; 4384 } 4385 4386 /* 4387 * Maximum number of compaction retries with a progress before OOM 4388 * killer is consider as the only way to move forward. 4389 */ 4390 #define MAX_COMPACT_RETRIES 16 4391 4392 #ifdef CONFIG_COMPACTION 4393 /* Try memory compaction for high-order allocations before reclaim */ 4394 static struct page * 4395 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4396 unsigned int alloc_flags, const struct alloc_context *ac, 4397 enum compact_priority prio, enum compact_result *compact_result) 4398 { 4399 struct page *page = NULL; 4400 unsigned long pflags; 4401 unsigned int noreclaim_flag; 4402 4403 if (!order) 4404 return NULL; 4405 4406 psi_memstall_enter(&pflags); 4407 delayacct_compact_start(); 4408 noreclaim_flag = memalloc_noreclaim_save(); 4409 4410 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 4411 prio, &page); 4412 4413 memalloc_noreclaim_restore(noreclaim_flag); 4414 psi_memstall_leave(&pflags); 4415 delayacct_compact_end(); 4416 4417 if (*compact_result == COMPACT_SKIPPED) 4418 return NULL; 4419 /* 4420 * At least in one zone compaction wasn't deferred or skipped, so let's 4421 * count a compaction stall 4422 */ 4423 count_vm_event(COMPACTSTALL); 4424 4425 /* Prep a captured page if available */ 4426 if (page) 4427 prep_new_page(page, order, gfp_mask, alloc_flags); 4428 4429 /* Try get a page from the freelist if available */ 4430 if (!page) 4431 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4432 4433 if (page) { 4434 struct zone *zone = page_zone(page); 4435 4436 zone->compact_blockskip_flush = false; 4437 compaction_defer_reset(zone, order, true); 4438 count_vm_event(COMPACTSUCCESS); 4439 return page; 4440 } 4441 4442 /* 4443 * It's bad if compaction run occurs and fails. The most likely reason 4444 * is that pages exist, but not enough to satisfy watermarks. 
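 *
 * (For example, compaction may have assembled a page of the requested
 * order while the zone as a whole is still below its watermark, so the
 * allocation attempt above fails anyway.)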
4445 */ 4446 count_vm_event(COMPACTFAIL); 4447 4448 cond_resched(); 4449 4450 return NULL; 4451 } 4452 4453 static inline bool 4454 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4455 enum compact_result compact_result, 4456 enum compact_priority *compact_priority, 4457 int *compaction_retries) 4458 { 4459 int max_retries = MAX_COMPACT_RETRIES; 4460 int min_priority; 4461 bool ret = false; 4462 int retries = *compaction_retries; 4463 enum compact_priority priority = *compact_priority; 4464 4465 if (!order) 4466 return false; 4467 4468 if (fatal_signal_pending(current)) 4469 return false; 4470 4471 if (compaction_made_progress(compact_result)) 4472 (*compaction_retries)++; 4473 4474 /* 4475 * compaction considers all the zone as desperately out of memory 4476 * so it doesn't really make much sense to retry except when the 4477 * failure could be caused by insufficient priority 4478 */ 4479 if (compaction_failed(compact_result)) 4480 goto check_priority; 4481 4482 /* 4483 * compaction was skipped because there are not enough order-0 pages 4484 * to work with, so we retry only if it looks like reclaim can help. 4485 */ 4486 if (compaction_needs_reclaim(compact_result)) { 4487 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4488 goto out; 4489 } 4490 4491 /* 4492 * make sure the compaction wasn't deferred or didn't bail out early 4493 * due to locks contention before we declare that we should give up. 4494 * But the next retry should use a higher priority if allowed, so 4495 * we don't just keep bailing out endlessly. 4496 */ 4497 if (compaction_withdrawn(compact_result)) { 4498 goto check_priority; 4499 } 4500 4501 /* 4502 * !costly requests are much more important than __GFP_RETRY_MAYFAIL 4503 * costly ones because they are de facto nofail and invoke OOM 4504 * killer to move on while costly can fail and users are ready 4505 * to cope with that. 1/4 retries is rather arbitrary but we 4506 * would need much more detailed feedback from compaction to 4507 * make a better decision. 4508 */ 4509 if (order > PAGE_ALLOC_COSTLY_ORDER) 4510 max_retries /= 4; 4511 if (*compaction_retries <= max_retries) { 4512 ret = true; 4513 goto out; 4514 } 4515 4516 /* 4517 * Make sure there are attempts at the highest priority if we exhausted 4518 * all retries or failed at the lower priorities. 4519 */ 4520 check_priority: 4521 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
4522 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4523 4524 if (*compact_priority > min_priority) { 4525 (*compact_priority)--; 4526 *compaction_retries = 0; 4527 ret = true; 4528 } 4529 out: 4530 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4531 return ret; 4532 } 4533 #else 4534 static inline struct page * 4535 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4536 unsigned int alloc_flags, const struct alloc_context *ac, 4537 enum compact_priority prio, enum compact_result *compact_result) 4538 { 4539 *compact_result = COMPACT_SKIPPED; 4540 return NULL; 4541 } 4542 4543 static inline bool 4544 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 4545 enum compact_result compact_result, 4546 enum compact_priority *compact_priority, 4547 int *compaction_retries) 4548 { 4549 struct zone *zone; 4550 struct zoneref *z; 4551 4552 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4553 return false; 4554 4555 /* 4556 * There are setups with compaction disabled which would prefer to loop 4557 * inside the allocator rather than hit the oom killer prematurely. 4558 * Let's give them a good hope and keep retrying while the order-0 4559 * watermarks are OK. 4560 */ 4561 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4562 ac->highest_zoneidx, ac->nodemask) { 4563 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4564 ac->highest_zoneidx, alloc_flags)) 4565 return true; 4566 } 4567 return false; 4568 } 4569 #endif /* CONFIG_COMPACTION */ 4570 4571 #ifdef CONFIG_LOCKDEP 4572 static struct lockdep_map __fs_reclaim_map = 4573 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4574 4575 static bool __need_reclaim(gfp_t gfp_mask) 4576 { 4577 /* no reclaim without waiting on it */ 4578 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4579 return false; 4580 4581 /* this guy won't enter reclaim */ 4582 if (current->flags & PF_MEMALLOC) 4583 return false; 4584 4585 if (gfp_mask & __GFP_NOLOCKDEP) 4586 return false; 4587 4588 return true; 4589 } 4590 4591 void __fs_reclaim_acquire(unsigned long ip) 4592 { 4593 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4594 } 4595 4596 void __fs_reclaim_release(unsigned long ip) 4597 { 4598 lock_release(&__fs_reclaim_map, ip); 4599 } 4600 4601 void fs_reclaim_acquire(gfp_t gfp_mask) 4602 { 4603 gfp_mask = current_gfp_context(gfp_mask); 4604 4605 if (__need_reclaim(gfp_mask)) { 4606 if (gfp_mask & __GFP_FS) 4607 __fs_reclaim_acquire(_RET_IP_); 4608 4609 #ifdef CONFIG_MMU_NOTIFIER 4610 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4611 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4612 #endif 4613 4614 } 4615 } 4616 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4617 4618 void fs_reclaim_release(gfp_t gfp_mask) 4619 { 4620 gfp_mask = current_gfp_context(gfp_mask); 4621 4622 if (__need_reclaim(gfp_mask)) { 4623 if (gfp_mask & __GFP_FS) 4624 __fs_reclaim_release(_RET_IP_); 4625 } 4626 } 4627 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4628 #endif 4629 4630 /* Perform direct synchronous page reclaim */ 4631 static unsigned long 4632 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4633 const struct alloc_context *ac) 4634 { 4635 unsigned int noreclaim_flag; 4636 unsigned long progress; 4637 4638 cond_resched(); 4639 4640 /* We now go into synchronous reclaim */ 4641 cpuset_memory_pressure_bump(); 4642 fs_reclaim_acquire(gfp_mask); 4643 noreclaim_flag = memalloc_noreclaim_save(); 4644 4645 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 
4646 ac->nodemask); 4647 4648 memalloc_noreclaim_restore(noreclaim_flag); 4649 fs_reclaim_release(gfp_mask); 4650 4651 cond_resched(); 4652 4653 return progress; 4654 } 4655 4656 /* The really slow allocator path where we enter direct reclaim */ 4657 static inline struct page * 4658 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4659 unsigned int alloc_flags, const struct alloc_context *ac, 4660 unsigned long *did_some_progress) 4661 { 4662 struct page *page = NULL; 4663 unsigned long pflags; 4664 bool drained = false; 4665 4666 psi_memstall_enter(&pflags); 4667 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4668 if (unlikely(!(*did_some_progress))) 4669 goto out; 4670 4671 retry: 4672 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4673 4674 /* 4675 * If an allocation failed after direct reclaim, it could be because 4676 * pages are pinned on the per-cpu lists or in high alloc reserves. 4677 * Shrink them and try again 4678 */ 4679 if (!page && !drained) { 4680 unreserve_highatomic_pageblock(ac, false); 4681 drain_all_pages(NULL); 4682 drained = true; 4683 goto retry; 4684 } 4685 out: 4686 psi_memstall_leave(&pflags); 4687 4688 return page; 4689 } 4690 4691 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4692 const struct alloc_context *ac) 4693 { 4694 struct zoneref *z; 4695 struct zone *zone; 4696 pg_data_t *last_pgdat = NULL; 4697 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4698 4699 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4700 ac->nodemask) { 4701 if (!managed_zone(zone)) 4702 continue; 4703 if (last_pgdat != zone->zone_pgdat) { 4704 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 4705 last_pgdat = zone->zone_pgdat; 4706 } 4707 } 4708 } 4709 4710 static inline unsigned int 4711 gfp_to_alloc_flags(gfp_t gfp_mask) 4712 { 4713 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4714 4715 /* 4716 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH 4717 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4718 * to save two branches. 4719 */ 4720 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 4721 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4722 4723 /* 4724 * The caller may dip into page reserves a bit more if the caller 4725 * cannot run direct reclaim, or if the caller has realtime scheduling 4726 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4727 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 4728 */ 4729 alloc_flags |= (__force int) 4730 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4731 4732 if (gfp_mask & __GFP_ATOMIC) { 4733 /* 4734 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4735 * if it can't schedule. 4736 */ 4737 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4738 alloc_flags |= ALLOC_HARDER; 4739 /* 4740 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 4741 * comment for __cpuset_node_allowed(). 
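 *
 * Clearing ALLOC_CPUSET below lets an atomic allocation succeed even
 * when the current cpuset offers no eligible node, rather than failing
 * it outright.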
4742 */ 4743 alloc_flags &= ~ALLOC_CPUSET; 4744 } else if (unlikely(rt_task(current)) && in_task()) 4745 alloc_flags |= ALLOC_HARDER; 4746 4747 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4748 4749 return alloc_flags; 4750 } 4751 4752 static bool oom_reserves_allowed(struct task_struct *tsk) 4753 { 4754 if (!tsk_is_oom_victim(tsk)) 4755 return false; 4756 4757 /* 4758 * !MMU doesn't have oom reaper so give access to memory reserves 4759 * only to the thread with TIF_MEMDIE set 4760 */ 4761 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4762 return false; 4763 4764 return true; 4765 } 4766 4767 /* 4768 * Distinguish requests which really need access to full memory 4769 * reserves from oom victims which can live with a portion of it 4770 */ 4771 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4772 { 4773 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4774 return 0; 4775 if (gfp_mask & __GFP_MEMALLOC) 4776 return ALLOC_NO_WATERMARKS; 4777 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4778 return ALLOC_NO_WATERMARKS; 4779 if (!in_interrupt()) { 4780 if (current->flags & PF_MEMALLOC) 4781 return ALLOC_NO_WATERMARKS; 4782 else if (oom_reserves_allowed(current)) 4783 return ALLOC_OOM; 4784 } 4785 4786 return 0; 4787 } 4788 4789 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4790 { 4791 return !!__gfp_pfmemalloc_flags(gfp_mask); 4792 } 4793 4794 /* 4795 * Checks whether it makes sense to retry the reclaim to make a forward progress 4796 * for the given allocation request. 4797 * 4798 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4799 * without success, or when we couldn't even meet the watermark if we 4800 * reclaimed all remaining pages on the LRU lists. 4801 * 4802 * Returns true if a retry is viable or false to enter the oom path. 4803 */ 4804 static inline bool 4805 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4806 struct alloc_context *ac, int alloc_flags, 4807 bool did_some_progress, int *no_progress_loops) 4808 { 4809 struct zone *zone; 4810 struct zoneref *z; 4811 bool ret = false; 4812 4813 /* 4814 * Costly allocations might have made a progress but this doesn't mean 4815 * their order will become available due to high fragmentation so 4816 * always increment the no progress counter for them 4817 */ 4818 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4819 *no_progress_loops = 0; 4820 else 4821 (*no_progress_loops)++; 4822 4823 /* 4824 * Make sure we converge to OOM if we cannot make any progress 4825 * several times in the row. 4826 */ 4827 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 4828 /* Before OOM, exhaust highatomic_reserve */ 4829 return unreserve_highatomic_pageblock(ac, true); 4830 } 4831 4832 /* 4833 * Keep reclaiming pages while there is a chance this will lead 4834 * somewhere. If none of the target zones can satisfy our allocation 4835 * request even if all reclaimable pages are considered then we are 4836 * screwed and have to go OOM. 4837 */ 4838 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4839 ac->highest_zoneidx, ac->nodemask) { 4840 unsigned long available; 4841 unsigned long reclaimable; 4842 unsigned long min_wmark = min_wmark_pages(zone); 4843 bool wmark; 4844 4845 available = reclaimable = zone_reclaimable_pages(zone); 4846 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4847 4848 /* 4849 * Would the allocation succeed if we reclaimed all 4850 * reclaimable pages? 
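 *
 * 'available' above is the zone's free pages plus everything on its
 * LRU that could still be reclaimed; it is checked against the min
 * watermark below, so a retry is only considered worthwhile if a full
 * reclaim could get this allocation over that mark.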
4851 */ 4852 wmark = __zone_watermark_ok(zone, order, min_wmark, 4853 ac->highest_zoneidx, alloc_flags, available); 4854 trace_reclaim_retry_zone(z, order, reclaimable, 4855 available, min_wmark, *no_progress_loops, wmark); 4856 if (wmark) { 4857 ret = true; 4858 break; 4859 } 4860 } 4861 4862 /* 4863 * Memory allocation/reclaim might be called from a WQ context and the 4864 * current implementation of the WQ concurrency control doesn't 4865 * recognize that a particular WQ is congested if the worker thread is 4866 * looping without ever sleeping. Therefore we have to do a short sleep 4867 * here rather than calling cond_resched(). 4868 */ 4869 if (current->flags & PF_WQ_WORKER) 4870 schedule_timeout_uninterruptible(1); 4871 else 4872 cond_resched(); 4873 return ret; 4874 } 4875 4876 static inline bool 4877 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4878 { 4879 /* 4880 * It's possible that cpuset's mems_allowed and the nodemask from 4881 * mempolicy don't intersect. This should be normally dealt with by 4882 * policy_nodemask(), but it's possible to race with cpuset update in 4883 * such a way the check therein was true, and then it became false 4884 * before we got our cpuset_mems_cookie here. 4885 * This assumes that for all allocations, ac->nodemask can come only 4886 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4887 * when it does not intersect with the cpuset restrictions) or the 4888 * caller can deal with a violated nodemask. 4889 */ 4890 if (cpusets_enabled() && ac->nodemask && 4891 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4892 ac->nodemask = NULL; 4893 return true; 4894 } 4895 4896 /* 4897 * When updating a task's mems_allowed or mempolicy nodemask, it is 4898 * possible to race with parallel threads in such a way that our 4899 * allocation can fail while the mask is being updated. If we are about 4900 * to fail, check if the cpuset changed during allocation and if so, 4901 * retry. 4902 */ 4903 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4904 return true; 4905 4906 return false; 4907 } 4908 4909 static inline struct page * 4910 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4911 struct alloc_context *ac) 4912 { 4913 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4914 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4915 struct page *page = NULL; 4916 unsigned int alloc_flags; 4917 unsigned long did_some_progress; 4918 enum compact_priority compact_priority; 4919 enum compact_result compact_result; 4920 int compaction_retries; 4921 int no_progress_loops; 4922 unsigned int cpuset_mems_cookie; 4923 int reserve_flags; 4924 4925 /* 4926 * We also sanity check to catch abuse of atomic reserves being used by 4927 * callers that are not in atomic context. 4928 */ 4929 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 4930 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 4931 gfp_mask &= ~__GFP_ATOMIC; 4932 4933 retry_cpuset: 4934 compaction_retries = 0; 4935 no_progress_loops = 0; 4936 compact_priority = DEF_COMPACT_PRIORITY; 4937 cpuset_mems_cookie = read_mems_allowed_begin(); 4938 4939 /* 4940 * The fast path uses conservative alloc_flags to succeed only until 4941 * kswapd needs to be woken up, and to avoid the cost of setting up 4942 * alloc_flags precisely. So we do that now. 
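 *
 * gfp_to_alloc_flags() below starts from ALLOC_WMARK_MIN | ALLOC_CPUSET
 * and adds ALLOC_HIGH, ALLOC_HARDER, ALLOC_KSWAPD and/or ALLOC_CMA
 * depending on the gfp mask and the calling task.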
4943 */ 4944 alloc_flags = gfp_to_alloc_flags(gfp_mask); 4945 4946 /* 4947 * We need to recalculate the starting point for the zonelist iterator 4948 * because we might have used different nodemask in the fast path, or 4949 * there was a cpuset modification and we are retrying - otherwise we 4950 * could end up iterating over non-eligible zones endlessly. 4951 */ 4952 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4953 ac->highest_zoneidx, ac->nodemask); 4954 if (!ac->preferred_zoneref->zone) 4955 goto nopage; 4956 4957 /* 4958 * Check for insane configurations where the cpuset doesn't contain 4959 * any suitable zone to satisfy the request - e.g. non-movable 4960 * GFP_HIGHUSER allocations from MOVABLE nodes only. 4961 */ 4962 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4963 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4964 ac->highest_zoneidx, 4965 &cpuset_current_mems_allowed); 4966 if (!z->zone) 4967 goto nopage; 4968 } 4969 4970 if (alloc_flags & ALLOC_KSWAPD) 4971 wake_all_kswapds(order, gfp_mask, ac); 4972 4973 /* 4974 * The adjusted alloc_flags might result in immediate success, so try 4975 * that first 4976 */ 4977 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4978 if (page) 4979 goto got_pg; 4980 4981 /* 4982 * For costly allocations, try direct compaction first, as it's likely 4983 * that we have enough base pages and don't need to reclaim. For non- 4984 * movable high-order allocations, do that as well, as compaction will 4985 * try prevent permanent fragmentation by migrating from blocks of the 4986 * same migratetype. 4987 * Don't try this for allocations that are allowed to ignore 4988 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4989 */ 4990 if (can_direct_reclaim && 4991 (costly_order || 4992 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4993 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4994 page = __alloc_pages_direct_compact(gfp_mask, order, 4995 alloc_flags, ac, 4996 INIT_COMPACT_PRIORITY, 4997 &compact_result); 4998 if (page) 4999 goto got_pg; 5000 5001 /* 5002 * Checks for costly allocations with __GFP_NORETRY, which 5003 * includes some THP page fault allocations 5004 */ 5005 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 5006 /* 5007 * If allocating entire pageblock(s) and compaction 5008 * failed because all zones are below low watermarks 5009 * or is prohibited because it recently failed at this 5010 * order, fail immediately unless the allocator has 5011 * requested compaction and reclaim retry. 5012 * 5013 * Reclaim is 5014 * - potentially very expensive because zones are far 5015 * below their low watermarks or this is part of very 5016 * bursty high order allocations, 5017 * - not guaranteed to help because isolate_freepages() 5018 * may not iterate over freed pages as part of its 5019 * linear scan, and 5020 * - unlikely to make entire pageblocks free on its 5021 * own. 5022 */ 5023 if (compact_result == COMPACT_SKIPPED || 5024 compact_result == COMPACT_DEFERRED) 5025 goto nopage; 5026 5027 /* 5028 * Looks like reclaim/compaction is worth trying, but 5029 * sync compaction could be very expensive, so keep 5030 * using async compaction. 
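 *
 * (Resetting compact_priority to INIT_COMPACT_PRIORITY below keeps the
 * next direct compaction attempt asynchronous; later retries may still
 * escalate the priority via should_compact_retry().)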
5031 */ 5032 compact_priority = INIT_COMPACT_PRIORITY; 5033 } 5034 } 5035 5036 retry: 5037 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 5038 if (alloc_flags & ALLOC_KSWAPD) 5039 wake_all_kswapds(order, gfp_mask, ac); 5040 5041 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 5042 if (reserve_flags) 5043 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags); 5044 5045 /* 5046 * Reset the nodemask and zonelist iterators if memory policies can be 5047 * ignored. These allocations are high priority and system rather than 5048 * user oriented. 5049 */ 5050 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 5051 ac->nodemask = NULL; 5052 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5053 ac->highest_zoneidx, ac->nodemask); 5054 } 5055 5056 /* Attempt with potentially adjusted zonelist and alloc_flags */ 5057 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 5058 if (page) 5059 goto got_pg; 5060 5061 /* Caller is not willing to reclaim, we can't balance anything */ 5062 if (!can_direct_reclaim) 5063 goto nopage; 5064 5065 /* Avoid recursion of direct reclaim */ 5066 if (current->flags & PF_MEMALLOC) 5067 goto nopage; 5068 5069 /* Try direct reclaim and then allocating */ 5070 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 5071 &did_some_progress); 5072 if (page) 5073 goto got_pg; 5074 5075 /* Try direct compaction and then allocating */ 5076 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 5077 compact_priority, &compact_result); 5078 if (page) 5079 goto got_pg; 5080 5081 /* Do not loop if specifically requested */ 5082 if (gfp_mask & __GFP_NORETRY) 5083 goto nopage; 5084 5085 /* 5086 * Do not retry costly high order allocations unless they are 5087 * __GFP_RETRY_MAYFAIL 5088 */ 5089 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) 5090 goto nopage; 5091 5092 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 5093 did_some_progress > 0, &no_progress_loops)) 5094 goto retry; 5095 5096 /* 5097 * It doesn't make any sense to retry for the compaction if the order-0 5098 * reclaim is not able to make any progress because the current 5099 * implementation of the compaction depends on the sufficient amount 5100 * of free memory (see __compaction_suitable) 5101 */ 5102 if (did_some_progress > 0 && 5103 should_compact_retry(ac, order, alloc_flags, 5104 compact_result, &compact_priority, 5105 &compaction_retries)) 5106 goto retry; 5107 5108 5109 /* Deal with possible cpuset update races before we start OOM killing */ 5110 if (check_retry_cpuset(cpuset_mems_cookie, ac)) 5111 goto retry_cpuset; 5112 5113 /* Reclaim has failed us, start killing things */ 5114 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 5115 if (page) 5116 goto got_pg; 5117 5118 /* Avoid allocations with no watermarks from looping endlessly */ 5119 if (tsk_is_oom_victim(current) && 5120 (alloc_flags & ALLOC_OOM || 5121 (gfp_mask & __GFP_NOMEMALLOC))) 5122 goto nopage; 5123 5124 /* Retry as long as the OOM killer is making progress */ 5125 if (did_some_progress) { 5126 no_progress_loops = 0; 5127 goto retry; 5128 } 5129 5130 nopage: 5131 /* Deal with possible cpuset update races before we fail */ 5132 if (check_retry_cpuset(cpuset_mems_cookie, ac)) 5133 goto retry_cpuset; 5134 5135 /* 5136 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 5137 * we always retry 5138 */ 5139 if (gfp_mask & __GFP_NOFAIL) { 5140 /* 5141 * All existing users of the __GFP_NOFAIL are blockable, so 
warn 5142 * of any new users that actually require GFP_NOWAIT 5143 */ 5144 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask)) 5145 goto fail; 5146 5147 /* 5148 * PF_MEMALLOC request from this context is rather bizarre 5149 * because we cannot reclaim anything and only can loop waiting 5150 * for somebody to do a work for us 5151 */ 5152 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask); 5153 5154 /* 5155 * non failing costly orders are a hard requirement which we 5156 * are not prepared for much so let's warn about these users 5157 * so that we can identify them and convert them to something 5158 * else. 5159 */ 5160 WARN_ON_ONCE_GFP(order > PAGE_ALLOC_COSTLY_ORDER, gfp_mask); 5161 5162 /* 5163 * Help non-failing allocations by giving them access to memory 5164 * reserves but do not use ALLOC_NO_WATERMARKS because this 5165 * could deplete whole memory reserves which would just make 5166 * the situation worse 5167 */ 5168 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); 5169 if (page) 5170 goto got_pg; 5171 5172 cond_resched(); 5173 goto retry; 5174 } 5175 fail: 5176 warn_alloc(gfp_mask, ac->nodemask, 5177 "page allocation failure: order:%u", order); 5178 got_pg: 5179 return page; 5180 } 5181 5182 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 5183 int preferred_nid, nodemask_t *nodemask, 5184 struct alloc_context *ac, gfp_t *alloc_gfp, 5185 unsigned int *alloc_flags) 5186 { 5187 ac->highest_zoneidx = gfp_zone(gfp_mask); 5188 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 5189 ac->nodemask = nodemask; 5190 ac->migratetype = gfp_migratetype(gfp_mask); 5191 5192 if (cpusets_enabled()) { 5193 *alloc_gfp |= __GFP_HARDWALL; 5194 /* 5195 * When we are in the interrupt context, it is irrelevant 5196 * to the current task context. It means that any node ok. 5197 */ 5198 if (in_task() && !ac->nodemask) 5199 ac->nodemask = &cpuset_current_mems_allowed; 5200 else 5201 *alloc_flags |= ALLOC_CPUSET; 5202 } 5203 5204 fs_reclaim_acquire(gfp_mask); 5205 fs_reclaim_release(gfp_mask); 5206 5207 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 5208 5209 if (should_fail_alloc_page(gfp_mask, order)) 5210 return false; 5211 5212 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 5213 5214 /* Dirty zone balancing only done in the fast path */ 5215 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 5216 5217 /* 5218 * The preferred zone is used for statistics but crucially it is 5219 * also used as the starting point for the zonelist iterator. It 5220 * may get reset for allocations that ignore memory policies. 5221 */ 5222 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5223 ac->highest_zoneidx, ac->nodemask); 5224 5225 return true; 5226 } 5227 5228 /* 5229 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 5230 * @gfp: GFP flags for the allocation 5231 * @preferred_nid: The preferred NUMA node ID to allocate from 5232 * @nodemask: Set of nodes to allocate from, may be NULL 5233 * @nr_pages: The number of pages desired on the list or array 5234 * @page_list: Optional list to store the allocated pages 5235 * @page_array: Optional array to store the pages 5236 * 5237 * This is a batched version of the page allocator that attempts to 5238 * allocate nr_pages quickly. Pages are added to page_list if page_list 5239 * is not NULL, otherwise it is assumed that the page_array is valid. 5240 * 5241 * For lists, nr_pages is the number of pages that should be allocated. 
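 * (Pages are added to @page_list with list_add(), i.e. newest first.)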
5242 * 5243 * For arrays, only NULL elements are populated with pages and nr_pages 5244 * is the maximum number of pages that will be stored in the array. 5245 * 5246 * Returns the number of pages on the list or array. 5247 */ 5248 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, 5249 nodemask_t *nodemask, int nr_pages, 5250 struct list_head *page_list, 5251 struct page **page_array) 5252 { 5253 struct page *page; 5254 unsigned long flags; 5255 struct zone *zone; 5256 struct zoneref *z; 5257 struct per_cpu_pages *pcp; 5258 struct list_head *pcp_list; 5259 struct alloc_context ac; 5260 gfp_t alloc_gfp; 5261 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5262 int nr_populated = 0, nr_account = 0; 5263 5264 /* 5265 * Skip populated array elements to determine if any pages need 5266 * to be allocated before disabling IRQs. 5267 */ 5268 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 5269 nr_populated++; 5270 5271 /* No pages requested? */ 5272 if (unlikely(nr_pages <= 0)) 5273 goto out; 5274 5275 /* Already populated array? */ 5276 if (unlikely(page_array && nr_pages - nr_populated == 0)) 5277 goto out; 5278 5279 /* Bulk allocator does not support memcg accounting. */ 5280 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT)) 5281 goto failed; 5282 5283 /* Use the single page allocator for one page. */ 5284 if (nr_pages - nr_populated == 1) 5285 goto failed; 5286 5287 #ifdef CONFIG_PAGE_OWNER 5288 /* 5289 * PAGE_OWNER may recurse into the allocator to allocate space to 5290 * save the stack with pagesets.lock held. Releasing/reacquiring 5291 * removes much of the performance benefit of bulk allocation so 5292 * force the caller to allocate one page at a time as it'll have 5293 * similar performance to added complexity to the bulk allocator. 5294 */ 5295 if (static_branch_unlikely(&page_owner_inited)) 5296 goto failed; 5297 #endif 5298 5299 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 5300 gfp &= gfp_allowed_mask; 5301 alloc_gfp = gfp; 5302 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 5303 goto out; 5304 gfp = alloc_gfp; 5305 5306 /* Find an allowed local zone that meets the low watermark. */ 5307 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 5308 unsigned long mark; 5309 5310 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 5311 !__cpuset_zone_allowed(zone, gfp)) { 5312 continue; 5313 } 5314 5315 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && 5316 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { 5317 goto failed; 5318 } 5319 5320 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 5321 if (zone_watermark_fast(zone, 0, mark, 5322 zonelist_zone_idx(ac.preferred_zoneref), 5323 alloc_flags, gfp)) { 5324 break; 5325 } 5326 } 5327 5328 /* 5329 * If there are no allowed local zones that meets the watermarks then 5330 * try to allocate a single page and reclaim if necessary. 
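 *
 * (The failed: path below hands the request to the regular single-page
 * __alloc_pages() allocator, which can wake kswapd and enter direct
 * reclaim as the gfp mask allows.)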
5331 */ 5332 if (unlikely(!zone)) 5333 goto failed; 5334 5335 /* Attempt the batch allocation */ 5336 local_lock_irqsave(&pagesets.lock, flags); 5337 pcp = this_cpu_ptr(zone->per_cpu_pageset); 5338 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 5339 5340 while (nr_populated < nr_pages) { 5341 5342 /* Skip existing pages */ 5343 if (page_array && page_array[nr_populated]) { 5344 nr_populated++; 5345 continue; 5346 } 5347 5348 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5349 pcp, pcp_list); 5350 if (unlikely(!page)) { 5351 /* Try and allocate at least one page */ 5352 if (!nr_account) 5353 goto failed_irq; 5354 break; 5355 } 5356 nr_account++; 5357 5358 prep_new_page(page, 0, gfp, 0); 5359 if (page_list) 5360 list_add(&page->lru, page_list); 5361 else 5362 page_array[nr_populated] = page; 5363 nr_populated++; 5364 } 5365 5366 local_unlock_irqrestore(&pagesets.lock, flags); 5367 5368 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 5369 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); 5370 5371 out: 5372 return nr_populated; 5373 5374 failed_irq: 5375 local_unlock_irqrestore(&pagesets.lock, flags); 5376 5377 failed: 5378 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); 5379 if (page) { 5380 if (page_list) 5381 list_add(&page->lru, page_list); 5382 else 5383 page_array[nr_populated] = page; 5384 nr_populated++; 5385 } 5386 5387 goto out; 5388 } 5389 EXPORT_SYMBOL_GPL(__alloc_pages_bulk); 5390 5391 /* 5392 * This is the 'heart' of the zoned buddy allocator. 5393 */ 5394 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, 5395 nodemask_t *nodemask) 5396 { 5397 struct page *page; 5398 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5399 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 5400 struct alloc_context ac = { }; 5401 5402 /* 5403 * There are several places where we assume that the order value is sane 5404 * so bail out early if the request is out of bound. 5405 */ 5406 if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp)) 5407 return NULL; 5408 5409 gfp &= gfp_allowed_mask; 5410 /* 5411 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 5412 * resp. GFP_NOIO which has to be inherited for all allocation requests 5413 * from a particular context which has been marked by 5414 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 5415 * movable zones are not used during allocation. 5416 */ 5417 gfp = current_gfp_context(gfp); 5418 alloc_gfp = gfp; 5419 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 5420 &alloc_gfp, &alloc_flags)) 5421 return NULL; 5422 5423 /* 5424 * Forbid the first pass from falling back to types that fragment 5425 * memory until all local zones are considered. 5426 */ 5427 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 5428 5429 /* First allocation attempt */ 5430 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 5431 if (likely(page)) 5432 goto out; 5433 5434 alloc_gfp = gfp; 5435 ac.spread_dirty_pages = false; 5436 5437 /* 5438 * Restore the original nodemask if it was potentially replaced with 5439 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
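 *
 * This way the slowpath sees the caller's nodemask again (possibly
 * NULL) instead of the cpuset-derived mask installed by
 * prepare_alloc_pages().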
5440 */ 5441 ac.nodemask = nodemask; 5442 5443 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 5444 5445 out: 5446 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page && 5447 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 5448 __free_pages(page, order); 5449 page = NULL; 5450 } 5451 5452 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5453 5454 return page; 5455 } 5456 EXPORT_SYMBOL(__alloc_pages); 5457 5458 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 5459 nodemask_t *nodemask) 5460 { 5461 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 5462 preferred_nid, nodemask); 5463 5464 if (page && order > 1) 5465 prep_transhuge_page(page); 5466 return (struct folio *)page; 5467 } 5468 EXPORT_SYMBOL(__folio_alloc); 5469 5470 /* 5471 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5472 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5473 * you need to access high mem. 5474 */ 5475 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 5476 { 5477 struct page *page; 5478 5479 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 5480 if (!page) 5481 return 0; 5482 return (unsigned long) page_address(page); 5483 } 5484 EXPORT_SYMBOL(__get_free_pages); 5485 5486 unsigned long get_zeroed_page(gfp_t gfp_mask) 5487 { 5488 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 5489 } 5490 EXPORT_SYMBOL(get_zeroed_page); 5491 5492 /** 5493 * __free_pages - Free pages allocated with alloc_pages(). 5494 * @page: The page pointer returned from alloc_pages(). 5495 * @order: The order of the allocation. 5496 * 5497 * This function can free multi-page allocations that are not compound 5498 * pages. It does not check that the @order passed in matches that of 5499 * the allocation, so it is easy to leak memory. Freeing more memory 5500 * than was allocated will probably emit a warning. 5501 * 5502 * If the last reference to this page is speculative, it will be released 5503 * by put_page() which only frees the first page of a non-compound 5504 * allocation. To prevent the remaining pages from being leaked, we free 5505 * the subsequent pages here. If you want to use the page's reference 5506 * count to decide when to free the allocation, you should allocate a 5507 * compound page, and use put_page() instead of __free_pages(). 5508 * 5509 * Context: May be called in interrupt context or while holding a normal 5510 * spinlock, but not in NMI context or while holding a raw spinlock. 5511 */ 5512 void __free_pages(struct page *page, unsigned int order) 5513 { 5514 if (put_page_testzero(page)) 5515 free_the_page(page, order); 5516 else if (!PageHead(page)) 5517 while (order-- > 0) 5518 free_the_page(page + (1 << order), order); 5519 } 5520 EXPORT_SYMBOL(__free_pages); 5521 5522 void free_pages(unsigned long addr, unsigned int order) 5523 { 5524 if (addr != 0) { 5525 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5526 __free_pages(virt_to_page((void *)addr), order); 5527 } 5528 } 5529 5530 EXPORT_SYMBOL(free_pages); 5531 5532 /* 5533 * Page Fragment: 5534 * An arbitrary-length arbitrary-offset area of memory which resides 5535 * within a 0 or higher order page. Multiple fragments within that page 5536 * are individually refcounted, in the page's reference counter. 5537 * 5538 * The page_frag functions below provide a simple allocation framework for 5539 * page fragments. 
This is used by the network stack and network device 5540 * drivers to provide a backing region of memory for use as either an 5541 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 5542 */ 5543 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 5544 gfp_t gfp_mask) 5545 { 5546 struct page *page = NULL; 5547 gfp_t gfp = gfp_mask; 5548 5549 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5550 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 5551 __GFP_NOMEMALLOC; 5552 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 5553 PAGE_FRAG_CACHE_MAX_ORDER); 5554 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 5555 #endif 5556 if (unlikely(!page)) 5557 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 5558 5559 nc->va = page ? page_address(page) : NULL; 5560 5561 return page; 5562 } 5563 5564 void __page_frag_cache_drain(struct page *page, unsigned int count) 5565 { 5566 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 5567 5568 if (page_ref_sub_and_test(page, count)) 5569 free_the_page(page, compound_order(page)); 5570 } 5571 EXPORT_SYMBOL(__page_frag_cache_drain); 5572 5573 void *page_frag_alloc_align(struct page_frag_cache *nc, 5574 unsigned int fragsz, gfp_t gfp_mask, 5575 unsigned int align_mask) 5576 { 5577 unsigned int size = PAGE_SIZE; 5578 struct page *page; 5579 int offset; 5580 5581 if (unlikely(!nc->va)) { 5582 refill: 5583 page = __page_frag_cache_refill(nc, gfp_mask); 5584 if (!page) 5585 return NULL; 5586 5587 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5588 /* if size can vary use size else just use PAGE_SIZE */ 5589 size = nc->size; 5590 #endif 5591 /* Even if we own the page, we do not use atomic_set(). 5592 * This would break get_page_unless_zero() users. 5593 */ 5594 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 5595 5596 /* reset page count bias and offset to start of new frag */ 5597 nc->pfmemalloc = page_is_pfmemalloc(page); 5598 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5599 nc->offset = size; 5600 } 5601 5602 offset = nc->offset - fragsz; 5603 if (unlikely(offset < 0)) { 5604 page = virt_to_page(nc->va); 5605 5606 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 5607 goto refill; 5608 5609 if (unlikely(nc->pfmemalloc)) { 5610 free_the_page(page, compound_order(page)); 5611 goto refill; 5612 } 5613 5614 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5615 /* if size can vary use size else just use PAGE_SIZE */ 5616 size = nc->size; 5617 #endif 5618 /* OK, page count is 0, we can safely set it */ 5619 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 5620 5621 /* reset page count bias and offset to start of new frag */ 5622 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5623 offset = size - fragsz; 5624 } 5625 5626 nc->pagecnt_bias--; 5627 offset &= align_mask; 5628 nc->offset = offset; 5629 5630 return nc->va + offset; 5631 } 5632 EXPORT_SYMBOL(page_frag_alloc_align); 5633 5634 /* 5635 * Frees a page fragment allocated out of either a compound or order 0 page. 
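 *
 * Minimal usage sketch (illustrative only; a real cache is typically
 * per-CPU or embedded in a device structure and outlives individual
 * fragments, and the 256-byte size is arbitrary):
 *
 *	struct page_frag_cache nc = {};
 *	void *buf;
 *
 *	buf = page_frag_alloc(&nc, 256, GFP_ATOMIC);
 *	if (buf)
 *		page_frag_free(buf);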
5636 */ 5637 void page_frag_free(void *addr) 5638 { 5639 struct page *page = virt_to_head_page(addr); 5640 5641 if (unlikely(put_page_testzero(page))) 5642 free_the_page(page, compound_order(page)); 5643 } 5644 EXPORT_SYMBOL(page_frag_free); 5645 5646 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5647 size_t size) 5648 { 5649 if (addr) { 5650 unsigned long alloc_end = addr + (PAGE_SIZE << order); 5651 unsigned long used = addr + PAGE_ALIGN(size); 5652 5653 split_page(virt_to_page((void *)addr), order); 5654 while (used < alloc_end) { 5655 free_page(used); 5656 used += PAGE_SIZE; 5657 } 5658 } 5659 return (void *)addr; 5660 } 5661 5662 /** 5663 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5664 * @size: the number of bytes to allocate 5665 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5666 * 5667 * This function is similar to alloc_pages(), except that it allocates the 5668 * minimum number of pages to satisfy the request. alloc_pages() can only 5669 * allocate memory in power-of-two pages. 5670 * 5671 * This function is also limited by MAX_ORDER. 5672 * 5673 * Memory allocated by this function must be released by free_pages_exact(). 5674 * 5675 * Return: pointer to the allocated area or %NULL in case of error. 5676 */ 5677 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 5678 { 5679 unsigned int order = get_order(size); 5680 unsigned long addr; 5681 5682 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5683 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5684 5685 addr = __get_free_pages(gfp_mask, order); 5686 return make_alloc_exact(addr, order, size); 5687 } 5688 EXPORT_SYMBOL(alloc_pages_exact); 5689 5690 /** 5691 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5692 * pages on a node. 5693 * @nid: the preferred node ID where memory should be allocated 5694 * @size: the number of bytes to allocate 5695 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5696 * 5697 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5698 * back. 5699 * 5700 * Return: pointer to the allocated area or %NULL in case of error. 5701 */ 5702 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 5703 { 5704 unsigned int order = get_order(size); 5705 struct page *p; 5706 5707 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5708 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5709 5710 p = alloc_pages_node(nid, gfp_mask, order); 5711 if (!p) 5712 return NULL; 5713 return make_alloc_exact((unsigned long)page_address(p), order, size); 5714 } 5715 5716 /** 5717 * free_pages_exact - release memory allocated via alloc_pages_exact() 5718 * @virt: the value returned by alloc_pages_exact. 5719 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5720 * 5721 * Release the memory allocated by a previous call to alloc_pages_exact. 5722 */ 5723 void free_pages_exact(void *virt, size_t size) 5724 { 5725 unsigned long addr = (unsigned long)virt; 5726 unsigned long end = addr + PAGE_ALIGN(size); 5727 5728 while (addr < end) { 5729 free_page(addr); 5730 addr += PAGE_SIZE; 5731 } 5732 } 5733 EXPORT_SYMBOL(free_pages_exact); 5734 5735 /** 5736 * nr_free_zone_pages - count number of pages beyond high watermark 5737 * @offset: The zone index of the highest zone 5738 * 5739 * nr_free_zone_pages() counts the number of pages which are beyond the 5740 * high watermark within all zones at or below a given zone index. 
For each 5741 * zone, the number of pages is calculated as: 5742 * 5743 * nr_free_zone_pages = managed_pages - high_pages 5744 * 5745 * Return: number of pages beyond high watermark. 5746 */ 5747 static unsigned long nr_free_zone_pages(int offset) 5748 { 5749 struct zoneref *z; 5750 struct zone *zone; 5751 5752 /* Just pick one node, since fallback list is circular */ 5753 unsigned long sum = 0; 5754 5755 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5756 5757 for_each_zone_zonelist(zone, z, zonelist, offset) { 5758 unsigned long size = zone_managed_pages(zone); 5759 unsigned long high = high_wmark_pages(zone); 5760 if (size > high) 5761 sum += size - high; 5762 } 5763 5764 return sum; 5765 } 5766 5767 /** 5768 * nr_free_buffer_pages - count number of pages beyond high watermark 5769 * 5770 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5771 * watermark within ZONE_DMA and ZONE_NORMAL. 5772 * 5773 * Return: number of pages beyond high watermark within ZONE_DMA and 5774 * ZONE_NORMAL. 5775 */ 5776 unsigned long nr_free_buffer_pages(void) 5777 { 5778 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5779 } 5780 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5781 5782 static inline void show_node(struct zone *zone) 5783 { 5784 if (IS_ENABLED(CONFIG_NUMA)) 5785 printk("Node %d ", zone_to_nid(zone)); 5786 } 5787 5788 long si_mem_available(void) 5789 { 5790 long available; 5791 unsigned long pagecache; 5792 unsigned long wmark_low = 0; 5793 unsigned long pages[NR_LRU_LISTS]; 5794 unsigned long reclaimable; 5795 struct zone *zone; 5796 int lru; 5797 5798 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 5799 pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 5800 5801 for_each_zone(zone) 5802 wmark_low += low_wmark_pages(zone); 5803 5804 /* 5805 * Estimate the amount of memory available for userspace allocations, 5806 * without causing swapping. 5807 */ 5808 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; 5809 5810 /* 5811 * Not all the page cache can be freed, otherwise the system will 5812 * start swapping. Assume at least half of the page cache, or the 5813 * low watermark worth of cache, needs to stay. 5814 */ 5815 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 5816 pagecache -= min(pagecache / 2, wmark_low); 5817 available += pagecache; 5818 5819 /* 5820 * Part of the reclaimable slab and other kernel memory consists of 5821 * items that are in use, and cannot be freed. Cap this estimate at the 5822 * low watermark. 
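 *
 * Illustrative numbers: with 1 GiB of reclaimable slab/misc and a 64 MiB
 * low watermark, reclaimable - min(reclaimable / 2, wmark_low) counts
 * 1 GiB - 64 MiB = 960 MiB of it as available.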
5823 */ 5824 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + 5825 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); 5826 available += reclaimable - min(reclaimable / 2, wmark_low); 5827 5828 if (available < 0) 5829 available = 0; 5830 return available; 5831 } 5832 EXPORT_SYMBOL_GPL(si_mem_available); 5833 5834 void si_meminfo(struct sysinfo *val) 5835 { 5836 val->totalram = totalram_pages(); 5837 val->sharedram = global_node_page_state(NR_SHMEM); 5838 val->freeram = global_zone_page_state(NR_FREE_PAGES); 5839 val->bufferram = nr_blockdev_pages(); 5840 val->totalhigh = totalhigh_pages(); 5841 val->freehigh = nr_free_highpages(); 5842 val->mem_unit = PAGE_SIZE; 5843 } 5844 5845 EXPORT_SYMBOL(si_meminfo); 5846 5847 #ifdef CONFIG_NUMA 5848 void si_meminfo_node(struct sysinfo *val, int nid) 5849 { 5850 int zone_type; /* needs to be signed */ 5851 unsigned long managed_pages = 0; 5852 unsigned long managed_highpages = 0; 5853 unsigned long free_highpages = 0; 5854 pg_data_t *pgdat = NODE_DATA(nid); 5855 5856 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 5857 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); 5858 val->totalram = managed_pages; 5859 val->sharedram = node_page_state(pgdat, NR_SHMEM); 5860 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 5861 #ifdef CONFIG_HIGHMEM 5862 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 5863 struct zone *zone = &pgdat->node_zones[zone_type]; 5864 5865 if (is_highmem(zone)) { 5866 managed_highpages += zone_managed_pages(zone); 5867 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 5868 } 5869 } 5870 val->totalhigh = managed_highpages; 5871 val->freehigh = free_highpages; 5872 #else 5873 val->totalhigh = managed_highpages; 5874 val->freehigh = free_highpages; 5875 #endif 5876 val->mem_unit = PAGE_SIZE; 5877 } 5878 #endif 5879 5880 /* 5881 * Determine whether the node should be displayed or not, depending on whether 5882 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 5883 */ 5884 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 5885 { 5886 if (!(flags & SHOW_MEM_FILTER_NODES)) 5887 return false; 5888 5889 /* 5890 * no node mask - aka implicit memory numa policy. Do not bother with 5891 * the synchronization - read_mems_allowed_begin - because we do not 5892 * have to be precise here. 5893 */ 5894 if (!nodemask) 5895 nodemask = &cpuset_current_mems_allowed; 5896 5897 return !node_isset(nid, *nodemask); 5898 } 5899 5900 #define K(x) ((x) << (PAGE_SHIFT-10)) 5901 5902 static void show_migration_types(unsigned char type) 5903 { 5904 static const char types[MIGRATE_TYPES] = { 5905 [MIGRATE_UNMOVABLE] = 'U', 5906 [MIGRATE_MOVABLE] = 'M', 5907 [MIGRATE_RECLAIMABLE] = 'E', 5908 [MIGRATE_HIGHATOMIC] = 'H', 5909 #ifdef CONFIG_CMA 5910 [MIGRATE_CMA] = 'C', 5911 #endif 5912 #ifdef CONFIG_MEMORY_ISOLATION 5913 [MIGRATE_ISOLATE] = 'I', 5914 #endif 5915 }; 5916 char tmp[MIGRATE_TYPES + 1]; 5917 char *p = tmp; 5918 int i; 5919 5920 for (i = 0; i < MIGRATE_TYPES; i++) { 5921 if (type & (1 << i)) 5922 *p++ = types[i]; 5923 } 5924 5925 *p = '\0'; 5926 printk(KERN_CONT "(%s) ", tmp); 5927 } 5928 5929 /* 5930 * Show free area list (used inside shift_scroll-lock stuff) 5931 * We also calculate the percentage fragmentation. We do this by counting the 5932 * memory on each free list with the exception of the first item on the list. 5933 * 5934 * Bits in @filter: 5935 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 5936 * cpuset. 
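 *
 * E.g. a caller that only cares about zones usable by current's cpuset
 * would pass (sketch only):
 *
 *	show_free_areas(SHOW_MEM_FILTER_NODES, NULL);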
5937 */ 5938 void show_free_areas(unsigned int filter, nodemask_t *nodemask) 5939 { 5940 unsigned long free_pcp = 0; 5941 int cpu; 5942 struct zone *zone; 5943 pg_data_t *pgdat; 5944 5945 for_each_populated_zone(zone) { 5946 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5947 continue; 5948 5949 for_each_online_cpu(cpu) 5950 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 5951 } 5952 5953 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 5954 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 5955 " unevictable:%lu dirty:%lu writeback:%lu\n" 5956 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 5957 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 5958 " kernel_misc_reclaimable:%lu\n" 5959 " free:%lu free_pcp:%lu free_cma:%lu\n", 5960 global_node_page_state(NR_ACTIVE_ANON), 5961 global_node_page_state(NR_INACTIVE_ANON), 5962 global_node_page_state(NR_ISOLATED_ANON), 5963 global_node_page_state(NR_ACTIVE_FILE), 5964 global_node_page_state(NR_INACTIVE_FILE), 5965 global_node_page_state(NR_ISOLATED_FILE), 5966 global_node_page_state(NR_UNEVICTABLE), 5967 global_node_page_state(NR_FILE_DIRTY), 5968 global_node_page_state(NR_WRITEBACK), 5969 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), 5970 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), 5971 global_node_page_state(NR_FILE_MAPPED), 5972 global_node_page_state(NR_SHMEM), 5973 global_node_page_state(NR_PAGETABLE), 5974 global_zone_page_state(NR_BOUNCE), 5975 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), 5976 global_zone_page_state(NR_FREE_PAGES), 5977 free_pcp, 5978 global_zone_page_state(NR_FREE_CMA_PAGES)); 5979 5980 for_each_online_pgdat(pgdat) { 5981 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 5982 continue; 5983 5984 printk("Node %d" 5985 " active_anon:%lukB" 5986 " inactive_anon:%lukB" 5987 " active_file:%lukB" 5988 " inactive_file:%lukB" 5989 " unevictable:%lukB" 5990 " isolated(anon):%lukB" 5991 " isolated(file):%lukB" 5992 " mapped:%lukB" 5993 " dirty:%lukB" 5994 " writeback:%lukB" 5995 " shmem:%lukB" 5996 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5997 " shmem_thp: %lukB" 5998 " shmem_pmdmapped: %lukB" 5999 " anon_thp: %lukB" 6000 #endif 6001 " writeback_tmp:%lukB" 6002 " kernel_stack:%lukB" 6003 #ifdef CONFIG_SHADOW_CALL_STACK 6004 " shadow_call_stack:%lukB" 6005 #endif 6006 " pagetables:%lukB" 6007 " all_unreclaimable? %s" 6008 "\n", 6009 pgdat->node_id, 6010 K(node_page_state(pgdat, NR_ACTIVE_ANON)), 6011 K(node_page_state(pgdat, NR_INACTIVE_ANON)), 6012 K(node_page_state(pgdat, NR_ACTIVE_FILE)), 6013 K(node_page_state(pgdat, NR_INACTIVE_FILE)), 6014 K(node_page_state(pgdat, NR_UNEVICTABLE)), 6015 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 6016 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 6017 K(node_page_state(pgdat, NR_FILE_MAPPED)), 6018 K(node_page_state(pgdat, NR_FILE_DIRTY)), 6019 K(node_page_state(pgdat, NR_WRITEBACK)), 6020 K(node_page_state(pgdat, NR_SHMEM)), 6021 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6022 K(node_page_state(pgdat, NR_SHMEM_THPS)), 6023 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), 6024 K(node_page_state(pgdat, NR_ANON_THPS)), 6025 #endif 6026 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 6027 node_page_state(pgdat, NR_KERNEL_STACK_KB), 6028 #ifdef CONFIG_SHADOW_CALL_STACK 6029 node_page_state(pgdat, NR_KERNEL_SCS_KB), 6030 #endif 6031 K(node_page_state(pgdat, NR_PAGETABLE)), 6032 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
6033 "yes" : "no"); 6034 } 6035 6036 for_each_populated_zone(zone) { 6037 int i; 6038 6039 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6040 continue; 6041 6042 free_pcp = 0; 6043 for_each_online_cpu(cpu) 6044 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 6045 6046 show_node(zone); 6047 printk(KERN_CONT 6048 "%s" 6049 " free:%lukB" 6050 " boost:%lukB" 6051 " min:%lukB" 6052 " low:%lukB" 6053 " high:%lukB" 6054 " reserved_highatomic:%luKB" 6055 " active_anon:%lukB" 6056 " inactive_anon:%lukB" 6057 " active_file:%lukB" 6058 " inactive_file:%lukB" 6059 " unevictable:%lukB" 6060 " writepending:%lukB" 6061 " present:%lukB" 6062 " managed:%lukB" 6063 " mlocked:%lukB" 6064 " bounce:%lukB" 6065 " free_pcp:%lukB" 6066 " local_pcp:%ukB" 6067 " free_cma:%lukB" 6068 "\n", 6069 zone->name, 6070 K(zone_page_state(zone, NR_FREE_PAGES)), 6071 K(zone->watermark_boost), 6072 K(min_wmark_pages(zone)), 6073 K(low_wmark_pages(zone)), 6074 K(high_wmark_pages(zone)), 6075 K(zone->nr_reserved_highatomic), 6076 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 6077 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 6078 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 6079 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 6080 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 6081 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 6082 K(zone->present_pages), 6083 K(zone_managed_pages(zone)), 6084 K(zone_page_state(zone, NR_MLOCK)), 6085 K(zone_page_state(zone, NR_BOUNCE)), 6086 K(free_pcp), 6087 K(this_cpu_read(zone->per_cpu_pageset->count)), 6088 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 6089 printk("lowmem_reserve[]:"); 6090 for (i = 0; i < MAX_NR_ZONES; i++) 6091 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 6092 printk(KERN_CONT "\n"); 6093 } 6094 6095 for_each_populated_zone(zone) { 6096 unsigned int order; 6097 unsigned long nr[MAX_ORDER], flags, total = 0; 6098 unsigned char types[MAX_ORDER]; 6099 6100 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6101 continue; 6102 show_node(zone); 6103 printk(KERN_CONT "%s: ", zone->name); 6104 6105 spin_lock_irqsave(&zone->lock, flags); 6106 for (order = 0; order < MAX_ORDER; order++) { 6107 struct free_area *area = &zone->free_area[order]; 6108 int type; 6109 6110 nr[order] = area->nr_free; 6111 total += nr[order] << order; 6112 6113 types[order] = 0; 6114 for (type = 0; type < MIGRATE_TYPES; type++) { 6115 if (!free_area_empty(area, type)) 6116 types[order] |= 1 << type; 6117 } 6118 } 6119 spin_unlock_irqrestore(&zone->lock, flags); 6120 for (order = 0; order < MAX_ORDER; order++) { 6121 printk(KERN_CONT "%lu*%lukB ", 6122 nr[order], K(1UL) << order); 6123 if (nr[order]) 6124 show_migration_types(types[order]); 6125 } 6126 printk(KERN_CONT "= %lukB\n", K(total)); 6127 } 6128 6129 hugetlb_show_meminfo(); 6130 6131 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 6132 6133 show_swap_cache_info(); 6134 } 6135 6136 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 6137 { 6138 zoneref->zone = zone; 6139 zoneref->zone_idx = zone_idx(zone); 6140 } 6141 6142 /* 6143 * Builds allocation fallback zone lists. 6144 * 6145 * Add all populated zones of a node to the zonelist. 
6146 */ 6147 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 6148 { 6149 struct zone *zone; 6150 enum zone_type zone_type = MAX_NR_ZONES; 6151 int nr_zones = 0; 6152 6153 do { 6154 zone_type--; 6155 zone = pgdat->node_zones + zone_type; 6156 if (populated_zone(zone)) { 6157 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 6158 check_highest_zone(zone_type); 6159 } 6160 } while (zone_type); 6161 6162 return nr_zones; 6163 } 6164 6165 #ifdef CONFIG_NUMA 6166 6167 static int __parse_numa_zonelist_order(char *s) 6168 { 6169 /* 6170 * We used to support different zonelists modes but they turned 6171 * out to be just not useful. Let's keep the warning in place 6172 * if somebody still use the cmd line parameter so that we do 6173 * not fail it silently 6174 */ 6175 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 6176 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 6177 return -EINVAL; 6178 } 6179 return 0; 6180 } 6181 6182 char numa_zonelist_order[] = "Node"; 6183 6184 /* 6185 * sysctl handler for numa_zonelist_order 6186 */ 6187 int numa_zonelist_order_handler(struct ctl_table *table, int write, 6188 void *buffer, size_t *length, loff_t *ppos) 6189 { 6190 if (write) 6191 return __parse_numa_zonelist_order(buffer); 6192 return proc_dostring(table, write, buffer, length, ppos); 6193 } 6194 6195 6196 static int node_load[MAX_NUMNODES]; 6197 6198 /** 6199 * find_next_best_node - find the next node that should appear in a given node's fallback list 6200 * @node: node whose fallback list we're appending 6201 * @used_node_mask: nodemask_t of already used nodes 6202 * 6203 * We use a number of factors to determine which is the next node that should 6204 * appear on a given node's fallback list. The node should not have appeared 6205 * already in @node's fallback list, and it should be the next closest node 6206 * according to the distance array (which contains arbitrary distance values 6207 * from each node to each node in the system), and should also prefer nodes 6208 * with no CPUs, since presumably they'll have very little allocation pressure 6209 * on them otherwise. 6210 * 6211 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 6212 */ 6213 int find_next_best_node(int node, nodemask_t *used_node_mask) 6214 { 6215 int n, val; 6216 int min_val = INT_MAX; 6217 int best_node = NUMA_NO_NODE; 6218 6219 /* Use the local node if we haven't already */ 6220 if (!node_isset(node, *used_node_mask)) { 6221 node_set(node, *used_node_mask); 6222 return node; 6223 } 6224 6225 for_each_node_state(n, N_MEMORY) { 6226 6227 /* Don't want a node to appear more than once */ 6228 if (node_isset(n, *used_node_mask)) 6229 continue; 6230 6231 /* Use the distance array to find the distance */ 6232 val = node_distance(node, n); 6233 6234 /* Penalize nodes under us ("prefer the next node") */ 6235 val += (n < node); 6236 6237 /* Give preference to headless and unused nodes */ 6238 if (!cpumask_empty(cpumask_of_node(n))) 6239 val += PENALTY_FOR_NODE_WITH_CPUS; 6240 6241 /* Slight preference for less loaded node */ 6242 val *= MAX_NUMNODES; 6243 val += node_load[n]; 6244 6245 if (val < min_val) { 6246 min_val = val; 6247 best_node = n; 6248 } 6249 } 6250 6251 if (best_node >= 0) 6252 node_set(best_node, *used_node_mask); 6253 6254 return best_node; 6255 } 6256 6257 6258 /* 6259 * Build zonelists ordered by node and zones within node. 
6260 * This results in maximum locality--normal zone overflows into local 6261 * DMA zone, if any--but risks exhausting DMA zone. 6262 */ 6263 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 6264 unsigned nr_nodes) 6265 { 6266 struct zoneref *zonerefs; 6267 int i; 6268 6269 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 6270 6271 for (i = 0; i < nr_nodes; i++) { 6272 int nr_zones; 6273 6274 pg_data_t *node = NODE_DATA(node_order[i]); 6275 6276 nr_zones = build_zonerefs_node(node, zonerefs); 6277 zonerefs += nr_zones; 6278 } 6279 zonerefs->zone = NULL; 6280 zonerefs->zone_idx = 0; 6281 } 6282 6283 /* 6284 * Build gfp_thisnode zonelists 6285 */ 6286 static void build_thisnode_zonelists(pg_data_t *pgdat) 6287 { 6288 struct zoneref *zonerefs; 6289 int nr_zones; 6290 6291 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 6292 nr_zones = build_zonerefs_node(pgdat, zonerefs); 6293 zonerefs += nr_zones; 6294 zonerefs->zone = NULL; 6295 zonerefs->zone_idx = 0; 6296 } 6297 6298 /* 6299 * Build zonelists ordered by zone and nodes within zones. 6300 * This results in conserving DMA zone[s] until all Normal memory is 6301 * exhausted, but results in overflowing to remote node while memory 6302 * may still exist in local DMA zone. 6303 */ 6304 6305 static void build_zonelists(pg_data_t *pgdat) 6306 { 6307 static int node_order[MAX_NUMNODES]; 6308 int node, nr_nodes = 0; 6309 nodemask_t used_mask = NODE_MASK_NONE; 6310 int local_node, prev_node; 6311 6312 /* NUMA-aware ordering of nodes */ 6313 local_node = pgdat->node_id; 6314 prev_node = local_node; 6315 6316 memset(node_order, 0, sizeof(node_order)); 6317 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 6318 /* 6319 * We don't want to pressure a particular node. 6320 * So adding penalty to the first node in same 6321 * distance group to make it round-robin. 6322 */ 6323 if (node_distance(local_node, node) != 6324 node_distance(local_node, prev_node)) 6325 node_load[node] += 1; 6326 6327 node_order[nr_nodes++] = node; 6328 prev_node = node; 6329 } 6330 6331 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 6332 build_thisnode_zonelists(pgdat); 6333 pr_info("Fallback order for Node %d: ", local_node); 6334 for (node = 0; node < nr_nodes; node++) 6335 pr_cont("%d ", node_order[node]); 6336 pr_cont("\n"); 6337 } 6338 6339 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6340 /* 6341 * Return node id of node used for "local" allocations. 6342 * I.e., first node id of first zone in arg node's generic zonelist. 6343 * Used for initializing percpu 'numa_mem', which is used primarily 6344 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 6345 */ 6346 int local_memory_node(int node) 6347 { 6348 struct zoneref *z; 6349 6350 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 6351 gfp_zone(GFP_KERNEL), 6352 NULL); 6353 return zone_to_nid(z->zone); 6354 } 6355 #endif 6356 6357 static void setup_min_unmapped_ratio(void); 6358 static void setup_min_slab_ratio(void); 6359 #else /* CONFIG_NUMA */ 6360 6361 static void build_zonelists(pg_data_t *pgdat) 6362 { 6363 int node, local_node; 6364 struct zoneref *zonerefs; 6365 int nr_zones; 6366 6367 local_node = pgdat->node_id; 6368 6369 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 6370 nr_zones = build_zonerefs_node(pgdat, zonerefs); 6371 zonerefs += nr_zones; 6372 6373 /* 6374 * Now we build the zonelist so that it contains the zones 6375 * of all the other nodes. 
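 * For example (illustrative), with four online nodes and local node 2 the
 * loops below append the remaining nodes as 3, 0, 1, giving the overall
 * fallback order 2, 3, 0, 1.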
6376 * We don't want to pressure a particular node, so when 6377 * building the zones for node N, we make sure that the 6378 * zones coming right after the local ones are those from 6379 * node N+1 (modulo N) 6380 */ 6381 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 6382 if (!node_online(node)) 6383 continue; 6384 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 6385 zonerefs += nr_zones; 6386 } 6387 for (node = 0; node < local_node; node++) { 6388 if (!node_online(node)) 6389 continue; 6390 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 6391 zonerefs += nr_zones; 6392 } 6393 6394 zonerefs->zone = NULL; 6395 zonerefs->zone_idx = 0; 6396 } 6397 6398 #endif /* CONFIG_NUMA */ 6399 6400 /* 6401 * Boot pageset table. One per cpu which is going to be used for all 6402 * zones and all nodes. The parameters will be set in such a way 6403 * that an item put on a list will immediately be handed over to 6404 * the buddy list. This is safe since pageset manipulation is done 6405 * with interrupts disabled. 6406 * 6407 * The boot_pagesets must be kept even after bootup is complete for 6408 * unused processors and/or zones. They do play a role for bootstrapping 6409 * hotplugged processors. 6410 * 6411 * zoneinfo_show() and maybe other functions do 6412 * not check if the processor is online before following the pageset pointer. 6413 * Other parts of the kernel may not check if the zone is available. 6414 */ 6415 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 6416 /* These effectively disable the pcplists in the boot pageset completely */ 6417 #define BOOT_PAGESET_HIGH 0 6418 #define BOOT_PAGESET_BATCH 1 6419 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 6420 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 6421 DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 6422 6423 static void __build_all_zonelists(void *data) 6424 { 6425 int nid; 6426 int __maybe_unused cpu; 6427 pg_data_t *self = data; 6428 static DEFINE_SPINLOCK(lock); 6429 6430 spin_lock(&lock); 6431 6432 #ifdef CONFIG_NUMA 6433 memset(node_load, 0, sizeof(node_load)); 6434 #endif 6435 6436 /* 6437 * This node is hotadded and no memory is yet present. So just 6438 * building zonelists is fine - no need to touch other nodes. 6439 */ 6440 if (self && !node_online(self->node_id)) { 6441 build_zonelists(self); 6442 } else { 6443 /* 6444 * All possible nodes have pgdat preallocated 6445 * in free_area_init 6446 */ 6447 for_each_node(nid) { 6448 pg_data_t *pgdat = NODE_DATA(nid); 6449 6450 build_zonelists(pgdat); 6451 } 6452 6453 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6454 /* 6455 * We now know the "local memory node" for each node-- 6456 * i.e., the node of the first zone in the generic zonelist. 6457 * Set up numa_mem percpu variable for on-line cpus. During 6458 * boot, only the boot cpu should be on-line; we'll init the 6459 * secondary cpus' numa_mem as they come on-line. During 6460 * node/memory hotplug, we'll fixup all on-line cpus. 6461 */ 6462 for_each_online_cpu(cpu) 6463 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 6464 #endif 6465 } 6466 6467 spin_unlock(&lock); 6468 } 6469 6470 static noinline void __init 6471 build_all_zonelists_init(void) 6472 { 6473 int cpu; 6474 6475 __build_all_zonelists(NULL); 6476 6477 /* 6478 * Initialize the boot_pagesets that are going to be used 6479 * for bootstrapping processors. 
The real pagesets for 6480 * each zone will be allocated later when the per cpu 6481 * allocator is available. 6482 * 6483 * boot_pagesets are used also for bootstrapping offline 6484 * cpus if the system is already booted because the pagesets 6485 * are needed to initialize allocators on a specific cpu too. 6486 * F.e. the percpu allocator needs the page allocator which 6487 * needs the percpu allocator in order to allocate its pagesets 6488 * (a chicken-egg dilemma). 6489 */ 6490 for_each_possible_cpu(cpu) 6491 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 6492 6493 mminit_verify_zonelist(); 6494 cpuset_init_current_mems_allowed(); 6495 } 6496 6497 /* 6498 * unless system_state == SYSTEM_BOOTING. 6499 * 6500 * __ref due to call of __init annotated helper build_all_zonelists_init 6501 * [protected by SYSTEM_BOOTING]. 6502 */ 6503 void __ref build_all_zonelists(pg_data_t *pgdat) 6504 { 6505 unsigned long vm_total_pages; 6506 6507 if (system_state == SYSTEM_BOOTING) { 6508 build_all_zonelists_init(); 6509 } else { 6510 __build_all_zonelists(pgdat); 6511 /* cpuset refresh routine should be here */ 6512 } 6513 /* Get the number of free pages beyond high watermark in all zones. */ 6514 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 6515 /* 6516 * Disable grouping by mobility if the number of pages in the 6517 * system is too low to allow the mechanism to work. It would be 6518 * more accurate, but expensive to check per-zone. This check is 6519 * made on memory-hotadd so a system can start with mobility 6520 * disabled and enable it later 6521 */ 6522 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 6523 page_group_by_mobility_disabled = 1; 6524 else 6525 page_group_by_mobility_disabled = 0; 6526 6527 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 6528 nr_online_nodes, 6529 page_group_by_mobility_disabled ? "off" : "on", 6530 vm_total_pages); 6531 #ifdef CONFIG_NUMA 6532 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 6533 #endif 6534 } 6535 6536 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 6537 static bool __meminit 6538 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 6539 { 6540 static struct memblock_region *r; 6541 6542 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 6543 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 6544 for_each_mem_region(r) { 6545 if (*pfn < memblock_region_memory_end_pfn(r)) 6546 break; 6547 } 6548 } 6549 if (*pfn >= memblock_region_memory_base_pfn(r) && 6550 memblock_is_mirror(r)) { 6551 *pfn = memblock_region_memory_end_pfn(r); 6552 return true; 6553 } 6554 } 6555 return false; 6556 } 6557 6558 /* 6559 * Initially all pages are reserved - free ones are freed 6560 * up by memblock_free_all() once the early boot process is 6561 * done. Non-atomic initialization, single-pass. 6562 * 6563 * All aligned pageblocks are initialized to the specified migratetype 6564 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 6565 * zone stats (e.g., nr_isolate_pageblock) are touched. 
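 *
 * A typical early-boot invocation, as issued from memmap_init_zone_range()
 * below:
 *
 *	memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn,
 *			  zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);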
6566 */ 6567 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, 6568 unsigned long start_pfn, unsigned long zone_end_pfn, 6569 enum meminit_context context, 6570 struct vmem_altmap *altmap, int migratetype) 6571 { 6572 unsigned long pfn, end_pfn = start_pfn + size; 6573 struct page *page; 6574 6575 if (highest_memmap_pfn < end_pfn - 1) 6576 highest_memmap_pfn = end_pfn - 1; 6577 6578 #ifdef CONFIG_ZONE_DEVICE 6579 /* 6580 * Honor reservation requested by the driver for this ZONE_DEVICE 6581 * memory. We limit the total number of pages to initialize to just 6582 * those that might contain the memory mapping. We will defer the 6583 * ZONE_DEVICE page initialization until after we have released 6584 * the hotplug lock. 6585 */ 6586 if (zone == ZONE_DEVICE) { 6587 if (!altmap) 6588 return; 6589 6590 if (start_pfn == altmap->base_pfn) 6591 start_pfn += altmap->reserve; 6592 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6593 } 6594 #endif 6595 6596 for (pfn = start_pfn; pfn < end_pfn; ) { 6597 /* 6598 * There can be holes in boot-time mem_map[]s handed to this 6599 * function. They do not exist on hotplugged memory. 6600 */ 6601 if (context == MEMINIT_EARLY) { 6602 if (overlap_memmap_init(zone, &pfn)) 6603 continue; 6604 if (defer_init(nid, pfn, zone_end_pfn)) 6605 break; 6606 } 6607 6608 page = pfn_to_page(pfn); 6609 __init_single_page(page, pfn, zone, nid); 6610 if (context == MEMINIT_HOTPLUG) 6611 __SetPageReserved(page); 6612 6613 /* 6614 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 6615 * such that unmovable allocations won't be scattered all 6616 * over the place during system boot. 6617 */ 6618 if (IS_ALIGNED(pfn, pageblock_nr_pages)) { 6619 set_pageblock_migratetype(page, migratetype); 6620 cond_resched(); 6621 } 6622 pfn++; 6623 } 6624 } 6625 6626 #ifdef CONFIG_ZONE_DEVICE 6627 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, 6628 unsigned long zone_idx, int nid, 6629 struct dev_pagemap *pgmap) 6630 { 6631 6632 __init_single_page(page, pfn, zone_idx, nid); 6633 6634 /* 6635 * Mark page reserved as it will need to wait for onlining 6636 * phase for it to be fully associated with a zone. 6637 * 6638 * We can use the non-atomic __set_bit operation for setting 6639 * the flag as we are still initializing the pages. 6640 */ 6641 __SetPageReserved(page); 6642 6643 /* 6644 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 6645 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 6646 * ever freed or placed on a driver-private list. 6647 */ 6648 page->pgmap = pgmap; 6649 page->zone_device_data = NULL; 6650 6651 /* 6652 * Mark the block movable so that blocks are reserved for 6653 * movable at startup. This will force kernel allocations 6654 * to reserve their blocks rather than leaking throughout 6655 * the address space during boot when many long-lived 6656 * kernel allocations are made. 6657 * 6658 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap 6659 * because this is done early in section_activate() 6660 */ 6661 if (IS_ALIGNED(pfn, pageblock_nr_pages)) { 6662 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 6663 cond_resched(); 6664 } 6665 } 6666 6667 /* 6668 * With compound page geometry and when struct pages are stored in ram most 6669 * tail pages are reused. Consequently, the amount of unique struct pages to 6670 * initialize is a lot smaller that the total amount of struct pages being 6671 * mapped. 
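 * For example, assuming 4 KiB base pages, a 64-byte struct page and no
 * altmap, compound_nr_pages() below caps the work at
 * 2 * (PAGE_SIZE / sizeof(struct page)) = 128 struct pages per compound
 * device page, regardless of the compound size.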
This is a paired / mild layering violation with explicit knowledge 6672 * of how the sparse_vmemmap internals handle compound pages in the lack 6673 * of an altmap. See vmemmap_populate_compound_pages(). 6674 */ 6675 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap, 6676 unsigned long nr_pages) 6677 { 6678 return is_power_of_2(sizeof(struct page)) && 6679 !altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages; 6680 } 6681 6682 static void __ref memmap_init_compound(struct page *head, 6683 unsigned long head_pfn, 6684 unsigned long zone_idx, int nid, 6685 struct dev_pagemap *pgmap, 6686 unsigned long nr_pages) 6687 { 6688 unsigned long pfn, end_pfn = head_pfn + nr_pages; 6689 unsigned int order = pgmap->vmemmap_shift; 6690 6691 __SetPageHead(head); 6692 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { 6693 struct page *page = pfn_to_page(pfn); 6694 6695 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 6696 prep_compound_tail(head, pfn - head_pfn); 6697 set_page_count(page, 0); 6698 6699 /* 6700 * The first tail page stores compound_mapcount_ptr() and 6701 * compound_order() and the second tail page stores 6702 * compound_pincount_ptr(). Call prep_compound_head() after 6703 * the first and second tail pages have been initialized to 6704 * not have the data overwritten. 6705 */ 6706 if (pfn == head_pfn + 2) 6707 prep_compound_head(head, order); 6708 } 6709 } 6710 6711 void __ref memmap_init_zone_device(struct zone *zone, 6712 unsigned long start_pfn, 6713 unsigned long nr_pages, 6714 struct dev_pagemap *pgmap) 6715 { 6716 unsigned long pfn, end_pfn = start_pfn + nr_pages; 6717 struct pglist_data *pgdat = zone->zone_pgdat; 6718 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 6719 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); 6720 unsigned long zone_idx = zone_idx(zone); 6721 unsigned long start = jiffies; 6722 int nid = pgdat->node_id; 6723 6724 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) 6725 return; 6726 6727 /* 6728 * The call to memmap_init should have already taken care 6729 * of the pages reserved for the memmap, so we can just jump to 6730 * the end of that region and start processing the device pages. 6731 */ 6732 if (altmap) { 6733 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6734 nr_pages = end_pfn - start_pfn; 6735 } 6736 6737 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { 6738 struct page *page = pfn_to_page(pfn); 6739 6740 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 6741 6742 if (pfns_per_compound == 1) 6743 continue; 6744 6745 memmap_init_compound(page, pfn, zone_idx, nid, pgmap, 6746 compound_nr_pages(altmap, pfns_per_compound)); 6747 } 6748 6749 pr_info("%s initialised %lu pages in %ums\n", __func__, 6750 nr_pages, jiffies_to_msecs(jiffies - start)); 6751 } 6752 6753 #endif 6754 static void __meminit zone_init_free_lists(struct zone *zone) 6755 { 6756 unsigned int order, t; 6757 for_each_migratetype_order(order, t) { 6758 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 6759 zone->free_area[order].nr_free = 0; 6760 } 6761 } 6762 6763 /* 6764 * Only struct pages that correspond to ranges defined by memblock.memory 6765 * are zeroed and initialized by going through __init_single_page() during 6766 * memmap_init_zone_range(). 6767 * 6768 * But, there could be struct pages that correspond to holes in 6769 * memblock.memory. 
This can happen because of the following reasons: 6770 * - physical memory bank size is not necessarily the exact multiple of the 6771 * arbitrary section size 6772 * - early reserved memory may not be listed in memblock.memory 6773 * - memory layouts defined with memmap= kernel parameter may not align 6774 * nicely with memmap sections 6775 * 6776 * Explicitly initialize those struct pages so that: 6777 * - PG_Reserved is set 6778 * - zone and node links point to zone and node that span the page if the 6779 * hole is in the middle of a zone 6780 * - zone and node links point to adjacent zone/node if the hole falls on 6781 * the zone boundary; the pages in such holes will be prepended to the 6782 * zone/node above the hole except for the trailing pages in the last 6783 * section that will be appended to the zone/node below. 6784 */ 6785 static void __init init_unavailable_range(unsigned long spfn, 6786 unsigned long epfn, 6787 int zone, int node) 6788 { 6789 unsigned long pfn; 6790 u64 pgcnt = 0; 6791 6792 for (pfn = spfn; pfn < epfn; pfn++) { 6793 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) { 6794 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages) 6795 + pageblock_nr_pages - 1; 6796 continue; 6797 } 6798 __init_single_page(pfn_to_page(pfn), pfn, zone, node); 6799 __SetPageReserved(pfn_to_page(pfn)); 6800 pgcnt++; 6801 } 6802 6803 if (pgcnt) 6804 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", 6805 node, zone_names[zone], pgcnt); 6806 } 6807 6808 static void __init memmap_init_zone_range(struct zone *zone, 6809 unsigned long start_pfn, 6810 unsigned long end_pfn, 6811 unsigned long *hole_pfn) 6812 { 6813 unsigned long zone_start_pfn = zone->zone_start_pfn; 6814 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; 6815 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); 6816 6817 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); 6818 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); 6819 6820 if (start_pfn >= end_pfn) 6821 return; 6822 6823 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, 6824 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); 6825 6826 if (*hole_pfn < start_pfn) 6827 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); 6828 6829 *hole_pfn = end_pfn; 6830 } 6831 6832 static void __init memmap_init(void) 6833 { 6834 unsigned long start_pfn, end_pfn; 6835 unsigned long hole_pfn = 0; 6836 int i, j, zone_id = 0, nid; 6837 6838 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 6839 struct pglist_data *node = NODE_DATA(nid); 6840 6841 for (j = 0; j < MAX_NR_ZONES; j++) { 6842 struct zone *zone = node->node_zones + j; 6843 6844 if (!populated_zone(zone)) 6845 continue; 6846 6847 memmap_init_zone_range(zone, start_pfn, end_pfn, 6848 &hole_pfn); 6849 zone_id = j; 6850 } 6851 } 6852 6853 #ifdef CONFIG_SPARSEMEM 6854 /* 6855 * Initialize the memory map for hole in the range [memory_end, 6856 * section_end]. 6857 * Append the pages in this hole to the highest zone in the last 6858 * node. 
6859 * The call to init_unavailable_range() is outside the ifdef to 6860 * silence the compiler warining about zone_id set but not used; 6861 * for FLATMEM it is a nop anyway 6862 */ 6863 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); 6864 if (hole_pfn < end_pfn) 6865 #endif 6866 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); 6867 } 6868 6869 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 6870 phys_addr_t min_addr, int nid, bool exact_nid) 6871 { 6872 void *ptr; 6873 6874 if (exact_nid) 6875 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 6876 MEMBLOCK_ALLOC_ACCESSIBLE, 6877 nid); 6878 else 6879 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 6880 MEMBLOCK_ALLOC_ACCESSIBLE, 6881 nid); 6882 6883 if (ptr && size > 0) 6884 page_init_poison(ptr, size); 6885 6886 return ptr; 6887 } 6888 6889 static int zone_batchsize(struct zone *zone) 6890 { 6891 #ifdef CONFIG_MMU 6892 int batch; 6893 6894 /* 6895 * The number of pages to batch allocate is either ~0.1% 6896 * of the zone or 1MB, whichever is smaller. The batch 6897 * size is striking a balance between allocation latency 6898 * and zone lock contention. 6899 */ 6900 batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE); 6901 batch /= 4; /* We effectively *= 4 below */ 6902 if (batch < 1) 6903 batch = 1; 6904 6905 /* 6906 * Clamp the batch to a 2^n - 1 value. Having a power 6907 * of 2 value was found to be more likely to have 6908 * suboptimal cache aliasing properties in some cases. 6909 * 6910 * For example if 2 tasks are alternately allocating 6911 * batches of pages, one task can end up with a lot 6912 * of pages of one half of the possible page colors 6913 * and the other with pages of the other colors. 6914 */ 6915 batch = rounddown_pow_of_two(batch + batch/2) - 1; 6916 6917 return batch; 6918 6919 #else 6920 /* The deferral and batching of frees should be suppressed under NOMMU 6921 * conditions. 6922 * 6923 * The problem is that NOMMU needs to be able to allocate large chunks 6924 * of contiguous memory as there's no hardware page translation to 6925 * assemble apparent contiguous memory from discontiguous pages. 6926 * 6927 * Queueing large contiguous runs of pages for batching, however, 6928 * causes the pages to actually be freed in smaller chunks. As there 6929 * can be a significant delay between the individual batches being 6930 * recycled, this leads to the once large chunks of space being 6931 * fragmented and becoming unavailable for high-order allocations. 6932 */ 6933 return 0; 6934 #endif 6935 } 6936 6937 static int zone_highsize(struct zone *zone, int batch, int cpu_online) 6938 { 6939 #ifdef CONFIG_MMU 6940 int high; 6941 int nr_split_cpus; 6942 unsigned long total_pages; 6943 6944 if (!percpu_pagelist_high_fraction) { 6945 /* 6946 * By default, the high value of the pcp is based on the zone 6947 * low watermark so that if they are full then background 6948 * reclaim will not be started prematurely. 6949 */ 6950 total_pages = low_wmark_pages(zone); 6951 } else { 6952 /* 6953 * If percpu_pagelist_high_fraction is configured, the high 6954 * value is based on a fraction of the managed pages in the 6955 * zone. 6956 */ 6957 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; 6958 } 6959 6960 /* 6961 * Split the high value across all online CPUs local to the zone. Note 6962 * that early in boot that CPUs may not be online yet and that during 6963 * CPU hotplug that the cpumask is not yet updated when a CPU is being 6964 * onlined. 
For memory nodes that have no CPUs, split pcp->high across 6965 * all online CPUs to mitigate the risk that reclaim is triggered 6966 * prematurely due to pages stored on pcp lists. 6967 */ 6968 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 6969 if (!nr_split_cpus) 6970 nr_split_cpus = num_online_cpus(); 6971 high = total_pages / nr_split_cpus; 6972 6973 /* 6974 * Ensure high is at least batch*4. The multiple is based on the 6975 * historical relationship between high and batch. 6976 */ 6977 high = max(high, batch << 2); 6978 6979 return high; 6980 #else 6981 return 0; 6982 #endif 6983 } 6984 6985 /* 6986 * pcp->high and pcp->batch values are related and generally batch is lower 6987 * than high. They are also related to pcp->count such that count is lower 6988 * than high, and as soon as it reaches high, the pcplist is flushed. 6989 * 6990 * However, guaranteeing these relations at all times would require e.g. write 6991 * barriers here but also careful usage of read barriers at the read side, and 6992 * thus be prone to error and bad for performance. Thus the update only prevents 6993 * store tearing. Any new users of pcp->batch and pcp->high should ensure they 6994 * can cope with those fields changing asynchronously, and fully trust only the 6995 * pcp->count field on the local CPU with interrupts disabled. 6996 * 6997 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 6998 * outside of boot time (or some other assurance that no concurrent updaters 6999 * exist). 7000 */ 7001 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 7002 unsigned long batch) 7003 { 7004 WRITE_ONCE(pcp->batch, batch); 7005 WRITE_ONCE(pcp->high, high); 7006 } 7007 7008 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 7009 { 7010 int pindex; 7011 7012 memset(pcp, 0, sizeof(*pcp)); 7013 memset(pzstats, 0, sizeof(*pzstats)); 7014 7015 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 7016 INIT_LIST_HEAD(&pcp->lists[pindex]); 7017 7018 /* 7019 * Set batch and high values safe for a boot pageset. A true percpu 7020 * pageset's initialization will update them subsequently. Here we don't 7021 * need to be as careful as pageset_update() as nobody can access the 7022 * pageset yet. 7023 */ 7024 pcp->high = BOOT_PAGESET_HIGH; 7025 pcp->batch = BOOT_PAGESET_BATCH; 7026 pcp->free_factor = 0; 7027 } 7028 7029 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 7030 unsigned long batch) 7031 { 7032 struct per_cpu_pages *pcp; 7033 int cpu; 7034 7035 for_each_possible_cpu(cpu) { 7036 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 7037 pageset_update(pcp, high, batch); 7038 } 7039 } 7040 7041 /* 7042 * Calculate and set new high and batch values for all per-cpu pagesets of a 7043 * zone based on the zone's size. 
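 *
 * Illustrative arithmetic (4 KiB pages, percpu_pagelist_high_fraction not
 * set): a zone with 4 GiB managed gives zone_batchsize() =
 * min(1048576 >> 10, 256) / 4 = 64, rounded down to 2^n - 1 = 63; the high
 * value is then the zone's low watermark split across the local online
 * CPUs, but never less than batch * 4 = 252.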
7044 */ 7045 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 7046 { 7047 int new_high, new_batch; 7048 7049 new_batch = max(1, zone_batchsize(zone)); 7050 new_high = zone_highsize(zone, new_batch, cpu_online); 7051 7052 if (zone->pageset_high == new_high && 7053 zone->pageset_batch == new_batch) 7054 return; 7055 7056 zone->pageset_high = new_high; 7057 zone->pageset_batch = new_batch; 7058 7059 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 7060 } 7061 7062 void __meminit setup_zone_pageset(struct zone *zone) 7063 { 7064 int cpu; 7065 7066 /* Size may be 0 on !SMP && !NUMA */ 7067 if (sizeof(struct per_cpu_zonestat) > 0) 7068 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 7069 7070 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 7071 for_each_possible_cpu(cpu) { 7072 struct per_cpu_pages *pcp; 7073 struct per_cpu_zonestat *pzstats; 7074 7075 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 7076 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 7077 per_cpu_pages_init(pcp, pzstats); 7078 } 7079 7080 zone_set_pageset_high_and_batch(zone, 0); 7081 } 7082 7083 /* 7084 * Allocate per cpu pagesets and initialize them. 7085 * Before this call only boot pagesets were available. 7086 */ 7087 void __init setup_per_cpu_pageset(void) 7088 { 7089 struct pglist_data *pgdat; 7090 struct zone *zone; 7091 int __maybe_unused cpu; 7092 7093 for_each_populated_zone(zone) 7094 setup_zone_pageset(zone); 7095 7096 #ifdef CONFIG_NUMA 7097 /* 7098 * Unpopulated zones continue using the boot pagesets. 7099 * The numa stats for these pagesets need to be reset. 7100 * Otherwise, they will end up skewing the stats of 7101 * the nodes these zones are associated with. 7102 */ 7103 for_each_possible_cpu(cpu) { 7104 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 7105 memset(pzstats->vm_numa_event, 0, 7106 sizeof(pzstats->vm_numa_event)); 7107 } 7108 #endif 7109 7110 for_each_online_pgdat(pgdat) 7111 pgdat->per_cpu_nodestats = 7112 alloc_percpu(struct per_cpu_nodestat); 7113 } 7114 7115 static __meminit void zone_pcp_init(struct zone *zone) 7116 { 7117 /* 7118 * per cpu subsystem is not up at this point. The following code 7119 * relies on the ability of the linker to provide the 7120 * offset of a (static) per cpu variable into the per cpu area. 7121 */ 7122 zone->per_cpu_pageset = &boot_pageset; 7123 zone->per_cpu_zonestats = &boot_zonestats; 7124 zone->pageset_high = BOOT_PAGESET_HIGH; 7125 zone->pageset_batch = BOOT_PAGESET_BATCH; 7126 7127 if (populated_zone(zone)) 7128 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 7129 zone->present_pages, zone_batchsize(zone)); 7130 } 7131 7132 void __meminit init_currently_empty_zone(struct zone *zone, 7133 unsigned long zone_start_pfn, 7134 unsigned long size) 7135 { 7136 struct pglist_data *pgdat = zone->zone_pgdat; 7137 int zone_idx = zone_idx(zone) + 1; 7138 7139 if (zone_idx > pgdat->nr_zones) 7140 pgdat->nr_zones = zone_idx; 7141 7142 zone->zone_start_pfn = zone_start_pfn; 7143 7144 mminit_dprintk(MMINIT_TRACE, "memmap_init", 7145 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 7146 pgdat->node_id, 7147 (unsigned long)zone_idx(zone), 7148 zone_start_pfn, (zone_start_pfn + size)); 7149 7150 zone_init_free_lists(zone); 7151 zone->initialized = 1; 7152 } 7153 7154 /** 7155 * get_pfn_range_for_nid - Return the start and end page frames for a node 7156 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 
7157 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 7158 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 7159 * 7160 * It returns the start and end page frame of a node based on information 7161 * provided by memblock_set_node(). If called for a node 7162 * with no available memory, a warning is printed and the start and end 7163 * PFNs will be 0. 7164 */ 7165 void __init get_pfn_range_for_nid(unsigned int nid, 7166 unsigned long *start_pfn, unsigned long *end_pfn) 7167 { 7168 unsigned long this_start_pfn, this_end_pfn; 7169 int i; 7170 7171 *start_pfn = -1UL; 7172 *end_pfn = 0; 7173 7174 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 7175 *start_pfn = min(*start_pfn, this_start_pfn); 7176 *end_pfn = max(*end_pfn, this_end_pfn); 7177 } 7178 7179 if (*start_pfn == -1UL) 7180 *start_pfn = 0; 7181 } 7182 7183 /* 7184 * This finds a zone that can be used for ZONE_MOVABLE pages. The 7185 * assumption is made that zones within a node are ordered in monotonic 7186 * increasing memory addresses so that the "highest" populated zone is used 7187 */ 7188 static void __init find_usable_zone_for_movable(void) 7189 { 7190 int zone_index; 7191 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 7192 if (zone_index == ZONE_MOVABLE) 7193 continue; 7194 7195 if (arch_zone_highest_possible_pfn[zone_index] > 7196 arch_zone_lowest_possible_pfn[zone_index]) 7197 break; 7198 } 7199 7200 VM_BUG_ON(zone_index == -1); 7201 movable_zone = zone_index; 7202 } 7203 7204 /* 7205 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 7206 * because it is sized independent of architecture. Unlike the other zones, 7207 * the starting point for ZONE_MOVABLE is not fixed. It may be different 7208 * in each node depending on the size of each node and how evenly kernelcore 7209 * is distributed. This helper function adjusts the zone ranges 7210 * provided by the architecture for a given node by using the end of the 7211 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that 7212 * zones within a node are in order of monotonic increases memory addresses 7213 */ 7214 static void __init adjust_zone_range_for_zone_movable(int nid, 7215 unsigned long zone_type, 7216 unsigned long node_start_pfn, 7217 unsigned long node_end_pfn, 7218 unsigned long *zone_start_pfn, 7219 unsigned long *zone_end_pfn) 7220 { 7221 /* Only adjust if ZONE_MOVABLE is on this node */ 7222 if (zone_movable_pfn[nid]) { 7223 /* Size ZONE_MOVABLE */ 7224 if (zone_type == ZONE_MOVABLE) { 7225 *zone_start_pfn = zone_movable_pfn[nid]; 7226 *zone_end_pfn = min(node_end_pfn, 7227 arch_zone_highest_possible_pfn[movable_zone]); 7228 7229 /* Adjust for ZONE_MOVABLE starting within this range */ 7230 } else if (!mirrored_kernelcore && 7231 *zone_start_pfn < zone_movable_pfn[nid] && 7232 *zone_end_pfn > zone_movable_pfn[nid]) { 7233 *zone_end_pfn = zone_movable_pfn[nid]; 7234 7235 /* Check if this whole range is within ZONE_MOVABLE */ 7236 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 7237 *zone_start_pfn = *zone_end_pfn; 7238 } 7239 } 7240 7241 /* 7242 * Return the number of pages a zone spans in a node, including holes 7243 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 7244 */ 7245 static unsigned long __init zone_spanned_pages_in_node(int nid, 7246 unsigned long zone_type, 7247 unsigned long node_start_pfn, 7248 unsigned long node_end_pfn, 7249 unsigned long *zone_start_pfn, 7250 unsigned long *zone_end_pfn) 7251 { 7252 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 7253 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 7254 /* When hotadd a new node from cpu_up(), the node should be empty */ 7255 if (!node_start_pfn && !node_end_pfn) 7256 return 0; 7257 7258 /* Get the start and end of the zone */ 7259 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 7260 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 7261 adjust_zone_range_for_zone_movable(nid, zone_type, 7262 node_start_pfn, node_end_pfn, 7263 zone_start_pfn, zone_end_pfn); 7264 7265 /* Check that this node has pages within the zone's required range */ 7266 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 7267 return 0; 7268 7269 /* Move the zone boundaries inside the node if necessary */ 7270 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 7271 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 7272 7273 /* Return the spanned pages */ 7274 return *zone_end_pfn - *zone_start_pfn; 7275 } 7276 7277 /* 7278 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 7279 * then all holes in the requested range will be accounted for. 7280 */ 7281 unsigned long __init __absent_pages_in_range(int nid, 7282 unsigned long range_start_pfn, 7283 unsigned long range_end_pfn) 7284 { 7285 unsigned long nr_absent = range_end_pfn - range_start_pfn; 7286 unsigned long start_pfn, end_pfn; 7287 int i; 7288 7289 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 7290 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 7291 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 7292 nr_absent -= end_pfn - start_pfn; 7293 } 7294 return nr_absent; 7295 } 7296 7297 /** 7298 * absent_pages_in_range - Return number of page frames in holes within a range 7299 * @start_pfn: The start PFN to start searching for holes 7300 * @end_pfn: The end PFN to stop searching for holes 7301 * 7302 * Return: the number of pages frames in memory holes within a range. 
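 *
 * Example (illustrative PFNs): if memblock.memory covers [0, 0x100) and
 * [0x200, 0x300), absent_pages_in_range(0, 0x300) returns 0x100, the size
 * of the [0x100, 0x200) hole.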
7303 */ 7304 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 7305 unsigned long end_pfn) 7306 { 7307 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 7308 } 7309 7310 /* Return the number of page frames in holes in a zone on a node */ 7311 static unsigned long __init zone_absent_pages_in_node(int nid, 7312 unsigned long zone_type, 7313 unsigned long node_start_pfn, 7314 unsigned long node_end_pfn) 7315 { 7316 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 7317 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 7318 unsigned long zone_start_pfn, zone_end_pfn; 7319 unsigned long nr_absent; 7320 7321 /* When hotadd a new node from cpu_up(), the node should be empty */ 7322 if (!node_start_pfn && !node_end_pfn) 7323 return 0; 7324 7325 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 7326 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 7327 7328 adjust_zone_range_for_zone_movable(nid, zone_type, 7329 node_start_pfn, node_end_pfn, 7330 &zone_start_pfn, &zone_end_pfn); 7331 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 7332 7333 /* 7334 * ZONE_MOVABLE handling. 7335 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 7336 * and vice versa. 7337 */ 7338 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 7339 unsigned long start_pfn, end_pfn; 7340 struct memblock_region *r; 7341 7342 for_each_mem_region(r) { 7343 start_pfn = clamp(memblock_region_memory_base_pfn(r), 7344 zone_start_pfn, zone_end_pfn); 7345 end_pfn = clamp(memblock_region_memory_end_pfn(r), 7346 zone_start_pfn, zone_end_pfn); 7347 7348 if (zone_type == ZONE_MOVABLE && 7349 memblock_is_mirror(r)) 7350 nr_absent += end_pfn - start_pfn; 7351 7352 if (zone_type == ZONE_NORMAL && 7353 !memblock_is_mirror(r)) 7354 nr_absent += end_pfn - start_pfn; 7355 } 7356 } 7357 7358 return nr_absent; 7359 } 7360 7361 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 7362 unsigned long node_start_pfn, 7363 unsigned long node_end_pfn) 7364 { 7365 unsigned long realtotalpages = 0, totalpages = 0; 7366 enum zone_type i; 7367 7368 for (i = 0; i < MAX_NR_ZONES; i++) { 7369 struct zone *zone = pgdat->node_zones + i; 7370 unsigned long zone_start_pfn, zone_end_pfn; 7371 unsigned long spanned, absent; 7372 unsigned long size, real_size; 7373 7374 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 7375 node_start_pfn, 7376 node_end_pfn, 7377 &zone_start_pfn, 7378 &zone_end_pfn); 7379 absent = zone_absent_pages_in_node(pgdat->node_id, i, 7380 node_start_pfn, 7381 node_end_pfn); 7382 7383 size = spanned; 7384 real_size = size - absent; 7385 7386 if (size) 7387 zone->zone_start_pfn = zone_start_pfn; 7388 else 7389 zone->zone_start_pfn = 0; 7390 zone->spanned_pages = size; 7391 zone->present_pages = real_size; 7392 #if defined(CONFIG_MEMORY_HOTPLUG) 7393 zone->present_early_pages = real_size; 7394 #endif 7395 7396 totalpages += size; 7397 realtotalpages += real_size; 7398 } 7399 7400 pgdat->node_spanned_pages = totalpages; 7401 pgdat->node_present_pages = realtotalpages; 7402 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 7403 } 7404 7405 #ifndef CONFIG_SPARSEMEM 7406 /* 7407 * Calculate the size of the zone->blockflags rounded to an unsigned long 7408 * Start by making sure zonesize is a multiple of pageblock_order by rounding 7409 * up. 
Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 7410 * round what is now in bits to nearest long in bits, then return it in 7411 * bytes. 7412 */ 7413 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 7414 { 7415 unsigned long usemapsize; 7416 7417 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 7418 usemapsize = roundup(zonesize, pageblock_nr_pages); 7419 usemapsize = usemapsize >> pageblock_order; 7420 usemapsize *= NR_PAGEBLOCK_BITS; 7421 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 7422 7423 return usemapsize / 8; 7424 } 7425 7426 static void __ref setup_usemap(struct zone *zone) 7427 { 7428 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, 7429 zone->spanned_pages); 7430 zone->pageblock_flags = NULL; 7431 if (usemapsize) { 7432 zone->pageblock_flags = 7433 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 7434 zone_to_nid(zone)); 7435 if (!zone->pageblock_flags) 7436 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 7437 usemapsize, zone->name, zone_to_nid(zone)); 7438 } 7439 } 7440 #else 7441 static inline void setup_usemap(struct zone *zone) {} 7442 #endif /* CONFIG_SPARSEMEM */ 7443 7444 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 7445 7446 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 7447 void __init set_pageblock_order(void) 7448 { 7449 unsigned int order = MAX_ORDER - 1; 7450 7451 /* Check that pageblock_nr_pages has not already been setup */ 7452 if (pageblock_order) 7453 return; 7454 7455 /* Don't let pageblocks exceed the maximum allocation granularity. */ 7456 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) 7457 order = HUGETLB_PAGE_ORDER; 7458 7459 /* 7460 * Assume the largest contiguous order of interest is a huge page. 7461 * This value may be variable depending on boot parameters on IA64 and 7462 * powerpc. 7463 */ 7464 pageblock_order = order; 7465 } 7466 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 7467 7468 /* 7469 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 7470 * is unused as pageblock_order is set at compile-time. See 7471 * include/linux/pageblock-flags.h for the values of pageblock_order based on 7472 * the kernel config 7473 */ 7474 void __init set_pageblock_order(void) 7475 { 7476 } 7477 7478 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 7479 7480 static unsigned long __init calc_memmap_size(unsigned long spanned_pages, 7481 unsigned long present_pages) 7482 { 7483 unsigned long pages = spanned_pages; 7484 7485 /* 7486 * Provide a more accurate estimation if there are holes within 7487 * the zone and SPARSEMEM is in use. If there are holes within the 7488 * zone, each populated memory region may cost us one or two extra 7489 * memmap pages due to alignment because memmap pages for each 7490 * populated regions may not be naturally aligned on page boundary. 7491 * So the (present_pages >> 4) heuristic is a tradeoff for that. 
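 *
 * Worked illustration (numbers invented): with spanned_pages = 1,000,000
 * and present_pages = 900,000 the threshold is 900,000 + (900,000 >> 4) =
 * 956,250, so the check below falls back to present_pages and the memmap
 * estimate is sized by what is actually present rather than by the
 * sparse span.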
7492 */ 7493 if (spanned_pages > present_pages + (present_pages >> 4) && 7494 IS_ENABLED(CONFIG_SPARSEMEM)) 7495 pages = present_pages; 7496 7497 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 7498 } 7499 7500 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 7501 static void pgdat_init_split_queue(struct pglist_data *pgdat) 7502 { 7503 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 7504 7505 spin_lock_init(&ds_queue->split_queue_lock); 7506 INIT_LIST_HEAD(&ds_queue->split_queue); 7507 ds_queue->split_queue_len = 0; 7508 } 7509 #else 7510 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 7511 #endif 7512 7513 #ifdef CONFIG_COMPACTION 7514 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 7515 { 7516 init_waitqueue_head(&pgdat->kcompactd_wait); 7517 } 7518 #else 7519 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 7520 #endif 7521 7522 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 7523 { 7524 int i; 7525 7526 pgdat_resize_init(pgdat); 7527 7528 pgdat_init_split_queue(pgdat); 7529 pgdat_init_kcompactd(pgdat); 7530 7531 init_waitqueue_head(&pgdat->kswapd_wait); 7532 init_waitqueue_head(&pgdat->pfmemalloc_wait); 7533 7534 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 7535 init_waitqueue_head(&pgdat->reclaim_wait[i]); 7536 7537 pgdat_page_ext_init(pgdat); 7538 lruvec_init(&pgdat->__lruvec); 7539 } 7540 7541 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 7542 unsigned long remaining_pages) 7543 { 7544 atomic_long_set(&zone->managed_pages, remaining_pages); 7545 zone_set_nid(zone, nid); 7546 zone->name = zone_names[idx]; 7547 zone->zone_pgdat = NODE_DATA(nid); 7548 spin_lock_init(&zone->lock); 7549 zone_seqlock_init(zone); 7550 zone_pcp_init(zone); 7551 } 7552 7553 /* 7554 * Set up the zone data structures 7555 * - init pgdat internals 7556 * - init all zones belonging to this node 7557 * 7558 * NOTE: this function is only called during memory hotplug 7559 */ 7560 #ifdef CONFIG_MEMORY_HOTPLUG 7561 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) 7562 { 7563 int nid = pgdat->node_id; 7564 enum zone_type z; 7565 int cpu; 7566 7567 pgdat_init_internals(pgdat); 7568 7569 if (pgdat->per_cpu_nodestats == &boot_nodestats) 7570 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 7571 7572 /* 7573 * Reset the nr_zones, order and highest_zoneidx before reuse. 7574 * Note that kswapd will init kswapd_highest_zoneidx properly 7575 * when it starts in the near future. 7576 */ 7577 pgdat->nr_zones = 0; 7578 pgdat->kswapd_order = 0; 7579 pgdat->kswapd_highest_zoneidx = 0; 7580 pgdat->node_start_pfn = 0; 7581 for_each_online_cpu(cpu) { 7582 struct per_cpu_nodestat *p; 7583 7584 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 7585 memset(p, 0, sizeof(*p)); 7586 } 7587 7588 for (z = 0; z < MAX_NR_ZONES; z++) 7589 zone_init_internals(&pgdat->node_zones[z], z, nid, 0); 7590 } 7591 #endif 7592 7593 /* 7594 * Set up the zone data structures: 7595 * - mark all pages reserved 7596 * - mark all memory queues empty 7597 * - clear the memory bitmaps 7598 * 7599 * NOTE: pgdat should get zeroed by caller. 7600 * NOTE: this function is only called during early init. 
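 *
 * Rough sketch of the per-zone accounting performed below, with invented
 * numbers for one !highmem zone: present_pages = 262144 (1 GiB of 4 KiB
 * pages), memmap_pages ~= 4096 assuming a 64-byte struct page, and
 * dma_reserve = 0 leave roughly 258048 pages as the freesize passed to
 * zone_init_internals(). This is only an approximation; managed_pages is
 * corrected once memblock releases pages to the buddy allocator.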
7601 */ 7602 static void __init free_area_init_core(struct pglist_data *pgdat) 7603 { 7604 enum zone_type j; 7605 int nid = pgdat->node_id; 7606 7607 pgdat_init_internals(pgdat); 7608 pgdat->per_cpu_nodestats = &boot_nodestats; 7609 7610 for (j = 0; j < MAX_NR_ZONES; j++) { 7611 struct zone *zone = pgdat->node_zones + j; 7612 unsigned long size, freesize, memmap_pages; 7613 7614 size = zone->spanned_pages; 7615 freesize = zone->present_pages; 7616 7617 /* 7618 * Adjust freesize so that it accounts for how much memory 7619 * is used by this zone for memmap. This affects the watermark 7620 * and per-cpu initialisations 7621 */ 7622 memmap_pages = calc_memmap_size(size, freesize); 7623 if (!is_highmem_idx(j)) { 7624 if (freesize >= memmap_pages) { 7625 freesize -= memmap_pages; 7626 if (memmap_pages) 7627 pr_debug(" %s zone: %lu pages used for memmap\n", 7628 zone_names[j], memmap_pages); 7629 } else 7630 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", 7631 zone_names[j], memmap_pages, freesize); 7632 } 7633 7634 /* Account for reserved pages */ 7635 if (j == 0 && freesize > dma_reserve) { 7636 freesize -= dma_reserve; 7637 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); 7638 } 7639 7640 if (!is_highmem_idx(j)) 7641 nr_kernel_pages += freesize; 7642 /* Charge for highmem memmap if there are enough kernel pages */ 7643 else if (nr_kernel_pages > memmap_pages * 2) 7644 nr_kernel_pages -= memmap_pages; 7645 nr_all_pages += freesize; 7646 7647 /* 7648 * Set an approximate value for lowmem here, it will be adjusted 7649 * when the bootmem allocator frees pages into the buddy system. 7650 * And all highmem pages will be managed by the buddy system. 7651 */ 7652 zone_init_internals(zone, j, nid, freesize); 7653 7654 if (!size) 7655 continue; 7656 7657 set_pageblock_order(); 7658 setup_usemap(zone); 7659 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 7660 } 7661 } 7662 7663 #ifdef CONFIG_FLATMEM 7664 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 7665 { 7666 unsigned long __maybe_unused start = 0; 7667 unsigned long __maybe_unused offset = 0; 7668 7669 /* Skip empty nodes */ 7670 if (!pgdat->node_spanned_pages) 7671 return; 7672 7673 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 7674 offset = pgdat->node_start_pfn - start; 7675 /* ia64 gets its own node_mem_map, before this, without bootmem */ 7676 if (!pgdat->node_mem_map) { 7677 unsigned long size, end; 7678 struct page *map; 7679 7680 /* 7681 * The zone's endpoints aren't required to be MAX_ORDER 7682 * aligned but the node_mem_map endpoints must be in order 7683 * for the buddy allocator to function correctly. 
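 *
 * Hypothetical example of the rounding (values not taken from any real
 * layout): with MAX_ORDER_NR_PAGES = 1024 and node_start_pfn = 0x12345,
 * start above became 0x12000 and offset = 0x345; end is aligned up the
 * same way below, so the map covers whole MAX_ORDER blocks and
 * node_mem_map ends up pointing at map + offset.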
7684 */ 7685 end = pgdat_end_pfn(pgdat); 7686 end = ALIGN(end, MAX_ORDER_NR_PAGES); 7687 size = (end - start) * sizeof(struct page); 7688 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 7689 pgdat->node_id, false); 7690 if (!map) 7691 panic("Failed to allocate %ld bytes for node %d memory map\n", 7692 size, pgdat->node_id); 7693 pgdat->node_mem_map = map + offset; 7694 } 7695 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 7696 __func__, pgdat->node_id, (unsigned long)pgdat, 7697 (unsigned long)pgdat->node_mem_map); 7698 #ifndef CONFIG_NUMA 7699 /* 7700 * With no DISCONTIG, the global mem_map is just set as node 0's 7701 */ 7702 if (pgdat == NODE_DATA(0)) { 7703 mem_map = NODE_DATA(0)->node_mem_map; 7704 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 7705 mem_map -= offset; 7706 } 7707 #endif 7708 } 7709 #else 7710 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 7711 #endif /* CONFIG_FLATMEM */ 7712 7713 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 7714 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 7715 { 7716 pgdat->first_deferred_pfn = ULONG_MAX; 7717 } 7718 #else 7719 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 7720 #endif 7721 7722 static void __init free_area_init_node(int nid) 7723 { 7724 pg_data_t *pgdat = NODE_DATA(nid); 7725 unsigned long start_pfn = 0; 7726 unsigned long end_pfn = 0; 7727 7728 /* pg_data_t should be reset to zero when it's allocated */ 7729 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 7730 7731 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 7732 7733 pgdat->node_id = nid; 7734 pgdat->node_start_pfn = start_pfn; 7735 pgdat->per_cpu_nodestats = NULL; 7736 7737 if (start_pfn != end_pfn) { 7738 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 7739 (u64)start_pfn << PAGE_SHIFT, 7740 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 7741 } else { 7742 pr_info("Initmem setup node %d as memoryless\n", nid); 7743 } 7744 7745 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 7746 7747 alloc_node_mem_map(pgdat); 7748 pgdat_set_deferred_range(pgdat); 7749 7750 free_area_init_core(pgdat); 7751 } 7752 7753 static void __init free_area_init_memoryless_node(int nid) 7754 { 7755 free_area_init_node(nid); 7756 } 7757 7758 #if MAX_NUMNODES > 1 7759 /* 7760 * Figure out the number of possible node ids. 7761 */ 7762 void __init setup_nr_node_ids(void) 7763 { 7764 unsigned int highest; 7765 7766 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 7767 nr_node_ids = highest + 1; 7768 } 7769 #endif 7770 7771 /** 7772 * node_map_pfn_alignment - determine the maximum internode alignment 7773 * 7774 * This function should be called after node map is populated and sorted. 7775 * It calculates the maximum power of two alignment which can distinguish 7776 * all the nodes. 7777 * 7778 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 7779 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 7780 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 7781 * shifted, 1GiB is enough and this function will indicate so. 7782 * 7783 * This is used to test whether pfn -> nid mapping of the chosen memory 7784 * model has fine enough granularity to avoid incorrect mapping for the 7785 * populated node map. 7786 * 7787 * Return: the determined alignment in pfn's. 0 if there is no alignment 7788 * requirement (single node). 
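 *
 * A further made-up illustration of the mechanics below: if node 0 ends
 * and node 1 begins at PFN 0x14000, then __ffs(0x14000) is 14, the
 * initial mask keeps bits 14 and above, no coarsening is needed, and the
 * reported alignment is 2^14 PFNs (64 MiB with 4 KiB pages).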
7789 */ 7790 unsigned long __init node_map_pfn_alignment(void) 7791 { 7792 unsigned long accl_mask = 0, last_end = 0; 7793 unsigned long start, end, mask; 7794 int last_nid = NUMA_NO_NODE; 7795 int i, nid; 7796 7797 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 7798 if (!start || last_nid < 0 || last_nid == nid) { 7799 last_nid = nid; 7800 last_end = end; 7801 continue; 7802 } 7803 7804 /* 7805 * Start with a mask granular enough to pin-point to the 7806 * start pfn and tick off bits one-by-one until it becomes 7807 * too coarse to separate the current node from the last. 7808 */ 7809 mask = ~((1 << __ffs(start)) - 1); 7810 while (mask && last_end <= (start & (mask << 1))) 7811 mask <<= 1; 7812 7813 /* accumulate all internode masks */ 7814 accl_mask |= mask; 7815 } 7816 7817 /* convert mask to number of pages */ 7818 return ~accl_mask + 1; 7819 } 7820 7821 /** 7822 * find_min_pfn_with_active_regions - Find the minimum PFN registered 7823 * 7824 * Return: the minimum PFN based on information provided via 7825 * memblock_set_node(). 7826 */ 7827 unsigned long __init find_min_pfn_with_active_regions(void) 7828 { 7829 return PHYS_PFN(memblock_start_of_DRAM()); 7830 } 7831 7832 /* 7833 * early_calculate_totalpages() 7834 * Sum pages in active regions for movable zone. 7835 * Populate N_MEMORY for calculating usable_nodes. 7836 */ 7837 static unsigned long __init early_calculate_totalpages(void) 7838 { 7839 unsigned long totalpages = 0; 7840 unsigned long start_pfn, end_pfn; 7841 int i, nid; 7842 7843 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 7844 unsigned long pages = end_pfn - start_pfn; 7845 7846 totalpages += pages; 7847 if (pages) 7848 node_set_state(nid, N_MEMORY); 7849 } 7850 return totalpages; 7851 } 7852 7853 /* 7854 * Find the PFN the Movable zone begins in each node. Kernel memory 7855 * is spread evenly between nodes as long as the nodes have enough 7856 * memory. When they don't, some nodes will have more kernelcore than 7857 * others 7858 */ 7859 static void __init find_zone_movable_pfns_for_nodes(void) 7860 { 7861 int i, nid; 7862 unsigned long usable_startpfn; 7863 unsigned long kernelcore_node, kernelcore_remaining; 7864 /* save the state before borrow the nodemask */ 7865 nodemask_t saved_node_state = node_states[N_MEMORY]; 7866 unsigned long totalpages = early_calculate_totalpages(); 7867 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 7868 struct memblock_region *r; 7869 7870 /* Need to find movable_zone earlier when movable_node is specified. */ 7871 find_usable_zone_for_movable(); 7872 7873 /* 7874 * If movable_node is specified, ignore kernelcore and movablecore 7875 * options. 7876 */ 7877 if (movable_node_is_enabled()) { 7878 for_each_mem_region(r) { 7879 if (!memblock_is_hotpluggable(r)) 7880 continue; 7881 7882 nid = memblock_get_region_node(r); 7883 7884 usable_startpfn = PFN_DOWN(r->base); 7885 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 
7886 min(usable_startpfn, zone_movable_pfn[nid]) : 7887 usable_startpfn; 7888 } 7889 7890 goto out2; 7891 } 7892 7893 /* 7894 * If kernelcore=mirror is specified, ignore movablecore option 7895 */ 7896 if (mirrored_kernelcore) { 7897 bool mem_below_4gb_not_mirrored = false; 7898 7899 for_each_mem_region(r) { 7900 if (memblock_is_mirror(r)) 7901 continue; 7902 7903 nid = memblock_get_region_node(r); 7904 7905 usable_startpfn = memblock_region_memory_base_pfn(r); 7906 7907 if (usable_startpfn < PHYS_PFN(SZ_4G)) { 7908 mem_below_4gb_not_mirrored = true; 7909 continue; 7910 } 7911 7912 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 7913 min(usable_startpfn, zone_movable_pfn[nid]) : 7914 usable_startpfn; 7915 } 7916 7917 if (mem_below_4gb_not_mirrored) 7918 pr_warn("This configuration results in unmirrored kernel memory.\n"); 7919 7920 goto out2; 7921 } 7922 7923 /* 7924 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 7925 * amount of necessary memory. 7926 */ 7927 if (required_kernelcore_percent) 7928 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 7929 10000UL; 7930 if (required_movablecore_percent) 7931 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 7932 10000UL; 7933 7934 /* 7935 * If movablecore= was specified, calculate what size of 7936 * kernelcore that corresponds so that memory usable for 7937 * any allocation type is evenly spread. If both kernelcore 7938 * and movablecore are specified, then the value of kernelcore 7939 * will be used for required_kernelcore if it's greater than 7940 * what movablecore would have allowed. 7941 */ 7942 if (required_movablecore) { 7943 unsigned long corepages; 7944 7945 /* 7946 * Round-up so that ZONE_MOVABLE is at least as large as what 7947 * was requested by the user 7948 */ 7949 required_movablecore = 7950 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 7951 required_movablecore = min(totalpages, required_movablecore); 7952 corepages = totalpages - required_movablecore; 7953 7954 required_kernelcore = max(required_kernelcore, corepages); 7955 } 7956 7957 /* 7958 * If kernelcore was not specified or kernelcore size is larger 7959 * than totalpages, there is no ZONE_MOVABLE. 7960 */ 7961 if (!required_kernelcore || required_kernelcore >= totalpages) 7962 goto out; 7963 7964 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 7965 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 7966 7967 restart: 7968 /* Spread kernelcore memory as evenly as possible throughout nodes */ 7969 kernelcore_node = required_kernelcore / usable_nodes; 7970 for_each_node_state(nid, N_MEMORY) { 7971 unsigned long start_pfn, end_pfn; 7972 7973 /* 7974 * Recalculate kernelcore_node if the division per node 7975 * now exceeds what is necessary to satisfy the requested 7976 * amount of memory for the kernel 7977 */ 7978 if (required_kernelcore < kernelcore_node) 7979 kernelcore_node = required_kernelcore / usable_nodes; 7980 7981 /* 7982 * As the map is walked, we track how much memory is usable 7983 * by the kernel using kernelcore_remaining. 
When it is 7984 * 0, the rest of the node is usable by ZONE_MOVABLE 7985 */ 7986 kernelcore_remaining = kernelcore_node; 7987 7988 /* Go through each range of PFNs within this node */ 7989 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 7990 unsigned long size_pages; 7991 7992 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 7993 if (start_pfn >= end_pfn) 7994 continue; 7995 7996 /* Account for what is only usable for kernelcore */ 7997 if (start_pfn < usable_startpfn) { 7998 unsigned long kernel_pages; 7999 kernel_pages = min(end_pfn, usable_startpfn) 8000 - start_pfn; 8001 8002 kernelcore_remaining -= min(kernel_pages, 8003 kernelcore_remaining); 8004 required_kernelcore -= min(kernel_pages, 8005 required_kernelcore); 8006 8007 /* Continue if range is now fully accounted */ 8008 if (end_pfn <= usable_startpfn) { 8009 8010 /* 8011 * Push zone_movable_pfn to the end so 8012 * that if we have to rebalance 8013 * kernelcore across nodes, we will 8014 * not double account here 8015 */ 8016 zone_movable_pfn[nid] = end_pfn; 8017 continue; 8018 } 8019 start_pfn = usable_startpfn; 8020 } 8021 8022 /* 8023 * The usable PFN range for ZONE_MOVABLE is from 8024 * start_pfn->end_pfn. Calculate size_pages as the 8025 * number of pages used as kernelcore 8026 */ 8027 size_pages = end_pfn - start_pfn; 8028 if (size_pages > kernelcore_remaining) 8029 size_pages = kernelcore_remaining; 8030 zone_movable_pfn[nid] = start_pfn + size_pages; 8031 8032 /* 8033 * Some kernelcore has been met, update counts and 8034 * break if the kernelcore for this node has been 8035 * satisfied 8036 */ 8037 required_kernelcore -= min(required_kernelcore, 8038 size_pages); 8039 kernelcore_remaining -= size_pages; 8040 if (!kernelcore_remaining) 8041 break; 8042 } 8043 } 8044 8045 /* 8046 * If there is still required_kernelcore, we do another pass with one 8047 * less node in the count. This will push zone_movable_pfn[nid] further 8048 * along on the nodes that still have memory until kernelcore is 8049 * satisfied 8050 */ 8051 usable_nodes--; 8052 if (usable_nodes && required_kernelcore > usable_nodes) 8053 goto restart; 8054 8055 out2: 8056 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 8057 for (nid = 0; nid < MAX_NUMNODES; nid++) { 8058 unsigned long start_pfn, end_pfn; 8059 8060 zone_movable_pfn[nid] = 8061 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 8062 8063 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 8064 if (zone_movable_pfn[nid] >= end_pfn) 8065 zone_movable_pfn[nid] = 0; 8066 } 8067 8068 out: 8069 /* restore the node_state */ 8070 node_states[N_MEMORY] = saved_node_state; 8071 } 8072 8073 /* Any regular or high memory on that node ? */ 8074 static void check_for_memory(pg_data_t *pgdat, int nid) 8075 { 8076 enum zone_type zone_type; 8077 8078 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 8079 struct zone *zone = &pgdat->node_zones[zone_type]; 8080 if (populated_zone(zone)) { 8081 if (IS_ENABLED(CONFIG_HIGHMEM)) 8082 node_set_state(nid, N_HIGH_MEMORY); 8083 if (zone_type <= ZONE_NORMAL) 8084 node_set_state(nid, N_NORMAL_MEMORY); 8085 break; 8086 } 8087 } 8088 } 8089 8090 /* 8091 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. 
For 8092 * such cases we allow max_zone_pfn sorted in the descending order 8093 */ 8094 bool __weak arch_has_descending_max_zone_pfns(void) 8095 { 8096 return false; 8097 } 8098 8099 /** 8100 * free_area_init - Initialise all pg_data_t and zone data 8101 * @max_zone_pfn: an array of max PFNs for each zone 8102 * 8103 * This will call free_area_init_node() for each active node in the system. 8104 * Using the page ranges provided by memblock_set_node(), the size of each 8105 * zone in each node and their holes is calculated. If the maximum PFN 8106 * between two adjacent zones match, it is assumed that the zone is empty. 8107 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 8108 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 8109 * starts where the previous one ended. For example, ZONE_DMA32 starts 8110 * at arch_max_dma_pfn. 8111 */ 8112 void __init free_area_init(unsigned long *max_zone_pfn) 8113 { 8114 unsigned long start_pfn, end_pfn; 8115 int i, nid, zone; 8116 bool descending; 8117 8118 /* Record where the zone boundaries are */ 8119 memset(arch_zone_lowest_possible_pfn, 0, 8120 sizeof(arch_zone_lowest_possible_pfn)); 8121 memset(arch_zone_highest_possible_pfn, 0, 8122 sizeof(arch_zone_highest_possible_pfn)); 8123 8124 start_pfn = find_min_pfn_with_active_regions(); 8125 descending = arch_has_descending_max_zone_pfns(); 8126 8127 for (i = 0; i < MAX_NR_ZONES; i++) { 8128 if (descending) 8129 zone = MAX_NR_ZONES - i - 1; 8130 else 8131 zone = i; 8132 8133 if (zone == ZONE_MOVABLE) 8134 continue; 8135 8136 end_pfn = max(max_zone_pfn[zone], start_pfn); 8137 arch_zone_lowest_possible_pfn[zone] = start_pfn; 8138 arch_zone_highest_possible_pfn[zone] = end_pfn; 8139 8140 start_pfn = end_pfn; 8141 } 8142 8143 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 8144 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 8145 find_zone_movable_pfns_for_nodes(); 8146 8147 /* Print out the zone ranges */ 8148 pr_info("Zone ranges:\n"); 8149 for (i = 0; i < MAX_NR_ZONES; i++) { 8150 if (i == ZONE_MOVABLE) 8151 continue; 8152 pr_info(" %-8s ", zone_names[i]); 8153 if (arch_zone_lowest_possible_pfn[i] == 8154 arch_zone_highest_possible_pfn[i]) 8155 pr_cont("empty\n"); 8156 else 8157 pr_cont("[mem %#018Lx-%#018Lx]\n", 8158 (u64)arch_zone_lowest_possible_pfn[i] 8159 << PAGE_SHIFT, 8160 ((u64)arch_zone_highest_possible_pfn[i] 8161 << PAGE_SHIFT) - 1); 8162 } 8163 8164 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 8165 pr_info("Movable zone start for each node\n"); 8166 for (i = 0; i < MAX_NUMNODES; i++) { 8167 if (zone_movable_pfn[i]) 8168 pr_info(" Node %d: %#018Lx\n", i, 8169 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 8170 } 8171 8172 /* 8173 * Print out the early node map, and initialize the 8174 * subsection-map relative to active online memory ranges to 8175 * enable future "sub-section" extensions of the memory map. 
8176 */ 8177 pr_info("Early memory node ranges\n"); 8178 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 8179 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 8180 (u64)start_pfn << PAGE_SHIFT, 8181 ((u64)end_pfn << PAGE_SHIFT) - 1); 8182 subsection_map_init(start_pfn, end_pfn - start_pfn); 8183 } 8184 8185 /* Initialise every node */ 8186 mminit_verify_pageflags_layout(); 8187 setup_nr_node_ids(); 8188 for_each_node(nid) { 8189 pg_data_t *pgdat; 8190 8191 if (!node_online(nid)) { 8192 pr_info("Initializing node %d as memoryless\n", nid); 8193 8194 /* Allocator not initialized yet */ 8195 pgdat = arch_alloc_nodedata(nid); 8196 if (!pgdat) { 8197 pr_err("Cannot allocate %zuB for node %d.\n", 8198 sizeof(*pgdat), nid); 8199 continue; 8200 } 8201 arch_refresh_nodedata(nid, pgdat); 8202 free_area_init_memoryless_node(nid); 8203 8204 /* 8205 * We do not want to confuse userspace by sysfs 8206 * files/directories for node without any memory 8207 * attached to it, so this node is not marked as 8208 * N_MEMORY and not marked online so that no sysfs 8209 * hierarchy will be created via register_one_node for 8210 * it. The pgdat will get fully initialized by 8211 * hotadd_init_pgdat() when memory is hotplugged into 8212 * this node. 8213 */ 8214 continue; 8215 } 8216 8217 pgdat = NODE_DATA(nid); 8218 free_area_init_node(nid); 8219 8220 /* Any memory on that node */ 8221 if (pgdat->node_present_pages) 8222 node_set_state(nid, N_MEMORY); 8223 check_for_memory(pgdat, nid); 8224 } 8225 8226 memmap_init(); 8227 } 8228 8229 static int __init cmdline_parse_core(char *p, unsigned long *core, 8230 unsigned long *percent) 8231 { 8232 unsigned long long coremem; 8233 char *endptr; 8234 8235 if (!p) 8236 return -EINVAL; 8237 8238 /* Value may be a percentage of total memory, otherwise bytes */ 8239 coremem = simple_strtoull(p, &endptr, 0); 8240 if (*endptr == '%') { 8241 /* Paranoid check for percent values greater than 100 */ 8242 WARN_ON(coremem > 100); 8243 8244 *percent = coremem; 8245 } else { 8246 coremem = memparse(p, &p); 8247 /* Paranoid check that UL is enough for the coremem value */ 8248 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 8249 8250 *core = coremem >> PAGE_SHIFT; 8251 *percent = 0UL; 8252 } 8253 return 0; 8254 } 8255 8256 /* 8257 * kernelcore=size sets the amount of memory for use for allocations that 8258 * cannot be reclaimed or migrated. 8259 */ 8260 static int __init cmdline_parse_kernelcore(char *p) 8261 { 8262 /* parse kernelcore=mirror */ 8263 if (parse_option_str(p, "mirror")) { 8264 mirrored_kernelcore = true; 8265 return 0; 8266 } 8267 8268 return cmdline_parse_core(p, &required_kernelcore, 8269 &required_kernelcore_percent); 8270 } 8271 8272 /* 8273 * movablecore=size sets the amount of memory for use for allocations that 8274 * can be reclaimed or migrated. 
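 *
 * Illustrative command lines (values invented): "movablecore=1G" goes
 * through memparse() in cmdline_parse_core() above and, with 4 KiB pages,
 * becomes required_movablecore = 262144 pages, whereas "movablecore=25%"
 * only records required_movablecore_percent = 25 and the page count is
 * derived from totalpages later in find_zone_movable_pfns_for_nodes().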
8275 */ 8276 static int __init cmdline_parse_movablecore(char *p) 8277 { 8278 return cmdline_parse_core(p, &required_movablecore, 8279 &required_movablecore_percent); 8280 } 8281 8282 early_param("kernelcore", cmdline_parse_kernelcore); 8283 early_param("movablecore", cmdline_parse_movablecore); 8284 8285 void adjust_managed_page_count(struct page *page, long count) 8286 { 8287 atomic_long_add(count, &page_zone(page)->managed_pages); 8288 totalram_pages_add(count); 8289 #ifdef CONFIG_HIGHMEM 8290 if (PageHighMem(page)) 8291 totalhigh_pages_add(count); 8292 #endif 8293 } 8294 EXPORT_SYMBOL(adjust_managed_page_count); 8295 8296 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 8297 { 8298 void *pos; 8299 unsigned long pages = 0; 8300 8301 start = (void *)PAGE_ALIGN((unsigned long)start); 8302 end = (void *)((unsigned long)end & PAGE_MASK); 8303 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 8304 struct page *page = virt_to_page(pos); 8305 void *direct_map_addr; 8306 8307 /* 8308 * 'direct_map_addr' might be different from 'pos' 8309 * because some architectures' virt_to_page() 8310 * work with aliases. Getting the direct map 8311 * address ensures that we get a _writeable_ 8312 * alias for the memset(). 8313 */ 8314 direct_map_addr = page_address(page); 8315 /* 8316 * Perform a kasan-unchecked memset() since this memory 8317 * has not been initialized. 8318 */ 8319 direct_map_addr = kasan_reset_tag(direct_map_addr); 8320 if ((unsigned int)poison <= 0xFF) 8321 memset(direct_map_addr, poison, PAGE_SIZE); 8322 8323 free_reserved_page(page); 8324 } 8325 8326 if (pages && s) 8327 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 8328 8329 return pages; 8330 } 8331 8332 void __init mem_init_print_info(void) 8333 { 8334 unsigned long physpages, codesize, datasize, rosize, bss_size; 8335 unsigned long init_code_size, init_data_size; 8336 8337 physpages = get_num_physpages(); 8338 codesize = _etext - _stext; 8339 datasize = _edata - _sdata; 8340 rosize = __end_rodata - __start_rodata; 8341 bss_size = __bss_stop - __bss_start; 8342 init_data_size = __init_end - __init_begin; 8343 init_code_size = _einittext - _sinittext; 8344 8345 /* 8346 * Detect special cases and adjust section sizes accordingly: 8347 * 1) .init.* may be embedded into .data sections 8348 * 2) .init.text.* may be out of [__init_begin, __init_end], 8349 * please refer to arch/tile/kernel/vmlinux.lds.S. 8350 * 3) .rodata.* may be embedded into .text or .data sections. 
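 *
 * For example, when _sinittext lies inside [__init_begin, __init_end),
 * adj_init_size() below subtracts init_code_size from init_data_size so
 * that the init text is not counted twice in the "init" figure printed
 * at the end of this function.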
8351 */ 8352 #define adj_init_size(start, end, size, pos, adj) \ 8353 do { \ 8354 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ 8355 size -= adj; \ 8356 } while (0) 8357 8358 adj_init_size(__init_begin, __init_end, init_data_size, 8359 _sinittext, init_code_size); 8360 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 8361 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 8362 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 8363 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 8364 8365 #undef adj_init_size 8366 8367 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 8368 #ifdef CONFIG_HIGHMEM 8369 ", %luK highmem" 8370 #endif 8371 ")\n", 8372 K(nr_free_pages()), K(physpages), 8373 codesize >> 10, datasize >> 10, rosize >> 10, 8374 (init_data_size + init_code_size) >> 10, bss_size >> 10, 8375 K(physpages - totalram_pages() - totalcma_pages), 8376 K(totalcma_pages) 8377 #ifdef CONFIG_HIGHMEM 8378 , K(totalhigh_pages()) 8379 #endif 8380 ); 8381 } 8382 8383 /** 8384 * set_dma_reserve - set the specified number of pages reserved in the first zone 8385 * @new_dma_reserve: The number of pages to mark reserved 8386 * 8387 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 8388 * In the DMA zone, a significant percentage may be consumed by kernel image 8389 * and other unfreeable allocations which can skew the watermarks badly. This 8390 * function may optionally be used to account for unfreeable pages in the 8391 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 8392 * smaller per-cpu batchsize. 8393 */ 8394 void __init set_dma_reserve(unsigned long new_dma_reserve) 8395 { 8396 dma_reserve = new_dma_reserve; 8397 } 8398 8399 static int page_alloc_cpu_dead(unsigned int cpu) 8400 { 8401 struct zone *zone; 8402 8403 lru_add_drain_cpu(cpu); 8404 mlock_page_drain_remote(cpu); 8405 drain_pages(cpu); 8406 8407 /* 8408 * Spill the event counters of the dead processor 8409 * into the current processors event counters. 8410 * This artificially elevates the count of the current 8411 * processor. 8412 */ 8413 vm_events_fold_cpu(cpu); 8414 8415 /* 8416 * Zero the differential counters of the dead processor 8417 * so that the vm statistics are consistent. 8418 * 8419 * This is only okay since the processor is dead and cannot 8420 * race with what we are doing. 
8421 */ 8422 cpu_vm_stats_fold(cpu); 8423 8424 for_each_populated_zone(zone) 8425 zone_pcp_update(zone, 0); 8426 8427 return 0; 8428 } 8429 8430 static int page_alloc_cpu_online(unsigned int cpu) 8431 { 8432 struct zone *zone; 8433 8434 for_each_populated_zone(zone) 8435 zone_pcp_update(zone, 1); 8436 return 0; 8437 } 8438 8439 #ifdef CONFIG_NUMA 8440 int hashdist = HASHDIST_DEFAULT; 8441 8442 static int __init set_hashdist(char *str) 8443 { 8444 if (!str) 8445 return 0; 8446 hashdist = simple_strtoul(str, &str, 0); 8447 return 1; 8448 } 8449 __setup("hashdist=", set_hashdist); 8450 #endif 8451 8452 void __init page_alloc_init(void) 8453 { 8454 int ret; 8455 8456 #ifdef CONFIG_NUMA 8457 if (num_node_state(N_MEMORY) == 1) 8458 hashdist = 0; 8459 #endif 8460 8461 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 8462 "mm/page_alloc:pcp", 8463 page_alloc_cpu_online, 8464 page_alloc_cpu_dead); 8465 WARN_ON(ret < 0); 8466 } 8467 8468 /* 8469 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 8470 * or min_free_kbytes changes. 8471 */ 8472 static void calculate_totalreserve_pages(void) 8473 { 8474 struct pglist_data *pgdat; 8475 unsigned long reserve_pages = 0; 8476 enum zone_type i, j; 8477 8478 for_each_online_pgdat(pgdat) { 8479 8480 pgdat->totalreserve_pages = 0; 8481 8482 for (i = 0; i < MAX_NR_ZONES; i++) { 8483 struct zone *zone = pgdat->node_zones + i; 8484 long max = 0; 8485 unsigned long managed_pages = zone_managed_pages(zone); 8486 8487 /* Find valid and maximum lowmem_reserve in the zone */ 8488 for (j = i; j < MAX_NR_ZONES; j++) { 8489 if (zone->lowmem_reserve[j] > max) 8490 max = zone->lowmem_reserve[j]; 8491 } 8492 8493 /* we treat the high watermark as reserved pages. */ 8494 max += high_wmark_pages(zone); 8495 8496 if (max > managed_pages) 8497 max = managed_pages; 8498 8499 pgdat->totalreserve_pages += max; 8500 8501 reserve_pages += max; 8502 } 8503 } 8504 totalreserve_pages = reserve_pages; 8505 } 8506 8507 /* 8508 * setup_per_zone_lowmem_reserve - called whenever 8509 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 8510 * has a correct pages reserved value, so an adequate number of 8511 * pages are left in the zone after a successful __alloc_pages(). 
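 *
 * Worked example (ratio and sizes invented): for a DMA32 zone with
 * sysctl_lowmem_reserve_ratio[ZONE_DMA32] = 256 and a Normal zone of
 * 1,048,576 managed pages above it, the loop below sets DMA32's
 * lowmem_reserve[ZONE_NORMAL] = 1,048,576 / 256 = 4096 pages, i.e. that
 * many DMA32 pages are kept out of reach of allocations that could have
 * been satisfied from ZONE_NORMAL.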
8512 */ 8513 static void setup_per_zone_lowmem_reserve(void) 8514 { 8515 struct pglist_data *pgdat; 8516 enum zone_type i, j; 8517 8518 for_each_online_pgdat(pgdat) { 8519 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 8520 struct zone *zone = &pgdat->node_zones[i]; 8521 int ratio = sysctl_lowmem_reserve_ratio[i]; 8522 bool clear = !ratio || !zone_managed_pages(zone); 8523 unsigned long managed_pages = 0; 8524 8525 for (j = i + 1; j < MAX_NR_ZONES; j++) { 8526 struct zone *upper_zone = &pgdat->node_zones[j]; 8527 8528 managed_pages += zone_managed_pages(upper_zone); 8529 8530 if (clear) 8531 zone->lowmem_reserve[j] = 0; 8532 else 8533 zone->lowmem_reserve[j] = managed_pages / ratio; 8534 } 8535 } 8536 } 8537 8538 /* update totalreserve_pages */ 8539 calculate_totalreserve_pages(); 8540 } 8541 8542 static void __setup_per_zone_wmarks(void) 8543 { 8544 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 8545 unsigned long lowmem_pages = 0; 8546 struct zone *zone; 8547 unsigned long flags; 8548 8549 /* Calculate total number of !ZONE_HIGHMEM pages */ 8550 for_each_zone(zone) { 8551 if (!is_highmem(zone)) 8552 lowmem_pages += zone_managed_pages(zone); 8553 } 8554 8555 for_each_zone(zone) { 8556 u64 tmp; 8557 8558 spin_lock_irqsave(&zone->lock, flags); 8559 tmp = (u64)pages_min * zone_managed_pages(zone); 8560 do_div(tmp, lowmem_pages); 8561 if (is_highmem(zone)) { 8562 /* 8563 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 8564 * need highmem pages, so cap pages_min to a small 8565 * value here. 8566 * 8567 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 8568 * deltas control async page reclaim, and so should 8569 * not be capped for highmem. 8570 */ 8571 unsigned long min_pages; 8572 8573 min_pages = zone_managed_pages(zone) / 1024; 8574 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 8575 zone->_watermark[WMARK_MIN] = min_pages; 8576 } else { 8577 /* 8578 * If it's a lowmem zone, reserve a number of pages 8579 * proportionate to the zone's size. 8580 */ 8581 zone->_watermark[WMARK_MIN] = tmp; 8582 } 8583 8584 /* 8585 * Set the kswapd watermarks distance according to the 8586 * scale factor in proportion to available memory, but 8587 * ensure a minimum size on small systems. 8588 */ 8589 tmp = max_t(u64, tmp >> 2, 8590 mult_frac(zone_managed_pages(zone), 8591 watermark_scale_factor, 10000)); 8592 8593 zone->watermark_boost = 0; 8594 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 8595 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 8596 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 8597 8598 spin_unlock_irqrestore(&zone->lock, flags); 8599 } 8600 8601 /* update totalreserve_pages */ 8602 calculate_totalreserve_pages(); 8603 } 8604 8605 /** 8606 * setup_per_zone_wmarks - called when min_free_kbytes changes 8607 * or when memory is hot-{added|removed} 8608 * 8609 * Ensures that the watermark[min,low,high] values for each zone are set 8610 * correctly with respect to min_free_kbytes. 8611 */ 8612 void setup_per_zone_wmarks(void) 8613 { 8614 struct zone *zone; 8615 static DEFINE_SPINLOCK(lock); 8616 8617 spin_lock(&lock); 8618 __setup_per_zone_wmarks(); 8619 spin_unlock(&lock); 8620 8621 /* 8622 * The watermark size have changed so update the pcpu batch 8623 * and high limits or the limits may be inappropriate. 8624 */ 8625 for_each_zone(zone) 8626 zone_pcp_update(zone, 0); 8627 } 8628 8629 /* 8630 * Initialise min_free_kbytes. 8631 * 8632 * For small machines we want it small (128k min). 
For large machines 8633 * we want it large (256MB max). But it is not linear, because network 8634 * bandwidth does not increase linearly with machine size. We use 8635 * 8636 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 8637 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 8638 * 8639 * which yields 8640 * 8641 * 16MB: 512k 8642 * 32MB: 724k 8643 * 64MB: 1024k 8644 * 128MB: 1448k 8645 * 256MB: 2048k 8646 * 512MB: 2896k 8647 * 1024MB: 4096k 8648 * 2048MB: 5792k 8649 * 4096MB: 8192k 8650 * 8192MB: 11584k 8651 * 16384MB: 16384k 8652 */ 8653 void calculate_min_free_kbytes(void) 8654 { 8655 unsigned long lowmem_kbytes; 8656 int new_min_free_kbytes; 8657 8658 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 8659 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 8660 8661 if (new_min_free_kbytes > user_min_free_kbytes) 8662 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 8663 else 8664 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 8665 new_min_free_kbytes, user_min_free_kbytes); 8666 8667 } 8668 8669 int __meminit init_per_zone_wmark_min(void) 8670 { 8671 calculate_min_free_kbytes(); 8672 setup_per_zone_wmarks(); 8673 refresh_zone_stat_thresholds(); 8674 setup_per_zone_lowmem_reserve(); 8675 8676 #ifdef CONFIG_NUMA 8677 setup_min_unmapped_ratio(); 8678 setup_min_slab_ratio(); 8679 #endif 8680 8681 khugepaged_min_free_kbytes_update(); 8682 8683 return 0; 8684 } 8685 postcore_initcall(init_per_zone_wmark_min) 8686 8687 /* 8688 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 8689 * that we can call two helper functions whenever min_free_kbytes 8690 * changes. 8691 */ 8692 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 8693 void *buffer, size_t *length, loff_t *ppos) 8694 { 8695 int rc; 8696 8697 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8698 if (rc) 8699 return rc; 8700 8701 if (write) { 8702 user_min_free_kbytes = min_free_kbytes; 8703 setup_per_zone_wmarks(); 8704 } 8705 return 0; 8706 } 8707 8708 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 8709 void *buffer, size_t *length, loff_t *ppos) 8710 { 8711 int rc; 8712 8713 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8714 if (rc) 8715 return rc; 8716 8717 if (write) 8718 setup_per_zone_wmarks(); 8719 8720 return 0; 8721 } 8722 8723 #ifdef CONFIG_NUMA 8724 static void setup_min_unmapped_ratio(void) 8725 { 8726 pg_data_t *pgdat; 8727 struct zone *zone; 8728 8729 for_each_online_pgdat(pgdat) 8730 pgdat->min_unmapped_pages = 0; 8731 8732 for_each_zone(zone) 8733 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 8734 sysctl_min_unmapped_ratio) / 100; 8735 } 8736 8737 8738 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 8739 void *buffer, size_t *length, loff_t *ppos) 8740 { 8741 int rc; 8742 8743 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8744 if (rc) 8745 return rc; 8746 8747 setup_min_unmapped_ratio(); 8748 8749 return 0; 8750 } 8751 8752 static void setup_min_slab_ratio(void) 8753 { 8754 pg_data_t *pgdat; 8755 struct zone *zone; 8756 8757 for_each_online_pgdat(pgdat) 8758 pgdat->min_slab_pages = 0; 8759 8760 for_each_zone(zone) 8761 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 8762 sysctl_min_slab_ratio) / 100; 8763 } 8764 8765 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 8766 void *buffer, size_t *length, loff_t *ppos) 8767 
{ 8768 int rc; 8769 8770 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8771 if (rc) 8772 return rc; 8773 8774 setup_min_slab_ratio(); 8775 8776 return 0; 8777 } 8778 #endif 8779 8780 /* 8781 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 8782 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 8783 * whenever sysctl_lowmem_reserve_ratio changes. 8784 * 8785 * The reserve ratio obviously has absolutely no relation with the 8786 * minimum watermarks. The lowmem reserve ratio can only make sense 8787 * if in function of the boot time zone sizes. 8788 */ 8789 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, 8790 void *buffer, size_t *length, loff_t *ppos) 8791 { 8792 int i; 8793 8794 proc_dointvec_minmax(table, write, buffer, length, ppos); 8795 8796 for (i = 0; i < MAX_NR_ZONES; i++) { 8797 if (sysctl_lowmem_reserve_ratio[i] < 1) 8798 sysctl_lowmem_reserve_ratio[i] = 0; 8799 } 8800 8801 setup_per_zone_lowmem_reserve(); 8802 return 0; 8803 } 8804 8805 /* 8806 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 8807 * cpu. It is the fraction of total pages in each zone that a hot per cpu 8808 * pagelist can have before it gets flushed back to buddy allocator. 8809 */ 8810 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, 8811 int write, void *buffer, size_t *length, loff_t *ppos) 8812 { 8813 struct zone *zone; 8814 int old_percpu_pagelist_high_fraction; 8815 int ret; 8816 8817 mutex_lock(&pcp_batch_high_lock); 8818 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 8819 8820 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 8821 if (!write || ret < 0) 8822 goto out; 8823 8824 /* Sanity checking to avoid pcp imbalance */ 8825 if (percpu_pagelist_high_fraction && 8826 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 8827 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 8828 ret = -EINVAL; 8829 goto out; 8830 } 8831 8832 /* No change? */ 8833 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 8834 goto out; 8835 8836 for_each_populated_zone(zone) 8837 zone_set_pageset_high_and_batch(zone, 0); 8838 out: 8839 mutex_unlock(&pcp_batch_high_lock); 8840 return ret; 8841 } 8842 8843 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES 8844 /* 8845 * Returns the number of pages that arch has reserved but 8846 * is not known to alloc_large_system_hash(). 8847 */ 8848 static unsigned long __init arch_reserved_kernel_pages(void) 8849 { 8850 return 0; 8851 } 8852 #endif 8853 8854 /* 8855 * Adaptive scale is meant to reduce sizes of hash tables on large memory 8856 * machines. As memory size is increased the scale is also increased but at 8857 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory 8858 * quadruples the scale is increased by one, which means the size of hash table 8859 * only doubles, instead of quadrupling as well. 8860 * Because 32-bit systems cannot have large physical memory, where this scaling 8861 * makes sense, it is disabled on such platforms. 
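 *
 * Illustration with invented sizes: on a 256 GiB machine with 4 KiB pages
 * numentries starts out around 64M pages while ADAPT_SCALE_NPAGES is 16M,
 * so the adaptation loop in alloc_large_system_hash() below runs once
 * (16M -> 64M) and bumps scale by one, halving the number of hash buckets
 * relative to plain linear scaling.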
8862 */ 8863 #if __BITS_PER_LONG > 32 8864 #define ADAPT_SCALE_BASE (64ul << 30) 8865 #define ADAPT_SCALE_SHIFT 2 8866 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 8867 #endif 8868 8869 /* 8870 * allocate a large system hash table from bootmem 8871 * - it is assumed that the hash table must contain an exact power-of-2 8872 * quantity of entries 8873 * - limit is the number of hash buckets, not the total allocation size 8874 */ 8875 void *__init alloc_large_system_hash(const char *tablename, 8876 unsigned long bucketsize, 8877 unsigned long numentries, 8878 int scale, 8879 int flags, 8880 unsigned int *_hash_shift, 8881 unsigned int *_hash_mask, 8882 unsigned long low_limit, 8883 unsigned long high_limit) 8884 { 8885 unsigned long long max = high_limit; 8886 unsigned long log2qty, size; 8887 void *table = NULL; 8888 gfp_t gfp_flags; 8889 bool virt; 8890 bool huge; 8891 8892 /* allow the kernel cmdline to have a say */ 8893 if (!numentries) { 8894 /* round applicable memory size up to nearest megabyte */ 8895 numentries = nr_kernel_pages; 8896 numentries -= arch_reserved_kernel_pages(); 8897 8898 /* It isn't necessary when PAGE_SIZE >= 1MB */ 8899 if (PAGE_SHIFT < 20) 8900 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 8901 8902 #if __BITS_PER_LONG > 32 8903 if (!high_limit) { 8904 unsigned long adapt; 8905 8906 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 8907 adapt <<= ADAPT_SCALE_SHIFT) 8908 scale++; 8909 } 8910 #endif 8911 8912 /* limit to 1 bucket per 2^scale bytes of low memory */ 8913 if (scale > PAGE_SHIFT) 8914 numentries >>= (scale - PAGE_SHIFT); 8915 else 8916 numentries <<= (PAGE_SHIFT - scale); 8917 8918 /* Make sure we've got at least a 0-order allocation.. */ 8919 if (unlikely(flags & HASH_SMALL)) { 8920 /* Makes no sense without HASH_EARLY */ 8921 WARN_ON(!(flags & HASH_EARLY)); 8922 if (!(numentries >> *_hash_shift)) { 8923 numentries = 1UL << *_hash_shift; 8924 BUG_ON(!numentries); 8925 } 8926 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 8927 numentries = PAGE_SIZE / bucketsize; 8928 } 8929 numentries = roundup_pow_of_two(numentries); 8930 8931 /* limit allocation size to 1/16 total memory by default */ 8932 if (max == 0) { 8933 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 8934 do_div(max, bucketsize); 8935 } 8936 max = min(max, 0x80000000ULL); 8937 8938 if (numentries < low_limit) 8939 numentries = low_limit; 8940 if (numentries > max) 8941 numentries = max; 8942 8943 log2qty = ilog2(numentries); 8944 8945 gfp_flags = (flags & HASH_ZERO) ? 
GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 8946 do { 8947 virt = false; 8948 size = bucketsize << log2qty; 8949 if (flags & HASH_EARLY) { 8950 if (flags & HASH_ZERO) 8951 table = memblock_alloc(size, SMP_CACHE_BYTES); 8952 else 8953 table = memblock_alloc_raw(size, 8954 SMP_CACHE_BYTES); 8955 } else if (get_order(size) >= MAX_ORDER || hashdist) { 8956 table = vmalloc_huge(size, gfp_flags); 8957 virt = true; 8958 if (table) 8959 huge = is_vm_area_hugepages(table); 8960 } else { 8961 /* 8962 * If bucketsize is not a power-of-two, we may free 8963 * some pages at the end of hash table which 8964 * alloc_pages_exact() automatically does 8965 */ 8966 table = alloc_pages_exact(size, gfp_flags); 8967 kmemleak_alloc(table, size, 1, gfp_flags); 8968 } 8969 } while (!table && size > PAGE_SIZE && --log2qty); 8970 8971 if (!table) 8972 panic("Failed to allocate %s hash table\n", tablename); 8973 8974 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 8975 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, 8976 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); 8977 8978 if (_hash_shift) 8979 *_hash_shift = log2qty; 8980 if (_hash_mask) 8981 *_hash_mask = (1 << log2qty) - 1; 8982 8983 return table; 8984 } 8985 8986 #ifdef CONFIG_CONTIG_ALLOC 8987 #if defined(CONFIG_DYNAMIC_DEBUG) || \ 8988 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) 8989 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 8990 static void alloc_contig_dump_pages(struct list_head *page_list) 8991 { 8992 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 8993 8994 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 8995 struct page *page; 8996 8997 dump_stack(); 8998 list_for_each_entry(page, page_list, lru) 8999 dump_page(page, "migration failure"); 9000 } 9001 } 9002 #else 9003 static inline void alloc_contig_dump_pages(struct list_head *page_list) 9004 { 9005 } 9006 #endif 9007 9008 /* [start, end) must belong to a single zone. */ 9009 int __alloc_contig_migrate_range(struct compact_control *cc, 9010 unsigned long start, unsigned long end) 9011 { 9012 /* This function is based on compact_zone() from compaction.c. */ 9013 unsigned int nr_reclaimed; 9014 unsigned long pfn = start; 9015 unsigned int tries = 0; 9016 int ret = 0; 9017 struct migration_target_control mtc = { 9018 .nid = zone_to_nid(cc->zone), 9019 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 9020 }; 9021 9022 lru_cache_disable(); 9023 9024 while (pfn < end || !list_empty(&cc->migratepages)) { 9025 if (fatal_signal_pending(current)) { 9026 ret = -EINTR; 9027 break; 9028 } 9029 9030 if (list_empty(&cc->migratepages)) { 9031 cc->nr_migratepages = 0; 9032 ret = isolate_migratepages_range(cc, pfn, end); 9033 if (ret && ret != -EAGAIN) 9034 break; 9035 pfn = cc->migrate_pfn; 9036 tries = 0; 9037 } else if (++tries == 5) { 9038 ret = -EBUSY; 9039 break; 9040 } 9041 9042 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 9043 &cc->migratepages); 9044 cc->nr_migratepages -= nr_reclaimed; 9045 9046 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 9047 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 9048 9049 /* 9050 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 9051 * to retry again over this error, so do the same here. 
9052 */ 9053 if (ret == -ENOMEM) 9054 break; 9055 } 9056 9057 lru_cache_enable(); 9058 if (ret < 0) { 9059 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 9060 alloc_contig_dump_pages(&cc->migratepages); 9061 putback_movable_pages(&cc->migratepages); 9062 return ret; 9063 } 9064 return 0; 9065 } 9066 9067 /** 9068 * alloc_contig_range() -- tries to allocate given range of pages 9069 * @start: start PFN to allocate 9070 * @end: one-past-the-last PFN to allocate 9071 * @migratetype: migratetype of the underlying pageblocks (either 9072 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 9073 * in range must have the same migratetype and it must 9074 * be either of the two. 9075 * @gfp_mask: GFP mask to use during compaction 9076 * 9077 * The PFN range does not have to be pageblock aligned. The PFN range must 9078 * belong to a single zone. 9079 * 9080 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 9081 * pageblocks in the range. Once isolated, the pageblocks should not 9082 * be modified by others. 9083 * 9084 * Return: zero on success or negative error code. On success all 9085 * pages which PFN is in [start, end) are allocated for the caller and 9086 * need to be freed with free_contig_range(). 9087 */ 9088 int alloc_contig_range(unsigned long start, unsigned long end, 9089 unsigned migratetype, gfp_t gfp_mask) 9090 { 9091 unsigned long outer_start, outer_end; 9092 int order; 9093 int ret = 0; 9094 9095 struct compact_control cc = { 9096 .nr_migratepages = 0, 9097 .order = -1, 9098 .zone = page_zone(pfn_to_page(start)), 9099 .mode = MIGRATE_SYNC, 9100 .ignore_skip_hint = true, 9101 .no_set_skip_hint = true, 9102 .gfp_mask = current_gfp_context(gfp_mask), 9103 .alloc_contig = true, 9104 }; 9105 INIT_LIST_HEAD(&cc.migratepages); 9106 9107 /* 9108 * What we do here is we mark all pageblocks in range as 9109 * MIGRATE_ISOLATE. Because pageblock and max order pages may 9110 * have different sizes, and due to the way page allocator 9111 * work, start_isolate_page_range() has special handlings for this. 9112 * 9113 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 9114 * migrate the pages from an unaligned range (ie. pages that 9115 * we are interested in). This will put all the pages in 9116 * range back to page allocator as MIGRATE_ISOLATE. 9117 * 9118 * When this is done, we take the pages in range from page 9119 * allocator removing them from the buddy system. This way 9120 * page allocator will never consider using them. 9121 * 9122 * This lets us mark the pageblocks back as 9123 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 9124 * aligned range but not in the unaligned, original range are 9125 * put back to page allocator so that buddy can use them. 9126 */ 9127 9128 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); 9129 if (ret) 9130 goto done; 9131 9132 drain_all_pages(cc.zone); 9133 9134 /* 9135 * In case of -EBUSY, we'd like to know which page causes problem. 9136 * So, just fall through. test_pages_isolated() has a tracepoint 9137 * which will report the busy page. 9138 * 9139 * It is possible that busy pages could become available before 9140 * the call to test_pages_isolated, and the range will actually be 9141 * allocated. So, if we fall through be sure to clear ret so that 9142 * -EBUSY is not accidentally used or returned to caller. 
9143 */
9144 ret = __alloc_contig_migrate_range(&cc, start, end);
9145 if (ret && ret != -EBUSY)
9146 goto done;
9147 ret = 0;
9148
9149 /*
9150 * Pages from [start, end) lie within pageblock_nr_pages-aligned
9151 * blocks that are marked as MIGRATE_ISOLATE. What's
9152 * more, all pages in [start, end) are free in the page allocator.
9153 * We are now going to allocate all pages from
9154 * [start, end) (that is, remove them from the page allocator).
9155 *
9156 * The only problem is that pages at the beginning and at the
9157 * end of the range of interest may not be aligned with the pages that
9158 * the page allocator holds, i.e. they can be part of higher-order
9159 * pages. Because of this, we reserve the bigger range and,
9160 * once this is done, free the pages we are not interested in.
9161 *
9162 * We don't have to hold zone->lock here because the pages are
9163 * isolated and thus won't get removed from the buddy allocator.
9164 */
9165
9166 order = 0;
9167 outer_start = start;
9168 while (!PageBuddy(pfn_to_page(outer_start))) {
9169 if (++order >= MAX_ORDER) {
9170 outer_start = start;
9171 break;
9172 }
9173 outer_start &= ~0UL << order;
9174 }
9175
9176 if (outer_start != start) {
9177 order = buddy_order(pfn_to_page(outer_start));
9178
9179 /*
9180 * The outer_start page could be a small-order buddy page that
9181 * doesn't include the start page. Adjust outer_start
9182 * in this case so the failed page is reported properly
9183 * by the tracepoint in test_pages_isolated().
9184 */
9185 if (outer_start + (1UL << order) <= start)
9186 outer_start = start;
9187 }
9188
9189 /* Make sure the range is really isolated. */
9190 if (test_pages_isolated(outer_start, end, 0)) {
9191 ret = -EBUSY;
9192 goto done;
9193 }
9194
9195 /* Grab isolated pages from freelists. */
9196 outer_end = isolate_freepages_range(&cc, outer_start, end);
9197 if (!outer_end) {
9198 ret = -EBUSY;
9199 goto done;
9200 }
9201
9202 /* Free head and tail (if any) */
9203 if (start != outer_start)
9204 free_contig_range(outer_start, start - outer_start);
9205 if (end != outer_end)
9206 free_contig_range(end, outer_end - end);
9207
9208 done:
9209 undo_isolate_page_range(start, end, migratetype);
9210 return ret;
9211 }
9212 EXPORT_SYMBOL(alloc_contig_range);
9213
9214 static int __alloc_contig_pages(unsigned long start_pfn,
9215 unsigned long nr_pages, gfp_t gfp_mask)
9216 {
9217 unsigned long end_pfn = start_pfn + nr_pages;
9218
9219 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9220 gfp_mask);
9221 }
9222
9223 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9224 unsigned long nr_pages)
9225 {
9226 unsigned long i, end_pfn = start_pfn + nr_pages;
9227 struct page *page;
9228
9229 for (i = start_pfn; i < end_pfn; i++) {
9230 page = pfn_to_online_page(i);
9231 if (!page)
9232 return false;
9233
9234 if (page_zone(page) != z)
9235 return false;
9236
9237 if (PageReserved(page))
9238 return false;
9239 }
9240 return true;
9241 }
9242
9243 static bool zone_spans_last_pfn(const struct zone *zone,
9244 unsigned long start_pfn, unsigned long nr_pages)
9245 {
9246 unsigned long last_pfn = start_pfn + nr_pages - 1;
9247
9248 return zone_spans_pfn(zone, last_pfn);
9249 }
9250
9251 /**
9252 * alloc_contig_pages() -- tries to find and allocate a contiguous range of pages
9253 * @nr_pages: Number of contiguous pages to allocate
9254 * @gfp_mask: GFP mask to limit the search and to use during compaction
9255 * @nid: Target node
9256 * @nodemask: Mask for other possible nodes
9257 *
9258 * This routine is a wrapper around
alloc_contig_range(). It scans over zones 9259 * on an applicable zonelist to find a contiguous pfn range which can then be 9260 * tried for allocation with alloc_contig_range(). This routine is intended 9261 * for allocation requests which can not be fulfilled with the buddy allocator. 9262 * 9263 * The allocated memory is always aligned to a page boundary. If nr_pages is a 9264 * power of two, then allocated range is also guaranteed to be aligned to same 9265 * nr_pages (e.g. 1GB request would be aligned to 1GB). 9266 * 9267 * Allocated pages can be freed with free_contig_range() or by manually calling 9268 * __free_page() on each allocated page. 9269 * 9270 * Return: pointer to contiguous pages on success, or NULL if not successful. 9271 */ 9272 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, 9273 int nid, nodemask_t *nodemask) 9274 { 9275 unsigned long ret, pfn, flags; 9276 struct zonelist *zonelist; 9277 struct zone *zone; 9278 struct zoneref *z; 9279 9280 zonelist = node_zonelist(nid, gfp_mask); 9281 for_each_zone_zonelist_nodemask(zone, z, zonelist, 9282 gfp_zone(gfp_mask), nodemask) { 9283 spin_lock_irqsave(&zone->lock, flags); 9284 9285 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 9286 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 9287 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 9288 /* 9289 * We release the zone lock here because 9290 * alloc_contig_range() will also lock the zone 9291 * at some point. If there's an allocation 9292 * spinning on this lock, it may win the race 9293 * and cause alloc_contig_range() to fail... 9294 */ 9295 spin_unlock_irqrestore(&zone->lock, flags); 9296 ret = __alloc_contig_pages(pfn, nr_pages, 9297 gfp_mask); 9298 if (!ret) 9299 return pfn_to_page(pfn); 9300 spin_lock_irqsave(&zone->lock, flags); 9301 } 9302 pfn += nr_pages; 9303 } 9304 spin_unlock_irqrestore(&zone->lock, flags); 9305 } 9306 return NULL; 9307 } 9308 #endif /* CONFIG_CONTIG_ALLOC */ 9309 9310 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 9311 { 9312 unsigned long count = 0; 9313 9314 for (; nr_pages--; pfn++) { 9315 struct page *page = pfn_to_page(pfn); 9316 9317 count += page_count(page) != 1; 9318 __free_page(page); 9319 } 9320 WARN(count != 0, "%lu pages are still in use!\n", count); 9321 } 9322 EXPORT_SYMBOL(free_contig_range); 9323 9324 /* 9325 * The zone indicated has a new number of managed_pages; batch sizes and percpu 9326 * page high values need to be recalculated. 9327 */ 9328 void zone_pcp_update(struct zone *zone, int cpu_online) 9329 { 9330 mutex_lock(&pcp_batch_high_lock); 9331 zone_set_pageset_high_and_batch(zone, cpu_online); 9332 mutex_unlock(&pcp_batch_high_lock); 9333 } 9334 9335 /* 9336 * Effectively disable pcplists for the zone by setting the high limit to 0 9337 * and draining all cpus. A concurrent page freeing on another CPU that's about 9338 * to put the page on pcplist will either finish before the drain and the page 9339 * will be drained, or observe the new high limit and skip the pcplist. 9340 * 9341 * Must be paired with a call to zone_pcp_enable(). 
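*
* A caller typically brackets work that must not race with pcplist refills,
* roughly (illustrative sketch only; locking context and error handling
* elided):
*
*	zone_pcp_disable(zone);
*	... operate on the zone's free lists, e.g. isolate a PFN range ...
*	zone_pcp_enable(zone);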
9342 */ 9343 void zone_pcp_disable(struct zone *zone) 9344 { 9345 mutex_lock(&pcp_batch_high_lock); 9346 __zone_set_pageset_high_and_batch(zone, 0, 1); 9347 __drain_all_pages(zone, true); 9348 } 9349 9350 void zone_pcp_enable(struct zone *zone) 9351 { 9352 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); 9353 mutex_unlock(&pcp_batch_high_lock); 9354 } 9355 9356 void zone_pcp_reset(struct zone *zone) 9357 { 9358 int cpu; 9359 struct per_cpu_zonestat *pzstats; 9360 9361 if (zone->per_cpu_pageset != &boot_pageset) { 9362 for_each_online_cpu(cpu) { 9363 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 9364 drain_zonestat(zone, pzstats); 9365 } 9366 free_percpu(zone->per_cpu_pageset); 9367 free_percpu(zone->per_cpu_zonestats); 9368 zone->per_cpu_pageset = &boot_pageset; 9369 zone->per_cpu_zonestats = &boot_zonestats; 9370 } 9371 } 9372 9373 #ifdef CONFIG_MEMORY_HOTREMOVE 9374 /* 9375 * All pages in the range must be in a single zone, must not contain holes, 9376 * must span full sections, and must be isolated before calling this function. 9377 */ 9378 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 9379 { 9380 unsigned long pfn = start_pfn; 9381 struct page *page; 9382 struct zone *zone; 9383 unsigned int order; 9384 unsigned long flags; 9385 9386 offline_mem_sections(pfn, end_pfn); 9387 zone = page_zone(pfn_to_page(pfn)); 9388 spin_lock_irqsave(&zone->lock, flags); 9389 while (pfn < end_pfn) { 9390 page = pfn_to_page(pfn); 9391 /* 9392 * The HWPoisoned page may be not in buddy system, and 9393 * page_count() is not 0. 9394 */ 9395 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 9396 pfn++; 9397 continue; 9398 } 9399 /* 9400 * At this point all remaining PageOffline() pages have a 9401 * reference count of 0 and can simply be skipped. 9402 */ 9403 if (PageOffline(page)) { 9404 BUG_ON(page_count(page)); 9405 BUG_ON(PageBuddy(page)); 9406 pfn++; 9407 continue; 9408 } 9409 9410 BUG_ON(page_count(page)); 9411 BUG_ON(!PageBuddy(page)); 9412 order = buddy_order(page); 9413 del_page_from_free_list(page, zone, order); 9414 pfn += (1 << order); 9415 } 9416 spin_unlock_irqrestore(&zone->lock, flags); 9417 } 9418 #endif 9419 9420 /* 9421 * This function returns a stable result only if called under zone lock. 9422 */ 9423 bool is_free_buddy_page(struct page *page) 9424 { 9425 unsigned long pfn = page_to_pfn(page); 9426 unsigned int order; 9427 9428 for (order = 0; order < MAX_ORDER; order++) { 9429 struct page *page_head = page - (pfn & ((1 << order) - 1)); 9430 9431 if (PageBuddy(page_head) && 9432 buddy_order_unsafe(page_head) >= order) 9433 break; 9434 } 9435 9436 return order < MAX_ORDER; 9437 } 9438 EXPORT_SYMBOL(is_free_buddy_page); 9439 9440 #ifdef CONFIG_MEMORY_FAILURE 9441 /* 9442 * Break down a higher-order page in sub-pages, and keep our target out of 9443 * buddy allocator. 
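*
* Illustrative example (hypothetical PFNs): splitting an order-3 buddy
* covering pages [0..7] around target page 5 proceeds as
*
*	free [0..3] as order-2, keep [4..7]
*	free [6..7] as order-1, keep [4..5]
*	free [4]    as order-0, keep [5]   <- only the target stays off the free lists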
9444 */ 9445 static void break_down_buddy_pages(struct zone *zone, struct page *page, 9446 struct page *target, int low, int high, 9447 int migratetype) 9448 { 9449 unsigned long size = 1 << high; 9450 struct page *current_buddy, *next_page; 9451 9452 while (high > low) { 9453 high--; 9454 size >>= 1; 9455 9456 if (target >= &page[size]) { 9457 next_page = page + size; 9458 current_buddy = page; 9459 } else { 9460 next_page = page; 9461 current_buddy = page + size; 9462 } 9463 9464 if (set_page_guard(zone, current_buddy, high, migratetype)) 9465 continue; 9466 9467 if (current_buddy != target) { 9468 add_to_free_list(current_buddy, zone, high, migratetype); 9469 set_buddy_order(current_buddy, high); 9470 page = next_page; 9471 } 9472 } 9473 } 9474 9475 /* 9476 * Take a page that will be marked as poisoned off the buddy allocator. 9477 */ 9478 bool take_page_off_buddy(struct page *page) 9479 { 9480 struct zone *zone = page_zone(page); 9481 unsigned long pfn = page_to_pfn(page); 9482 unsigned long flags; 9483 unsigned int order; 9484 bool ret = false; 9485 9486 spin_lock_irqsave(&zone->lock, flags); 9487 for (order = 0; order < MAX_ORDER; order++) { 9488 struct page *page_head = page - (pfn & ((1 << order) - 1)); 9489 int page_order = buddy_order(page_head); 9490 9491 if (PageBuddy(page_head) && page_order >= order) { 9492 unsigned long pfn_head = page_to_pfn(page_head); 9493 int migratetype = get_pfnblock_migratetype(page_head, 9494 pfn_head); 9495 9496 del_page_from_free_list(page_head, zone, page_order); 9497 break_down_buddy_pages(zone, page_head, page, 0, 9498 page_order, migratetype); 9499 SetPageHWPoisonTakenOff(page); 9500 if (!is_migrate_isolate(migratetype)) 9501 __mod_zone_freepage_state(zone, -1, migratetype); 9502 ret = true; 9503 break; 9504 } 9505 if (page_count(page_head) > 0) 9506 break; 9507 } 9508 spin_unlock_irqrestore(&zone->lock, flags); 9509 return ret; 9510 } 9511 9512 /* 9513 * Cancel takeoff done by take_page_off_buddy(). 9514 */ 9515 bool put_page_back_buddy(struct page *page) 9516 { 9517 struct zone *zone = page_zone(page); 9518 unsigned long pfn = page_to_pfn(page); 9519 unsigned long flags; 9520 int migratetype = get_pfnblock_migratetype(page, pfn); 9521 bool ret = false; 9522 9523 spin_lock_irqsave(&zone->lock, flags); 9524 if (put_page_testzero(page)) { 9525 ClearPageHWPoisonTakenOff(page); 9526 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 9527 if (TestClearPageHWPoison(page)) { 9528 ret = true; 9529 } 9530 } 9531 spin_unlock_irqrestore(&zone->lock, flags); 9532 9533 return ret; 9534 } 9535 #endif 9536 9537 #ifdef CONFIG_ZONE_DMA 9538 bool has_managed_dma(void) 9539 { 9540 struct pglist_data *pgdat; 9541 9542 for_each_online_pgdat(pgdat) { 9543 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 9544 9545 if (managed_zone(zone)) 9546 return true; 9547 } 9548 return false; 9549 } 9550 #endif /* CONFIG_ZONE_DMA */ 9551
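/*
 * Illustrative usage sketch (not part of this file's interfaces): with
 * CONFIG_CONTIG_ALLOC enabled, a caller that needs a large physically
 * contiguous buffer might do roughly the following. The names "nr_pages"
 * and "nid" are hypothetical, and the GFP mask depends on the caller's
 * context.
 *
 *	struct page *pages;
 *
 *	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, nid, NULL);
 *	if (!pages)
 *		return -ENOMEM;
 *	... use the range starting at page_to_pfn(pages) ...
 *	free_contig_range(page_to_pfn(pages), nr_pages);
 */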