1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/page_alloc.c 4 * 5 * Manages the free list, the system allocates free pages here. 6 * Note that kmalloc() lives in slab.c 7 * 8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 9 * Swap reorganised 29.12.95, Stephen Tweedie 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 16 */ 17 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/highmem.h> 21 #include <linux/swap.h> 22 #include <linux/swapops.h> 23 #include <linux/interrupt.h> 24 #include <linux/pagemap.h> 25 #include <linux/jiffies.h> 26 #include <linux/memblock.h> 27 #include <linux/compiler.h> 28 #include <linux/kernel.h> 29 #include <linux/kasan.h> 30 #include <linux/module.h> 31 #include <linux/suspend.h> 32 #include <linux/pagevec.h> 33 #include <linux/blkdev.h> 34 #include <linux/slab.h> 35 #include <linux/ratelimit.h> 36 #include <linux/oom.h> 37 #include <linux/topology.h> 38 #include <linux/sysctl.h> 39 #include <linux/cpu.h> 40 #include <linux/cpuset.h> 41 #include <linux/memory_hotplug.h> 42 #include <linux/nodemask.h> 43 #include <linux/vmalloc.h> 44 #include <linux/vmstat.h> 45 #include <linux/mempolicy.h> 46 #include <linux/memremap.h> 47 #include <linux/stop_machine.h> 48 #include <linux/random.h> 49 #include <linux/sort.h> 50 #include <linux/pfn.h> 51 #include <linux/backing-dev.h> 52 #include <linux/fault-inject.h> 53 #include <linux/page-isolation.h> 54 #include <linux/debugobjects.h> 55 #include <linux/kmemleak.h> 56 #include <linux/compaction.h> 57 #include <trace/events/kmem.h> 58 #include <trace/events/oom.h> 59 #include <linux/prefetch.h> 60 #include <linux/mm_inline.h> 61 #include <linux/mmu_notifier.h> 62 #include <linux/migrate.h> 63 #include <linux/hugetlb.h> 64 #include <linux/sched/rt.h> 65 #include <linux/sched/mm.h> 66 #include <linux/page_owner.h> 67 #include <linux/page_table_check.h> 68 #include <linux/kthread.h> 69 #include <linux/memcontrol.h> 70 #include <linux/ftrace.h> 71 #include <linux/lockdep.h> 72 #include <linux/nmi.h> 73 #include <linux/psi.h> 74 #include <linux/padata.h> 75 #include <linux/khugepaged.h> 76 #include <linux/buffer_head.h> 77 #include <asm/sections.h> 78 #include <asm/tlbflush.h> 79 #include <asm/div64.h> 80 #include "internal.h" 81 #include "shuffle.h" 82 #include "page_reporting.h" 83 84 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 85 typedef int __bitwise fpi_t; 86 87 /* No special request */ 88 #define FPI_NONE ((__force fpi_t)0) 89 90 /* 91 * Skip free page reporting notification for the (possibly merged) page. 92 * This does not hinder free page reporting from grabbing the page, 93 * reporting it and marking it "reported" - it only skips notifying 94 * the free page reporting infrastructure about a newly freed page. For 95 * example, used when temporarily pulling a page from a freelist and 96 * putting it back unmodified. 97 */ 98 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 99 100 /* 101 * Place the (possibly merged) page to the tail of the freelist. Will ignore 102 * page shuffling (relevant code - e.g., memory onlining - is expected to 103 * shuffle the whole zone). 
104 * 105 * Note: No code should rely on this flag for correctness - it's purely 106 * to allow for optimizations when handing back either fresh pages 107 * (memory onlining) or untouched pages (page isolation, free page 108 * reporting). 109 */ 110 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 111 112 /* 113 * Don't poison memory with KASAN (only for the tag-based modes). 114 * During boot, all non-reserved memblock memory is exposed to page_alloc. 115 * Poisoning all that memory lengthens boot time, especially on systems with 116 * large amount of RAM. This flag is used to skip that poisoning. 117 * This is only done for the tag-based KASAN modes, as those are able to 118 * detect memory corruptions with the memory tags assigned by default. 119 * All memory allocated normally after boot gets poisoned as usual. 120 */ 121 #define FPI_SKIP_KASAN_POISON ((__force fpi_t)BIT(2)) 122 123 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 124 static DEFINE_MUTEX(pcp_batch_high_lock); 125 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 126 127 struct pagesets { 128 local_lock_t lock; 129 }; 130 static DEFINE_PER_CPU(struct pagesets, pagesets) = { 131 .lock = INIT_LOCAL_LOCK(lock), 132 }; 133 134 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 135 DEFINE_PER_CPU(int, numa_node); 136 EXPORT_PER_CPU_SYMBOL(numa_node); 137 #endif 138 139 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 140 141 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 142 /* 143 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 144 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 145 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 146 * defined in <linux/topology.h>. 147 */ 148 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 149 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 150 #endif 151 152 /* work_structs for global per-cpu drains */ 153 struct pcpu_drain { 154 struct zone *zone; 155 struct work_struct work; 156 }; 157 static DEFINE_MUTEX(pcpu_drain_mutex); 158 static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain); 159 160 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 161 volatile unsigned long latent_entropy __latent_entropy; 162 EXPORT_SYMBOL(latent_entropy); 163 #endif 164 165 /* 166 * Array of node states. 
167 */ 168 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 169 [N_POSSIBLE] = NODE_MASK_ALL, 170 [N_ONLINE] = { { [0] = 1UL } }, 171 #ifndef CONFIG_NUMA 172 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 173 #ifdef CONFIG_HIGHMEM 174 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 175 #endif 176 [N_MEMORY] = { { [0] = 1UL } }, 177 [N_CPU] = { { [0] = 1UL } }, 178 #endif /* NUMA */ 179 }; 180 EXPORT_SYMBOL(node_states); 181 182 atomic_long_t _totalram_pages __read_mostly; 183 EXPORT_SYMBOL(_totalram_pages); 184 unsigned long totalreserve_pages __read_mostly; 185 unsigned long totalcma_pages __read_mostly; 186 187 int percpu_pagelist_high_fraction; 188 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 189 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); 190 EXPORT_SYMBOL(init_on_alloc); 191 192 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 193 EXPORT_SYMBOL(init_on_free); 194 195 static bool _init_on_alloc_enabled_early __read_mostly 196 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); 197 static int __init early_init_on_alloc(char *buf) 198 { 199 200 return kstrtobool(buf, &_init_on_alloc_enabled_early); 201 } 202 early_param("init_on_alloc", early_init_on_alloc); 203 204 static bool _init_on_free_enabled_early __read_mostly 205 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); 206 static int __init early_init_on_free(char *buf) 207 { 208 return kstrtobool(buf, &_init_on_free_enabled_early); 209 } 210 early_param("init_on_free", early_init_on_free); 211 212 /* 213 * A cached value of the page's pageblock's migratetype, used when the page is 214 * put on a pcplist. Used to avoid the pageblock migratetype lookup when 215 * freeing from pcplists in most cases, at the cost of possibly becoming stale. 216 * Also the migratetype set in the page does not necessarily match the pcplist 217 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any 218 * other index - this ensures that it will be put on the correct CMA freelist. 219 */ 220 static inline int get_pcppage_migratetype(struct page *page) 221 { 222 return page->index; 223 } 224 225 static inline void set_pcppage_migratetype(struct page *page, int migratetype) 226 { 227 page->index = migratetype; 228 } 229 230 #ifdef CONFIG_PM_SLEEP 231 /* 232 * The following functions are used by the suspend/hibernate code to temporarily 233 * change gfp_allowed_mask in order to avoid using I/O during memory allocations 234 * while devices are suspended. To avoid races with the suspend/hibernate code, 235 * they should always be called with system_transition_mutex held 236 * (gfp_allowed_mask also should only be modified with system_transition_mutex 237 * held, unless the suspend/hibernate code is guaranteed not to run in parallel 238 * with that modification). 
239 */ 240 241 static gfp_t saved_gfp_mask; 242 243 void pm_restore_gfp_mask(void) 244 { 245 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 246 if (saved_gfp_mask) { 247 gfp_allowed_mask = saved_gfp_mask; 248 saved_gfp_mask = 0; 249 } 250 } 251 252 void pm_restrict_gfp_mask(void) 253 { 254 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 255 WARN_ON(saved_gfp_mask); 256 saved_gfp_mask = gfp_allowed_mask; 257 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); 258 } 259 260 bool pm_suspended_storage(void) 261 { 262 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) 263 return false; 264 return true; 265 } 266 #endif /* CONFIG_PM_SLEEP */ 267 268 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 269 unsigned int pageblock_order __read_mostly; 270 #endif 271 272 static void __free_pages_ok(struct page *page, unsigned int order, 273 fpi_t fpi_flags); 274 275 /* 276 * results with 256, 32 in the lowmem_reserve sysctl: 277 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 278 * 1G machine -> (16M dma, 784M normal, 224M high) 279 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 280 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 281 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 282 * 283 * TBD: should special case ZONE_DMA32 machines here - in those we normally 284 * don't need any ZONE_NORMAL reservation 285 */ 286 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 287 #ifdef CONFIG_ZONE_DMA 288 [ZONE_DMA] = 256, 289 #endif 290 #ifdef CONFIG_ZONE_DMA32 291 [ZONE_DMA32] = 256, 292 #endif 293 [ZONE_NORMAL] = 32, 294 #ifdef CONFIG_HIGHMEM 295 [ZONE_HIGHMEM] = 0, 296 #endif 297 [ZONE_MOVABLE] = 0, 298 }; 299 300 static char * const zone_names[MAX_NR_ZONES] = { 301 #ifdef CONFIG_ZONE_DMA 302 "DMA", 303 #endif 304 #ifdef CONFIG_ZONE_DMA32 305 "DMA32", 306 #endif 307 "Normal", 308 #ifdef CONFIG_HIGHMEM 309 "HighMem", 310 #endif 311 "Movable", 312 #ifdef CONFIG_ZONE_DEVICE 313 "Device", 314 #endif 315 }; 316 317 const char * const migratetype_names[MIGRATE_TYPES] = { 318 "Unmovable", 319 "Movable", 320 "Reclaimable", 321 "HighAtomic", 322 #ifdef CONFIG_CMA 323 "CMA", 324 #endif 325 #ifdef CONFIG_MEMORY_ISOLATION 326 "Isolate", 327 #endif 328 }; 329 330 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { 331 [NULL_COMPOUND_DTOR] = NULL, 332 [COMPOUND_PAGE_DTOR] = free_compound_page, 333 #ifdef CONFIG_HUGETLB_PAGE 334 [HUGETLB_PAGE_DTOR] = free_huge_page, 335 #endif 336 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 337 [TRANSHUGE_PAGE_DTOR] = free_transhuge_page, 338 #endif 339 }; 340 341 int min_free_kbytes = 1024; 342 int user_min_free_kbytes = -1; 343 int watermark_boost_factor __read_mostly = 15000; 344 int watermark_scale_factor = 10; 345 346 static unsigned long nr_kernel_pages __initdata; 347 static unsigned long nr_all_pages __initdata; 348 static unsigned long dma_reserve __initdata; 349 350 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; 351 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; 352 static unsigned long required_kernelcore __initdata; 353 static unsigned long required_kernelcore_percent __initdata; 354 static unsigned long required_movablecore __initdata; 355 static unsigned long required_movablecore_percent __initdata; 356 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; 357 static bool mirrored_kernelcore __meminitdata; 358 359 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 360 int 
movable_zone; 361 EXPORT_SYMBOL(movable_zone); 362 363 #if MAX_NUMNODES > 1 364 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 365 unsigned int nr_online_nodes __read_mostly = 1; 366 EXPORT_SYMBOL(nr_node_ids); 367 EXPORT_SYMBOL(nr_online_nodes); 368 #endif 369 370 int page_group_by_mobility_disabled __read_mostly; 371 372 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 373 /* 374 * During boot we initialize deferred pages on-demand, as needed, but once 375 * page_alloc_init_late() has finished, the deferred pages are all initialized, 376 * and we can permanently disable that path. 377 */ 378 static DEFINE_STATIC_KEY_TRUE(deferred_pages); 379 380 /* 381 * Calling kasan_poison_pages() only after deferred memory initialization 382 * has completed. Poisoning pages during deferred memory init will greatly 383 * lengthen the process and cause problem in large memory systems as the 384 * deferred pages initialization is done with interrupt disabled. 385 * 386 * Assuming that there will be no reference to those newly initialized 387 * pages before they are ever allocated, this should have no effect on 388 * KASAN memory tracking as the poison will be properly inserted at page 389 * allocation time. The only corner case is when pages are allocated by 390 * on-demand allocation and then freed again before the deferred pages 391 * initialization is done, but this is not likely to happen. 392 */ 393 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) 394 { 395 return static_branch_unlikely(&deferred_pages) || 396 (!IS_ENABLED(CONFIG_KASAN_GENERIC) && 397 (fpi_flags & FPI_SKIP_KASAN_POISON)) || 398 PageSkipKASanPoison(page); 399 } 400 401 /* Returns true if the struct page for the pfn is uninitialised */ 402 static inline bool __meminit early_page_uninitialised(unsigned long pfn) 403 { 404 int nid = early_pfn_to_nid(pfn); 405 406 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) 407 return true; 408 409 return false; 410 } 411 412 /* 413 * Returns true when the remaining initialisation should be deferred until 414 * later in the boot cycle when it can be parallelised. 415 */ 416 static bool __meminit 417 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 418 { 419 static unsigned long prev_end_pfn, nr_initialised; 420 421 /* 422 * prev_end_pfn static that contains the end of previous zone 423 * No need to protect because called very early in boot before smp_init. 424 */ 425 if (prev_end_pfn != end_pfn) { 426 prev_end_pfn = end_pfn; 427 nr_initialised = 0; 428 } 429 430 /* Always populate low zones for address-constrained allocations */ 431 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) 432 return false; 433 434 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) 435 return true; 436 /* 437 * We start only with one section of pages, more pages are added as 438 * needed until the rest of deferred pages are initialized. 
439 */ 440 nr_initialised++; 441 if ((nr_initialised > PAGES_PER_SECTION) && 442 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 443 NODE_DATA(nid)->first_deferred_pfn = pfn; 444 return true; 445 } 446 return false; 447 } 448 #else 449 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) 450 { 451 return (!IS_ENABLED(CONFIG_KASAN_GENERIC) && 452 (fpi_flags & FPI_SKIP_KASAN_POISON)) || 453 PageSkipKASanPoison(page); 454 } 455 456 static inline bool early_page_uninitialised(unsigned long pfn) 457 { 458 return false; 459 } 460 461 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 462 { 463 return false; 464 } 465 #endif 466 467 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 468 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 469 unsigned long pfn) 470 { 471 #ifdef CONFIG_SPARSEMEM 472 return section_to_usemap(__pfn_to_section(pfn)); 473 #else 474 return page_zone(page)->pageblock_flags; 475 #endif /* CONFIG_SPARSEMEM */ 476 } 477 478 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 479 { 480 #ifdef CONFIG_SPARSEMEM 481 pfn &= (PAGES_PER_SECTION-1); 482 #else 483 pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages); 484 #endif /* CONFIG_SPARSEMEM */ 485 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 486 } 487 488 static __always_inline 489 unsigned long __get_pfnblock_flags_mask(const struct page *page, 490 unsigned long pfn, 491 unsigned long mask) 492 { 493 unsigned long *bitmap; 494 unsigned long bitidx, word_bitidx; 495 unsigned long word; 496 497 bitmap = get_pageblock_bitmap(page, pfn); 498 bitidx = pfn_to_bitidx(page, pfn); 499 word_bitidx = bitidx / BITS_PER_LONG; 500 bitidx &= (BITS_PER_LONG-1); 501 502 word = bitmap[word_bitidx]; 503 return (word >> bitidx) & mask; 504 } 505 506 /** 507 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages 508 * @page: The page within the block of interest 509 * @pfn: The target page frame number 510 * @mask: mask of bits that the caller is interested in 511 * 512 * Return: pageblock_bits flags 513 */ 514 unsigned long get_pfnblock_flags_mask(const struct page *page, 515 unsigned long pfn, unsigned long mask) 516 { 517 return __get_pfnblock_flags_mask(page, pfn, mask); 518 } 519 520 static __always_inline int get_pfnblock_migratetype(const struct page *page, 521 unsigned long pfn) 522 { 523 return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 524 } 525 526 /** 527 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 528 * @page: The page within the block of interest 529 * @flags: The flags to set 530 * @pfn: The target page frame number 531 * @mask: mask of bits that the caller is interested in 532 */ 533 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 534 unsigned long pfn, 535 unsigned long mask) 536 { 537 unsigned long *bitmap; 538 unsigned long bitidx, word_bitidx; 539 unsigned long old_word, word; 540 541 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 542 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 543 544 bitmap = get_pageblock_bitmap(page, pfn); 545 bitidx = pfn_to_bitidx(page, pfn); 546 word_bitidx = bitidx / BITS_PER_LONG; 547 bitidx &= (BITS_PER_LONG-1); 548 549 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 550 551 mask <<= bitidx; 552 flags <<= bitidx; 553 554 word = READ_ONCE(bitmap[word_bitidx]); 555 for (;;) { 556 old_word = 
cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags); 557 if (word == old_word) 558 break; 559 word = old_word; 560 } 561 } 562 563 void set_pageblock_migratetype(struct page *page, int migratetype) 564 { 565 if (unlikely(page_group_by_mobility_disabled && 566 migratetype < MIGRATE_PCPTYPES)) 567 migratetype = MIGRATE_UNMOVABLE; 568 569 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 570 page_to_pfn(page), MIGRATETYPE_MASK); 571 } 572 573 #ifdef CONFIG_DEBUG_VM 574 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 575 { 576 int ret = 0; 577 unsigned seq; 578 unsigned long pfn = page_to_pfn(page); 579 unsigned long sp, start_pfn; 580 581 do { 582 seq = zone_span_seqbegin(zone); 583 start_pfn = zone->zone_start_pfn; 584 sp = zone->spanned_pages; 585 if (!zone_spans_pfn(zone, pfn)) 586 ret = 1; 587 } while (zone_span_seqretry(zone, seq)); 588 589 if (ret) 590 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 591 pfn, zone_to_nid(zone), zone->name, 592 start_pfn, start_pfn + sp); 593 594 return ret; 595 } 596 597 static int page_is_consistent(struct zone *zone, struct page *page) 598 { 599 if (zone != page_zone(page)) 600 return 0; 601 602 return 1; 603 } 604 /* 605 * Temporary debugging check for pages not lying within a given zone. 606 */ 607 static int __maybe_unused bad_range(struct zone *zone, struct page *page) 608 { 609 if (page_outside_zone_boundaries(zone, page)) 610 return 1; 611 if (!page_is_consistent(zone, page)) 612 return 1; 613 614 return 0; 615 } 616 #else 617 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) 618 { 619 return 0; 620 } 621 #endif 622 623 static void bad_page(struct page *page, const char *reason) 624 { 625 static unsigned long resume; 626 static unsigned long nr_shown; 627 static unsigned long nr_unshown; 628 629 /* 630 * Allow a burst of 60 reports, then keep quiet for that minute; 631 * or allow a steady drip of one report per second. 
632 */ 633 if (nr_shown == 60) { 634 if (time_before(jiffies, resume)) { 635 nr_unshown++; 636 goto out; 637 } 638 if (nr_unshown) { 639 pr_alert( 640 "BUG: Bad page state: %lu messages suppressed\n", 641 nr_unshown); 642 nr_unshown = 0; 643 } 644 nr_shown = 0; 645 } 646 if (nr_shown++ == 0) 647 resume = jiffies + 60 * HZ; 648 649 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 650 current->comm, page_to_pfn(page)); 651 dump_page(page, reason); 652 653 print_modules(); 654 dump_stack(); 655 out: 656 /* Leave bad fields for debug, except PageBuddy could make trouble */ 657 page_mapcount_reset(page); /* remove PageBuddy */ 658 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 659 } 660 661 static inline unsigned int order_to_pindex(int migratetype, int order) 662 { 663 int base = order; 664 665 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 666 if (order > PAGE_ALLOC_COSTLY_ORDER) { 667 VM_BUG_ON(order != pageblock_order); 668 base = PAGE_ALLOC_COSTLY_ORDER + 1; 669 } 670 #else 671 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 672 #endif 673 674 return (MIGRATE_PCPTYPES * base) + migratetype; 675 } 676 677 static inline int pindex_to_order(unsigned int pindex) 678 { 679 int order = pindex / MIGRATE_PCPTYPES; 680 681 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 682 if (order > PAGE_ALLOC_COSTLY_ORDER) 683 order = pageblock_order; 684 #else 685 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 686 #endif 687 688 return order; 689 } 690 691 static inline bool pcp_allowed_order(unsigned int order) 692 { 693 if (order <= PAGE_ALLOC_COSTLY_ORDER) 694 return true; 695 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 696 if (order == pageblock_order) 697 return true; 698 #endif 699 return false; 700 } 701 702 static inline void free_the_page(struct page *page, unsigned int order) 703 { 704 if (pcp_allowed_order(order)) /* Via pcp? */ 705 free_unref_page(page, order); 706 else 707 __free_pages_ok(page, order, FPI_NONE); 708 } 709 710 /* 711 * Higher-order pages are called "compound pages". They are structured thusly: 712 * 713 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 714 * 715 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 716 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 717 * 718 * The first tail page's ->compound_dtor holds the offset in array of compound 719 * page destructors. See compound_page_dtors. 720 * 721 * The first tail page's ->compound_order holds the order of allocation. 722 * This usage means that zero-order pages may not be compound. 
723 */ 724 725 void free_compound_page(struct page *page) 726 { 727 mem_cgroup_uncharge(page_folio(page)); 728 free_the_page(page, compound_order(page)); 729 } 730 731 static void prep_compound_head(struct page *page, unsigned int order) 732 { 733 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); 734 set_compound_order(page, order); 735 atomic_set(compound_mapcount_ptr(page), -1); 736 if (hpage_pincount_available(page)) 737 atomic_set(compound_pincount_ptr(page), 0); 738 } 739 740 static void prep_compound_tail(struct page *head, int tail_idx) 741 { 742 struct page *p = head + tail_idx; 743 744 p->mapping = TAIL_MAPPING; 745 set_compound_head(p, head); 746 } 747 748 void prep_compound_page(struct page *page, unsigned int order) 749 { 750 int i; 751 int nr_pages = 1 << order; 752 753 __SetPageHead(page); 754 for (i = 1; i < nr_pages; i++) 755 prep_compound_tail(page, i); 756 757 prep_compound_head(page, order); 758 } 759 760 #ifdef CONFIG_DEBUG_PAGEALLOC 761 unsigned int _debug_guardpage_minorder; 762 763 bool _debug_pagealloc_enabled_early __read_mostly 764 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); 765 EXPORT_SYMBOL(_debug_pagealloc_enabled_early); 766 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); 767 EXPORT_SYMBOL(_debug_pagealloc_enabled); 768 769 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); 770 771 static int __init early_debug_pagealloc(char *buf) 772 { 773 return kstrtobool(buf, &_debug_pagealloc_enabled_early); 774 } 775 early_param("debug_pagealloc", early_debug_pagealloc); 776 777 static int __init debug_guardpage_minorder_setup(char *buf) 778 { 779 unsigned long res; 780 781 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { 782 pr_err("Bad debug_guardpage_minorder value\n"); 783 return 0; 784 } 785 _debug_guardpage_minorder = res; 786 pr_info("Setting debug_guardpage_minorder to %lu\n", res); 787 return 0; 788 } 789 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); 790 791 static inline bool set_page_guard(struct zone *zone, struct page *page, 792 unsigned int order, int migratetype) 793 { 794 if (!debug_guardpage_enabled()) 795 return false; 796 797 if (order >= debug_guardpage_minorder()) 798 return false; 799 800 __SetPageGuard(page); 801 INIT_LIST_HEAD(&page->lru); 802 set_page_private(page, order); 803 /* Guard pages are not available for any usage */ 804 __mod_zone_freepage_state(zone, -(1 << order), migratetype); 805 806 return true; 807 } 808 809 static inline void clear_page_guard(struct zone *zone, struct page *page, 810 unsigned int order, int migratetype) 811 { 812 if (!debug_guardpage_enabled()) 813 return; 814 815 __ClearPageGuard(page); 816 817 set_page_private(page, 0); 818 if (!is_migrate_isolate(migratetype)) 819 __mod_zone_freepage_state(zone, (1 << order), migratetype); 820 } 821 #else 822 static inline bool set_page_guard(struct zone *zone, struct page *page, 823 unsigned int order, int migratetype) { return false; } 824 static inline void clear_page_guard(struct zone *zone, struct page *page, 825 unsigned int order, int migratetype) {} 826 #endif 827 828 /* 829 * Enable static keys related to various memory debugging and hardening options. 830 * Some override others, and depend on early params that are evaluated in the 831 * order of appearance. So we need to first gather the full picture of what was 832 * enabled, and then make decisions. 
833 */ 834 void init_mem_debugging_and_hardening(void) 835 { 836 bool page_poisoning_requested = false; 837 838 #ifdef CONFIG_PAGE_POISONING 839 /* 840 * Page poisoning is debug page alloc for some arches. If 841 * either of those options are enabled, enable poisoning. 842 */ 843 if (page_poisoning_enabled() || 844 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && 845 debug_pagealloc_enabled())) { 846 static_branch_enable(&_page_poisoning_enabled); 847 page_poisoning_requested = true; 848 } 849 #endif 850 851 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && 852 page_poisoning_requested) { 853 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " 854 "will take precedence over init_on_alloc and init_on_free\n"); 855 _init_on_alloc_enabled_early = false; 856 _init_on_free_enabled_early = false; 857 } 858 859 if (_init_on_alloc_enabled_early) 860 static_branch_enable(&init_on_alloc); 861 else 862 static_branch_disable(&init_on_alloc); 863 864 if (_init_on_free_enabled_early) 865 static_branch_enable(&init_on_free); 866 else 867 static_branch_disable(&init_on_free); 868 869 #ifdef CONFIG_DEBUG_PAGEALLOC 870 if (!debug_pagealloc_enabled()) 871 return; 872 873 static_branch_enable(&_debug_pagealloc_enabled); 874 875 if (!debug_guardpage_minorder()) 876 return; 877 878 static_branch_enable(&_debug_guardpage_enabled); 879 #endif 880 } 881 882 static inline void set_buddy_order(struct page *page, unsigned int order) 883 { 884 set_page_private(page, order); 885 __SetPageBuddy(page); 886 } 887 888 /* 889 * This function checks whether a page is free && is the buddy 890 * we can coalesce a page and its buddy if 891 * (a) the buddy is not in a hole (check before calling!) && 892 * (b) the buddy is in the buddy system && 893 * (c) a page and its buddy have the same order && 894 * (d) a page and its buddy are in the same zone. 895 * 896 * For recording whether a page is in the buddy system, we set PageBuddy. 897 * Setting, clearing, and testing PageBuddy is serialized by zone->lock. 898 * 899 * For recording page's order, we use page_private(page). 900 */ 901 static inline bool page_is_buddy(struct page *page, struct page *buddy, 902 unsigned int order) 903 { 904 if (!page_is_guard(buddy) && !PageBuddy(buddy)) 905 return false; 906 907 if (buddy_order(buddy) != order) 908 return false; 909 910 /* 911 * zone check is done late to avoid uselessly calculating 912 * zone/node ids for pages that could never merge. 913 */ 914 if (page_zone_id(page) != page_zone_id(buddy)) 915 return false; 916 917 VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy); 918 919 return true; 920 } 921 922 #ifdef CONFIG_COMPACTION 923 static inline struct capture_control *task_capc(struct zone *zone) 924 { 925 struct capture_control *capc = current->capture_control; 926 927 return unlikely(capc) && 928 !(current->flags & PF_KTHREAD) && 929 !capc->page && 930 capc->cc->zone == zone ? capc : NULL; 931 } 932 933 static inline bool 934 compaction_capture(struct capture_control *capc, struct page *page, 935 int order, int migratetype) 936 { 937 if (!capc || order != capc->cc->order) 938 return false; 939 940 /* Do not accidentally pollute CMA or isolated regions*/ 941 if (is_migrate_cma(migratetype) || 942 is_migrate_isolate(migratetype)) 943 return false; 944 945 /* 946 * Do not let lower order allocations pollute a movable pageblock. 
947 * This might let an unmovable request use a reclaimable pageblock 948 * and vice-versa but no more than normal fallback logic which can 949 * have trouble finding a high-order free page. 950 */ 951 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) 952 return false; 953 954 capc->page = page; 955 return true; 956 } 957 958 #else 959 static inline struct capture_control *task_capc(struct zone *zone) 960 { 961 return NULL; 962 } 963 964 static inline bool 965 compaction_capture(struct capture_control *capc, struct page *page, 966 int order, int migratetype) 967 { 968 return false; 969 } 970 #endif /* CONFIG_COMPACTION */ 971 972 /* Used for pages not on another list */ 973 static inline void add_to_free_list(struct page *page, struct zone *zone, 974 unsigned int order, int migratetype) 975 { 976 struct free_area *area = &zone->free_area[order]; 977 978 list_add(&page->lru, &area->free_list[migratetype]); 979 area->nr_free++; 980 } 981 982 /* Used for pages not on another list */ 983 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, 984 unsigned int order, int migratetype) 985 { 986 struct free_area *area = &zone->free_area[order]; 987 988 list_add_tail(&page->lru, &area->free_list[migratetype]); 989 area->nr_free++; 990 } 991 992 /* 993 * Used for pages which are on another list. Move the pages to the tail 994 * of the list - so the moved pages won't immediately be considered for 995 * allocation again (e.g., optimization for memory onlining). 996 */ 997 static inline void move_to_free_list(struct page *page, struct zone *zone, 998 unsigned int order, int migratetype) 999 { 1000 struct free_area *area = &zone->free_area[order]; 1001 1002 list_move_tail(&page->lru, &area->free_list[migratetype]); 1003 } 1004 1005 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 1006 unsigned int order) 1007 { 1008 /* clear reported state and update reported page count */ 1009 if (page_reported(page)) 1010 __ClearPageReported(page); 1011 1012 list_del(&page->lru); 1013 __ClearPageBuddy(page); 1014 set_page_private(page, 0); 1015 zone->free_area[order].nr_free--; 1016 } 1017 1018 /* 1019 * If this is not the largest possible page, check if the buddy 1020 * of the next-highest order is free. If it is, it's possible 1021 * that pages are being freed that will coalesce soon. In case, 1022 * that is happening, add the free page to the tail of the list 1023 * so it's less likely to be used soon and more likely to be merged 1024 * as a higher order page 1025 */ 1026 static inline bool 1027 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 1028 struct page *page, unsigned int order) 1029 { 1030 struct page *higher_page, *higher_buddy; 1031 unsigned long combined_pfn; 1032 1033 if (order >= MAX_ORDER - 2) 1034 return false; 1035 1036 combined_pfn = buddy_pfn & pfn; 1037 higher_page = page + (combined_pfn - pfn); 1038 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); 1039 higher_buddy = higher_page + (buddy_pfn - combined_pfn); 1040 1041 return page_is_buddy(higher_page, higher_buddy, order + 1); 1042 } 1043 1044 /* 1045 * Freeing function for a buddy system allocator. 1046 * 1047 * The concept of a buddy system is to maintain direct-mapped table 1048 * (containing bit values) for memory blocks of various "orders". 1049 * The bottom level table contains the map for the smallest allocatable 1050 * units of memory (here, pages), and each level above it describes 1051 * pairs of units from the levels below, hence, "buddies". 
1052 * At a high level, all that happens here is marking the table entry 1053 * at the bottom level available, and propagating the changes upward 1054 * as necessary, plus some accounting needed to play nicely with other 1055 * parts of the VM system. 1056 * At each level, we keep a list of pages, which are heads of continuous 1057 * free pages of length of (1 << order) and marked with PageBuddy. 1058 * Page's order is recorded in page_private(page) field. 1059 * So when we are allocating or freeing one, we can derive the state of the 1060 * other. That is, if we allocate a small block, and both were 1061 * free, the remainder of the region must be split into blocks. 1062 * If a block is freed, and its buddy is also free, then this 1063 * triggers coalescing into a block of larger size. 1064 * 1065 * -- nyc 1066 */ 1067 1068 static inline void __free_one_page(struct page *page, 1069 unsigned long pfn, 1070 struct zone *zone, unsigned int order, 1071 int migratetype, fpi_t fpi_flags) 1072 { 1073 struct capture_control *capc = task_capc(zone); 1074 unsigned long buddy_pfn; 1075 unsigned long combined_pfn; 1076 unsigned int max_order; 1077 struct page *buddy; 1078 bool to_tail; 1079 1080 max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order); 1081 1082 VM_BUG_ON(!zone_is_initialized(zone)); 1083 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 1084 1085 VM_BUG_ON(migratetype == -1); 1086 if (likely(!is_migrate_isolate(migratetype))) 1087 __mod_zone_freepage_state(zone, 1 << order, migratetype); 1088 1089 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 1090 VM_BUG_ON_PAGE(bad_range(zone, page), page); 1091 1092 continue_merging: 1093 while (order < max_order) { 1094 if (compaction_capture(capc, page, order, migratetype)) { 1095 __mod_zone_freepage_state(zone, -(1 << order), 1096 migratetype); 1097 return; 1098 } 1099 buddy_pfn = __find_buddy_pfn(pfn, order); 1100 buddy = page + (buddy_pfn - pfn); 1101 1102 if (!page_is_buddy(page, buddy, order)) 1103 goto done_merging; 1104 /* 1105 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 1106 * merge with it and move up one order. 1107 */ 1108 if (page_is_guard(buddy)) 1109 clear_page_guard(zone, buddy, order, migratetype); 1110 else 1111 del_page_from_free_list(buddy, zone, order); 1112 combined_pfn = buddy_pfn & pfn; 1113 page = page + (combined_pfn - pfn); 1114 pfn = combined_pfn; 1115 order++; 1116 } 1117 if (order < MAX_ORDER - 1) { 1118 /* If we are here, it means order is >= pageblock_order. 1119 * We want to prevent merge between freepages on isolate 1120 * pageblock and normal pageblock. Without this, pageblock 1121 * isolation could cause incorrect freepage or CMA accounting. 1122 * 1123 * We don't want to hit this code for the more frequent 1124 * low-order merging. 
1125 */ 1126 if (unlikely(has_isolate_pageblock(zone))) { 1127 int buddy_mt; 1128 1129 buddy_pfn = __find_buddy_pfn(pfn, order); 1130 buddy = page + (buddy_pfn - pfn); 1131 buddy_mt = get_pageblock_migratetype(buddy); 1132 1133 if (migratetype != buddy_mt 1134 && (is_migrate_isolate(migratetype) || 1135 is_migrate_isolate(buddy_mt))) 1136 goto done_merging; 1137 } 1138 max_order = order + 1; 1139 goto continue_merging; 1140 } 1141 1142 done_merging: 1143 set_buddy_order(page, order); 1144 1145 if (fpi_flags & FPI_TO_TAIL) 1146 to_tail = true; 1147 else if (is_shuffle_order(order)) 1148 to_tail = shuffle_pick_tail(); 1149 else 1150 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 1151 1152 if (to_tail) 1153 add_to_free_list_tail(page, zone, order, migratetype); 1154 else 1155 add_to_free_list(page, zone, order, migratetype); 1156 1157 /* Notify page reporting subsystem of freed page */ 1158 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 1159 page_reporting_notify_free(order); 1160 } 1161 1162 /* 1163 * A bad page could be due to a number of fields. Instead of multiple branches, 1164 * try and check multiple fields with one check. The caller must do a detailed 1165 * check if necessary. 1166 */ 1167 static inline bool page_expected_state(struct page *page, 1168 unsigned long check_flags) 1169 { 1170 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1171 return false; 1172 1173 if (unlikely((unsigned long)page->mapping | 1174 page_ref_count(page) | 1175 #ifdef CONFIG_MEMCG 1176 page->memcg_data | 1177 #endif 1178 (page->flags & check_flags))) 1179 return false; 1180 1181 return true; 1182 } 1183 1184 static const char *page_bad_reason(struct page *page, unsigned long flags) 1185 { 1186 const char *bad_reason = NULL; 1187 1188 if (unlikely(atomic_read(&page->_mapcount) != -1)) 1189 bad_reason = "nonzero mapcount"; 1190 if (unlikely(page->mapping != NULL)) 1191 bad_reason = "non-NULL mapping"; 1192 if (unlikely(page_ref_count(page) != 0)) 1193 bad_reason = "nonzero _refcount"; 1194 if (unlikely(page->flags & flags)) { 1195 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 1196 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 1197 else 1198 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 1199 } 1200 #ifdef CONFIG_MEMCG 1201 if (unlikely(page->memcg_data)) 1202 bad_reason = "page still charged to cgroup"; 1203 #endif 1204 return bad_reason; 1205 } 1206 1207 static void check_free_page_bad(struct page *page) 1208 { 1209 bad_page(page, 1210 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 1211 } 1212 1213 static inline int check_free_page(struct page *page) 1214 { 1215 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 1216 return 0; 1217 1218 /* Something has gone sideways, find it */ 1219 check_free_page_bad(page); 1220 return 1; 1221 } 1222 1223 static int free_tail_pages_check(struct page *head_page, struct page *page) 1224 { 1225 int ret = 1; 1226 1227 /* 1228 * We rely page->lru.next never has bit 0 set, unless the page 1229 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 
1230 */ 1231 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 1232 1233 if (!IS_ENABLED(CONFIG_DEBUG_VM)) { 1234 ret = 0; 1235 goto out; 1236 } 1237 switch (page - head_page) { 1238 case 1: 1239 /* the first tail page: ->mapping may be compound_mapcount() */ 1240 if (unlikely(compound_mapcount(page))) { 1241 bad_page(page, "nonzero compound_mapcount"); 1242 goto out; 1243 } 1244 break; 1245 case 2: 1246 /* 1247 * the second tail page: ->mapping is 1248 * deferred_list.next -- ignore value. 1249 */ 1250 break; 1251 default: 1252 if (page->mapping != TAIL_MAPPING) { 1253 bad_page(page, "corrupted mapping in tail page"); 1254 goto out; 1255 } 1256 break; 1257 } 1258 if (unlikely(!PageTail(page))) { 1259 bad_page(page, "PageTail not set"); 1260 goto out; 1261 } 1262 if (unlikely(compound_head(page) != head_page)) { 1263 bad_page(page, "compound_head not consistent"); 1264 goto out; 1265 } 1266 ret = 0; 1267 out: 1268 page->mapping = NULL; 1269 clear_compound_head(page); 1270 return ret; 1271 } 1272 1273 static void kernel_init_free_pages(struct page *page, int numpages, bool zero_tags) 1274 { 1275 int i; 1276 1277 if (zero_tags) { 1278 for (i = 0; i < numpages; i++) 1279 tag_clear_highpage(page + i); 1280 return; 1281 } 1282 1283 /* s390's use of memset() could override KASAN redzones. */ 1284 kasan_disable_current(); 1285 for (i = 0; i < numpages; i++) { 1286 u8 tag = page_kasan_tag(page + i); 1287 page_kasan_tag_reset(page + i); 1288 clear_highpage(page + i); 1289 page_kasan_tag_set(page + i, tag); 1290 } 1291 kasan_enable_current(); 1292 } 1293 1294 static __always_inline bool free_pages_prepare(struct page *page, 1295 unsigned int order, bool check_free, fpi_t fpi_flags) 1296 { 1297 int bad = 0; 1298 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); 1299 1300 VM_BUG_ON_PAGE(PageTail(page), page); 1301 1302 trace_mm_page_free(page, order); 1303 1304 if (unlikely(PageHWPoison(page)) && !order) { 1305 /* 1306 * Do not let hwpoison pages hit pcplists/buddy 1307 * Untie memcg state and reset page's owner 1308 */ 1309 if (memcg_kmem_enabled() && PageMemcgKmem(page)) 1310 __memcg_kmem_uncharge_page(page, order); 1311 reset_page_owner(page, order); 1312 page_table_check_free(page, order); 1313 return false; 1314 } 1315 1316 /* 1317 * Check tail pages before head page information is cleared to 1318 * avoid checking PageCompound for order-0 pages. 
1319 */ 1320 if (unlikely(order)) { 1321 bool compound = PageCompound(page); 1322 int i; 1323 1324 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1325 1326 if (compound) { 1327 ClearPageDoubleMap(page); 1328 ClearPageHasHWPoisoned(page); 1329 } 1330 for (i = 1; i < (1 << order); i++) { 1331 if (compound) 1332 bad += free_tail_pages_check(page, page + i); 1333 if (unlikely(check_free_page(page + i))) { 1334 bad++; 1335 continue; 1336 } 1337 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1338 } 1339 } 1340 if (PageMappingFlags(page)) 1341 page->mapping = NULL; 1342 if (memcg_kmem_enabled() && PageMemcgKmem(page)) 1343 __memcg_kmem_uncharge_page(page, order); 1344 if (check_free) 1345 bad += check_free_page(page); 1346 if (bad) 1347 return false; 1348 1349 page_cpupid_reset_last(page); 1350 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1351 reset_page_owner(page, order); 1352 page_table_check_free(page, order); 1353 1354 if (!PageHighMem(page)) { 1355 debug_check_no_locks_freed(page_address(page), 1356 PAGE_SIZE << order); 1357 debug_check_no_obj_freed(page_address(page), 1358 PAGE_SIZE << order); 1359 } 1360 1361 kernel_poison_pages(page, 1 << order); 1362 1363 /* 1364 * As memory initialization might be integrated into KASAN, 1365 * kasan_free_pages and kernel_init_free_pages must be 1366 * kept together to avoid discrepancies in behavior. 1367 * 1368 * With hardware tag-based KASAN, memory tags must be set before the 1369 * page becomes unavailable via debug_pagealloc or arch_free_page. 1370 */ 1371 if (kasan_has_integrated_init()) { 1372 if (!skip_kasan_poison) 1373 kasan_free_pages(page, order); 1374 } else { 1375 bool init = want_init_on_free(); 1376 1377 if (init) 1378 kernel_init_free_pages(page, 1 << order, false); 1379 if (!skip_kasan_poison) 1380 kasan_poison_pages(page, order, init); 1381 } 1382 1383 /* 1384 * arch_free_page() can make the page's contents inaccessible. s390 1385 * does this. So nothing which can access the page's contents should 1386 * happen after this. 1387 */ 1388 arch_free_page(page, order); 1389 1390 debug_pagealloc_unmap_pages(page, 1 << order); 1391 1392 return true; 1393 } 1394 1395 #ifdef CONFIG_DEBUG_VM 1396 /* 1397 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed 1398 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when 1399 * moved from pcp lists to free lists. 1400 */ 1401 static bool free_pcp_prepare(struct page *page, unsigned int order) 1402 { 1403 return free_pages_prepare(page, order, true, FPI_NONE); 1404 } 1405 1406 static bool bulkfree_pcp_prepare(struct page *page) 1407 { 1408 if (debug_pagealloc_enabled_static()) 1409 return check_free_page(page); 1410 else 1411 return false; 1412 } 1413 #else 1414 /* 1415 * With DEBUG_VM disabled, order-0 pages being freed are checked only when 1416 * moving from pcp lists to free list in order to reduce overhead. With 1417 * debug_pagealloc enabled, they are checked also immediately when being freed 1418 * to the pcp lists. 
1419 */ 1420 static bool free_pcp_prepare(struct page *page, unsigned int order) 1421 { 1422 if (debug_pagealloc_enabled_static()) 1423 return free_pages_prepare(page, order, true, FPI_NONE); 1424 else 1425 return free_pages_prepare(page, order, false, FPI_NONE); 1426 } 1427 1428 static bool bulkfree_pcp_prepare(struct page *page) 1429 { 1430 return check_free_page(page); 1431 } 1432 #endif /* CONFIG_DEBUG_VM */ 1433 1434 static inline void prefetch_buddy(struct page *page) 1435 { 1436 unsigned long pfn = page_to_pfn(page); 1437 unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0); 1438 struct page *buddy = page + (buddy_pfn - pfn); 1439 1440 prefetch(buddy); 1441 } 1442 1443 /* 1444 * Frees a number of pages from the PCP lists 1445 * Assumes all pages on list are in same zone. 1446 * count is the number of pages to free. 1447 */ 1448 static void free_pcppages_bulk(struct zone *zone, int count, 1449 struct per_cpu_pages *pcp) 1450 { 1451 int pindex = 0; 1452 int batch_free = 0; 1453 int nr_freed = 0; 1454 unsigned int order; 1455 int prefetch_nr = READ_ONCE(pcp->batch); 1456 bool isolated_pageblocks; 1457 struct page *page, *tmp; 1458 LIST_HEAD(head); 1459 1460 /* 1461 * Ensure proper count is passed which otherwise would stuck in the 1462 * below while (list_empty(list)) loop. 1463 */ 1464 count = min(pcp->count, count); 1465 while (count > 0) { 1466 struct list_head *list; 1467 1468 /* 1469 * Remove pages from lists in a round-robin fashion. A 1470 * batch_free count is maintained that is incremented when an 1471 * empty list is encountered. This is so more pages are freed 1472 * off fuller lists instead of spinning excessively around empty 1473 * lists 1474 */ 1475 do { 1476 batch_free++; 1477 if (++pindex == NR_PCP_LISTS) 1478 pindex = 0; 1479 list = &pcp->lists[pindex]; 1480 } while (list_empty(list)); 1481 1482 /* This is the only non-empty list. Free them all. */ 1483 if (batch_free == NR_PCP_LISTS) 1484 batch_free = count; 1485 1486 order = pindex_to_order(pindex); 1487 BUILD_BUG_ON(MAX_ORDER >= (1<<NR_PCP_ORDER_WIDTH)); 1488 do { 1489 page = list_last_entry(list, struct page, lru); 1490 /* must delete to avoid corrupting pcp list */ 1491 list_del(&page->lru); 1492 nr_freed += 1 << order; 1493 count -= 1 << order; 1494 1495 if (bulkfree_pcp_prepare(page)) 1496 continue; 1497 1498 /* Encode order with the migratetype */ 1499 page->index <<= NR_PCP_ORDER_WIDTH; 1500 page->index |= order; 1501 1502 list_add_tail(&page->lru, &head); 1503 1504 /* 1505 * We are going to put the page back to the global 1506 * pool, prefetch its buddy to speed up later access 1507 * under zone->lock. It is believed the overhead of 1508 * an additional test and calculating buddy_pfn here 1509 * can be offset by reduced memory latency later. To 1510 * avoid excessive prefetching due to large count, only 1511 * prefetch buddy for the first pcp->batch nr of pages. 1512 */ 1513 if (prefetch_nr) { 1514 prefetch_buddy(page); 1515 prefetch_nr--; 1516 } 1517 } while (count > 0 && --batch_free && !list_empty(list)); 1518 } 1519 pcp->count -= nr_freed; 1520 1521 /* 1522 * local_lock_irq held so equivalent to spin_lock_irqsave for 1523 * both PREEMPT_RT and non-PREEMPT_RT configurations. 1524 */ 1525 spin_lock(&zone->lock); 1526 isolated_pageblocks = has_isolate_pageblock(zone); 1527 1528 /* 1529 * Use safe version since after __free_one_page(), 1530 * page->lru.next will not point to original list. 
1531 */ 1532 list_for_each_entry_safe(page, tmp, &head, lru) { 1533 int mt = get_pcppage_migratetype(page); 1534 1535 /* mt has been encoded with the order (see above) */ 1536 order = mt & NR_PCP_ORDER_MASK; 1537 mt >>= NR_PCP_ORDER_WIDTH; 1538 1539 /* MIGRATE_ISOLATE page should not go to pcplists */ 1540 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); 1541 /* Pageblock could have been isolated meanwhile */ 1542 if (unlikely(isolated_pageblocks)) 1543 mt = get_pageblock_migratetype(page); 1544 1545 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); 1546 trace_mm_page_pcpu_drain(page, order, mt); 1547 } 1548 spin_unlock(&zone->lock); 1549 } 1550 1551 static void free_one_page(struct zone *zone, 1552 struct page *page, unsigned long pfn, 1553 unsigned int order, 1554 int migratetype, fpi_t fpi_flags) 1555 { 1556 unsigned long flags; 1557 1558 spin_lock_irqsave(&zone->lock, flags); 1559 if (unlikely(has_isolate_pageblock(zone) || 1560 is_migrate_isolate(migratetype))) { 1561 migratetype = get_pfnblock_migratetype(page, pfn); 1562 } 1563 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1564 spin_unlock_irqrestore(&zone->lock, flags); 1565 } 1566 1567 static void __meminit __init_single_page(struct page *page, unsigned long pfn, 1568 unsigned long zone, int nid) 1569 { 1570 mm_zero_struct_page(page); 1571 set_page_links(page, zone, nid, pfn); 1572 init_page_count(page); 1573 page_mapcount_reset(page); 1574 page_cpupid_reset_last(page); 1575 page_kasan_tag_reset(page); 1576 1577 INIT_LIST_HEAD(&page->lru); 1578 #ifdef WANT_PAGE_VIRTUAL 1579 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1580 if (!is_highmem_idx(zone)) 1581 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1582 #endif 1583 } 1584 1585 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1586 static void __meminit init_reserved_page(unsigned long pfn) 1587 { 1588 pg_data_t *pgdat; 1589 int nid, zid; 1590 1591 if (!early_page_uninitialised(pfn)) 1592 return; 1593 1594 nid = early_pfn_to_nid(pfn); 1595 pgdat = NODE_DATA(nid); 1596 1597 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1598 struct zone *zone = &pgdat->node_zones[zid]; 1599 1600 if (zone_spans_pfn(zone, pfn)) 1601 break; 1602 } 1603 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); 1604 } 1605 #else 1606 static inline void init_reserved_page(unsigned long pfn) 1607 { 1608 } 1609 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1610 1611 /* 1612 * Initialised pages do not have PageReserved set. This function is 1613 * called for each range allocated by the bootmem allocator and 1614 * marks the pages PageReserved. The remaining valid pages are later 1615 * sent to the buddy page allocator. 1616 */ 1617 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) 1618 { 1619 unsigned long start_pfn = PFN_DOWN(start); 1620 unsigned long end_pfn = PFN_UP(end); 1621 1622 for (; start_pfn < end_pfn; start_pfn++) { 1623 if (pfn_valid(start_pfn)) { 1624 struct page *page = pfn_to_page(start_pfn); 1625 1626 init_reserved_page(start_pfn); 1627 1628 /* Avoid false-positive PageTail() */ 1629 INIT_LIST_HEAD(&page->lru); 1630 1631 /* 1632 * no need for atomic set_bit because the struct 1633 * page is not visible yet so nobody should 1634 * access it yet. 
1635 */ 1636 __SetPageReserved(page); 1637 } 1638 } 1639 } 1640 1641 static void __free_pages_ok(struct page *page, unsigned int order, 1642 fpi_t fpi_flags) 1643 { 1644 unsigned long flags; 1645 int migratetype; 1646 unsigned long pfn = page_to_pfn(page); 1647 struct zone *zone = page_zone(page); 1648 1649 if (!free_pages_prepare(page, order, true, fpi_flags)) 1650 return; 1651 1652 migratetype = get_pfnblock_migratetype(page, pfn); 1653 1654 spin_lock_irqsave(&zone->lock, flags); 1655 if (unlikely(has_isolate_pageblock(zone) || 1656 is_migrate_isolate(migratetype))) { 1657 migratetype = get_pfnblock_migratetype(page, pfn); 1658 } 1659 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1660 spin_unlock_irqrestore(&zone->lock, flags); 1661 1662 __count_vm_events(PGFREE, 1 << order); 1663 } 1664 1665 void __free_pages_core(struct page *page, unsigned int order) 1666 { 1667 unsigned int nr_pages = 1 << order; 1668 struct page *p = page; 1669 unsigned int loop; 1670 1671 /* 1672 * When initializing the memmap, __init_single_page() sets the refcount 1673 * of all pages to 1 ("allocated"/"not free"). We have to set the 1674 * refcount of all involved pages to 0. 1675 */ 1676 prefetchw(p); 1677 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { 1678 prefetchw(p + 1); 1679 __ClearPageReserved(p); 1680 set_page_count(p, 0); 1681 } 1682 __ClearPageReserved(p); 1683 set_page_count(p, 0); 1684 1685 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1686 1687 /* 1688 * Bypass PCP and place fresh pages right to the tail, primarily 1689 * relevant for memory onlining. 1690 */ 1691 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON); 1692 } 1693 1694 #ifdef CONFIG_NUMA 1695 1696 /* 1697 * During memory init memblocks map pfns to nids. The search is expensive and 1698 * this caches recent lookups. The implementation of __early_pfn_to_nid 1699 * treats start/end as pfns. 1700 */ 1701 struct mminit_pfnnid_cache { 1702 unsigned long last_start; 1703 unsigned long last_end; 1704 int last_nid; 1705 }; 1706 1707 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 1708 1709 /* 1710 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 1711 */ 1712 static int __meminit __early_pfn_to_nid(unsigned long pfn, 1713 struct mminit_pfnnid_cache *state) 1714 { 1715 unsigned long start_pfn, end_pfn; 1716 int nid; 1717 1718 if (state->last_start <= pfn && pfn < state->last_end) 1719 return state->last_nid; 1720 1721 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 1722 if (nid != NUMA_NO_NODE) { 1723 state->last_start = start_pfn; 1724 state->last_end = end_pfn; 1725 state->last_nid = nid; 1726 } 1727 1728 return nid; 1729 } 1730 1731 int __meminit early_pfn_to_nid(unsigned long pfn) 1732 { 1733 static DEFINE_SPINLOCK(early_pfn_lock); 1734 int nid; 1735 1736 spin_lock(&early_pfn_lock); 1737 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 1738 if (nid < 0) 1739 nid = first_online_node; 1740 spin_unlock(&early_pfn_lock); 1741 1742 return nid; 1743 } 1744 #endif /* CONFIG_NUMA */ 1745 1746 void __init memblock_free_pages(struct page *page, unsigned long pfn, 1747 unsigned int order) 1748 { 1749 if (early_page_uninitialised(pfn)) 1750 return; 1751 __free_pages_core(page, order); 1752 } 1753 1754 /* 1755 * Check that the whole (or subset of) a pageblock given by the interval of 1756 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1757 * with the migration of free compaction scanner. 
1758 * 1759 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1760 * 1761 * It's possible on some configurations to have a setup like node0 node1 node0 1762 * i.e. it's possible that all pages within a zones range of pages do not 1763 * belong to a single zone. We assume that a border between node0 and node1 1764 * can occur within a single pageblock, but not a node0 node1 node0 1765 * interleaving within a single pageblock. It is therefore sufficient to check 1766 * the first and last page of a pageblock and avoid checking each individual 1767 * page in a pageblock. 1768 */ 1769 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1770 unsigned long end_pfn, struct zone *zone) 1771 { 1772 struct page *start_page; 1773 struct page *end_page; 1774 1775 /* end_pfn is one past the range we are checking */ 1776 end_pfn--; 1777 1778 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) 1779 return NULL; 1780 1781 start_page = pfn_to_online_page(start_pfn); 1782 if (!start_page) 1783 return NULL; 1784 1785 if (page_zone(start_page) != zone) 1786 return NULL; 1787 1788 end_page = pfn_to_page(end_pfn); 1789 1790 /* This gives a shorter code than deriving page_zone(end_page) */ 1791 if (page_zone_id(start_page) != page_zone_id(end_page)) 1792 return NULL; 1793 1794 return start_page; 1795 } 1796 1797 void set_zone_contiguous(struct zone *zone) 1798 { 1799 unsigned long block_start_pfn = zone->zone_start_pfn; 1800 unsigned long block_end_pfn; 1801 1802 block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages); 1803 for (; block_start_pfn < zone_end_pfn(zone); 1804 block_start_pfn = block_end_pfn, 1805 block_end_pfn += pageblock_nr_pages) { 1806 1807 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); 1808 1809 if (!__pageblock_pfn_to_page(block_start_pfn, 1810 block_end_pfn, zone)) 1811 return; 1812 cond_resched(); 1813 } 1814 1815 /* We confirm that there is no hole */ 1816 zone->contiguous = true; 1817 } 1818 1819 void clear_zone_contiguous(struct zone *zone) 1820 { 1821 zone->contiguous = false; 1822 } 1823 1824 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1825 static void __init deferred_free_range(unsigned long pfn, 1826 unsigned long nr_pages) 1827 { 1828 struct page *page; 1829 unsigned long i; 1830 1831 if (!nr_pages) 1832 return; 1833 1834 page = pfn_to_page(pfn); 1835 1836 /* Free a large naturally-aligned chunk if possible */ 1837 if (nr_pages == pageblock_nr_pages && 1838 (pfn & (pageblock_nr_pages - 1)) == 0) { 1839 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1840 __free_pages_core(page, pageblock_order); 1841 return; 1842 } 1843 1844 for (i = 0; i < nr_pages; i++, page++, pfn++) { 1845 if ((pfn & (pageblock_nr_pages - 1)) == 0) 1846 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1847 __free_pages_core(page, 0); 1848 } 1849 } 1850 1851 /* Completion tracking for deferred_init_memmap() threads */ 1852 static atomic_t pgdat_init_n_undone __initdata; 1853 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); 1854 1855 static inline void __init pgdat_init_report_one_done(void) 1856 { 1857 if (atomic_dec_and_test(&pgdat_init_n_undone)) 1858 complete(&pgdat_init_all_done_comp); 1859 } 1860 1861 /* 1862 * Returns true if page needs to be initialized or freed to buddy allocator. 1863 * 1864 * First we check if pfn is valid on architectures where it is possible to have 1865 * holes within pageblock_nr_pages. On systems where it is not possible, this 1866 * function is optimized out. 
1867 * 1868 * Then, we check if a current large page is valid by only checking the validity 1869 * of the head pfn. 1870 */ 1871 static inline bool __init deferred_pfn_valid(unsigned long pfn) 1872 { 1873 if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn)) 1874 return false; 1875 return true; 1876 } 1877 1878 /* 1879 * Free pages to buddy allocator. Try to free aligned pages in 1880 * pageblock_nr_pages sizes. 1881 */ 1882 static void __init deferred_free_pages(unsigned long pfn, 1883 unsigned long end_pfn) 1884 { 1885 unsigned long nr_pgmask = pageblock_nr_pages - 1; 1886 unsigned long nr_free = 0; 1887 1888 for (; pfn < end_pfn; pfn++) { 1889 if (!deferred_pfn_valid(pfn)) { 1890 deferred_free_range(pfn - nr_free, nr_free); 1891 nr_free = 0; 1892 } else if (!(pfn & nr_pgmask)) { 1893 deferred_free_range(pfn - nr_free, nr_free); 1894 nr_free = 1; 1895 } else { 1896 nr_free++; 1897 } 1898 } 1899 /* Free the last block of pages to allocator */ 1900 deferred_free_range(pfn - nr_free, nr_free); 1901 } 1902 1903 /* 1904 * Initialize struct pages. We minimize pfn page lookups and scheduler checks 1905 * by performing it only once every pageblock_nr_pages. 1906 * Return number of pages initialized. 1907 */ 1908 static unsigned long __init deferred_init_pages(struct zone *zone, 1909 unsigned long pfn, 1910 unsigned long end_pfn) 1911 { 1912 unsigned long nr_pgmask = pageblock_nr_pages - 1; 1913 int nid = zone_to_nid(zone); 1914 unsigned long nr_pages = 0; 1915 int zid = zone_idx(zone); 1916 struct page *page = NULL; 1917 1918 for (; pfn < end_pfn; pfn++) { 1919 if (!deferred_pfn_valid(pfn)) { 1920 page = NULL; 1921 continue; 1922 } else if (!page || !(pfn & nr_pgmask)) { 1923 page = pfn_to_page(pfn); 1924 } else { 1925 page++; 1926 } 1927 __init_single_page(page, pfn, zid, nid); 1928 nr_pages++; 1929 } 1930 return (nr_pages); 1931 } 1932 1933 /* 1934 * This function is meant to pre-load the iterator for the zone init. 1935 * Specifically it walks through the ranges until we are caught up to the 1936 * first_init_pfn value and exits there. If we never encounter the value we 1937 * return false indicating there are no valid ranges left. 1938 */ 1939 static bool __init 1940 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, 1941 unsigned long *spfn, unsigned long *epfn, 1942 unsigned long first_init_pfn) 1943 { 1944 u64 j; 1945 1946 /* 1947 * Start out by walking through the ranges in this zone that have 1948 * already been initialized. We don't need to do anything with them 1949 * so we just need to flush them out of the system. 1950 */ 1951 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { 1952 if (*epfn <= first_init_pfn) 1953 continue; 1954 if (*spfn < first_init_pfn) 1955 *spfn = first_init_pfn; 1956 *i = j; 1957 return true; 1958 } 1959 1960 return false; 1961 } 1962 1963 /* 1964 * Initialize and free pages. We do it in two loops: first we initialize 1965 * struct page, then free to buddy allocator, because while we are 1966 * freeing pages we can access pages that are ahead (computing buddy 1967 * page in __free_one_page()). 1968 * 1969 * In order to try and keep some memory in the cache we have the loop 1970 * broken along max page order boundaries. This way we will not cause 1971 * any issues with the buddy page computation. 
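 * (the buddy of a page of order < MAX_ORDER always lies within the same
 * MAX_ORDER aligned block, so by the time a block is freed every page its
 * buddy computation can touch has already been initialised).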
1972 */ 1973 static unsigned long __init 1974 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, 1975 unsigned long *end_pfn) 1976 { 1977 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); 1978 unsigned long spfn = *start_pfn, epfn = *end_pfn; 1979 unsigned long nr_pages = 0; 1980 u64 j = *i; 1981 1982 /* First we loop through and initialize the page values */ 1983 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { 1984 unsigned long t; 1985 1986 if (mo_pfn <= *start_pfn) 1987 break; 1988 1989 t = min(mo_pfn, *end_pfn); 1990 nr_pages += deferred_init_pages(zone, *start_pfn, t); 1991 1992 if (mo_pfn < *end_pfn) { 1993 *start_pfn = mo_pfn; 1994 break; 1995 } 1996 } 1997 1998 /* Reset values and now loop through freeing pages as needed */ 1999 swap(j, *i); 2000 2001 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { 2002 unsigned long t; 2003 2004 if (mo_pfn <= spfn) 2005 break; 2006 2007 t = min(mo_pfn, epfn); 2008 deferred_free_pages(spfn, t); 2009 2010 if (mo_pfn <= epfn) 2011 break; 2012 } 2013 2014 return nr_pages; 2015 } 2016 2017 static void __init 2018 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 2019 void *arg) 2020 { 2021 unsigned long spfn, epfn; 2022 struct zone *zone = arg; 2023 u64 i; 2024 2025 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); 2026 2027 /* 2028 * Initialize and free pages in MAX_ORDER sized increments so that we 2029 * can avoid introducing any issues with the buddy allocator. 2030 */ 2031 while (spfn < end_pfn) { 2032 deferred_init_maxorder(&i, zone, &spfn, &epfn); 2033 cond_resched(); 2034 } 2035 } 2036 2037 /* An arch may override for more concurrency. */ 2038 __weak int __init 2039 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 2040 { 2041 return 1; 2042 } 2043 2044 /* Initialise remaining memory on a node */ 2045 static int __init deferred_init_memmap(void *data) 2046 { 2047 pg_data_t *pgdat = data; 2048 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2049 unsigned long spfn = 0, epfn = 0; 2050 unsigned long first_init_pfn, flags; 2051 unsigned long start = jiffies; 2052 struct zone *zone; 2053 int zid, max_threads; 2054 u64 i; 2055 2056 /* Bind memory initialisation thread to a local node if possible */ 2057 if (!cpumask_empty(cpumask)) 2058 set_cpus_allowed_ptr(current, cpumask); 2059 2060 pgdat_resize_lock(pgdat, &flags); 2061 first_init_pfn = pgdat->first_deferred_pfn; 2062 if (first_init_pfn == ULONG_MAX) { 2063 pgdat_resize_unlock(pgdat, &flags); 2064 pgdat_init_report_one_done(); 2065 return 0; 2066 } 2067 2068 /* Sanity check boundaries */ 2069 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 2070 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 2071 pgdat->first_deferred_pfn = ULONG_MAX; 2072 2073 /* 2074 * Once we unlock here, the zone cannot be grown anymore, thus if an 2075 * interrupt thread must allocate this early in boot, zone must be 2076 * pre-grown prior to start of deferred page initialization. 
2077 */ 2078 pgdat_resize_unlock(pgdat, &flags); 2079 2080 /* Only the highest zone is deferred so find it */ 2081 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 2082 zone = pgdat->node_zones + zid; 2083 if (first_init_pfn < zone_end_pfn(zone)) 2084 break; 2085 } 2086 2087 /* If the zone is empty somebody else may have cleared out the zone */ 2088 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2089 first_init_pfn)) 2090 goto zone_empty; 2091 2092 max_threads = deferred_page_init_max_threads(cpumask); 2093 2094 while (spfn < epfn) { 2095 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); 2096 struct padata_mt_job job = { 2097 .thread_fn = deferred_init_memmap_chunk, 2098 .fn_arg = zone, 2099 .start = spfn, 2100 .size = epfn_align - spfn, 2101 .align = PAGES_PER_SECTION, 2102 .min_chunk = PAGES_PER_SECTION, 2103 .max_threads = max_threads, 2104 }; 2105 2106 padata_do_multithreaded(&job); 2107 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2108 epfn_align); 2109 } 2110 zone_empty: 2111 /* Sanity check that the next zone really is unpopulated */ 2112 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); 2113 2114 pr_info("node %d deferred pages initialised in %ums\n", 2115 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 2116 2117 pgdat_init_report_one_done(); 2118 return 0; 2119 } 2120 2121 /* 2122 * If this zone has deferred pages, try to grow it by initializing enough 2123 * deferred pages to satisfy the allocation specified by order, rounded up to 2124 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2125 * of SECTION_SIZE bytes by initializing struct pages in increments of 2126 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2127 * 2128 * Return true when zone was grown, otherwise return false. We return true even 2129 * when we grow less than requested, to let the caller decide if there are 2130 * enough pages to satisfy the allocation. 2131 * 2132 * Note: We use noinline because this function is needed only during boot, and 2133 * it is called from a __ref function _deferred_grow_zone. This way we are 2134 * making sure that it is not inlined into permanent text section. 2135 */ 2136 static noinline bool __init 2137 deferred_grow_zone(struct zone *zone, unsigned int order) 2138 { 2139 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); 2140 pg_data_t *pgdat = zone->zone_pgdat; 2141 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2142 unsigned long spfn, epfn, flags; 2143 unsigned long nr_pages = 0; 2144 u64 i; 2145 2146 /* Only the last zone may have deferred pages */ 2147 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2148 return false; 2149 2150 pgdat_resize_lock(pgdat, &flags); 2151 2152 /* 2153 * If someone grew this zone while we were waiting for spinlock, return 2154 * true, as there might be enough pages already. 2155 */ 2156 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2157 pgdat_resize_unlock(pgdat, &flags); 2158 return true; 2159 } 2160 2161 /* If the zone is empty somebody else may have cleared out the zone */ 2162 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2163 first_deferred_pfn)) { 2164 pgdat->first_deferred_pfn = ULONG_MAX; 2165 pgdat_resize_unlock(pgdat, &flags); 2166 /* Retry only once. */ 2167 return first_deferred_pfn != ULONG_MAX; 2168 } 2169 2170 /* 2171 * Initialize and free pages in MAX_ORDER sized increments so 2172 * that we can avoid introducing any issues with the buddy 2173 * allocator. 
2174 */ 2175 while (spfn < epfn) { 2176 /* update our first deferred PFN for this section */ 2177 first_deferred_pfn = spfn; 2178 2179 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); 2180 touch_nmi_watchdog(); 2181 2182 /* We should only stop along section boundaries */ 2183 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) 2184 continue; 2185 2186 /* If our quota has been met we can stop here */ 2187 if (nr_pages >= nr_pages_needed) 2188 break; 2189 } 2190 2191 pgdat->first_deferred_pfn = spfn; 2192 pgdat_resize_unlock(pgdat, &flags); 2193 2194 return nr_pages > 0; 2195 } 2196 2197 /* 2198 * deferred_grow_zone() is __init, but it is called from 2199 * get_page_from_freelist() during early boot until deferred_pages permanently 2200 * disables this call. This is why we have refdata wrapper to avoid warning, 2201 * and to ensure that the function body gets unloaded. 2202 */ 2203 static bool __ref 2204 _deferred_grow_zone(struct zone *zone, unsigned int order) 2205 { 2206 return deferred_grow_zone(zone, order); 2207 } 2208 2209 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2210 2211 void __init page_alloc_init_late(void) 2212 { 2213 struct zone *zone; 2214 int nid; 2215 2216 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2217 2218 /* There will be num_node_state(N_MEMORY) threads */ 2219 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2220 for_each_node_state(nid, N_MEMORY) { 2221 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2222 } 2223 2224 /* Block until all are initialised */ 2225 wait_for_completion(&pgdat_init_all_done_comp); 2226 2227 /* 2228 * We initialized the rest of the deferred pages. Permanently disable 2229 * on-demand struct page initialization. 2230 */ 2231 static_branch_disable(&deferred_pages); 2232 2233 /* Reinit limits that are based on free pages after the kernel is up */ 2234 files_maxfiles_init(); 2235 #endif 2236 2237 buffer_init(); 2238 2239 /* Discard memblock private memory */ 2240 memblock_discard(); 2241 2242 for_each_node_state(nid, N_MEMORY) 2243 shuffle_free_memory(NODE_DATA(nid)); 2244 2245 for_each_populated_zone(zone) 2246 set_zone_contiguous(zone); 2247 } 2248 2249 #ifdef CONFIG_CMA 2250 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ 2251 void __init init_cma_reserved_pageblock(struct page *page) 2252 { 2253 unsigned i = pageblock_nr_pages; 2254 struct page *p = page; 2255 2256 do { 2257 __ClearPageReserved(p); 2258 set_page_count(p, 0); 2259 } while (++p, --i); 2260 2261 set_pageblock_migratetype(page, MIGRATE_CMA); 2262 2263 if (pageblock_order >= MAX_ORDER) { 2264 i = pageblock_nr_pages; 2265 p = page; 2266 do { 2267 set_page_refcounted(p); 2268 __free_pages(p, MAX_ORDER - 1); 2269 p += MAX_ORDER_NR_PAGES; 2270 } while (i -= MAX_ORDER_NR_PAGES); 2271 } else { 2272 set_page_refcounted(page); 2273 __free_pages(page, pageblock_order); 2274 } 2275 2276 adjust_managed_page_count(page, pageblock_nr_pages); 2277 page_zone(page)->cma_pages += pageblock_nr_pages; 2278 } 2279 #endif 2280 2281 /* 2282 * The order of subdivision here is critical for the IO subsystem. 2283 * Please do not alter this order without good reasons and regression 2284 * testing. Specifically, as large blocks of memory are subdivided, 2285 * the order in which smaller blocks are delivered depends on the order 2286 * they're subdivided in this function. 
This is the primary factor 2287 * influencing the order in which pages are delivered to the IO 2288 * subsystem according to empirical testing, and this is also justified 2289 * by considering the behavior of a buddy system containing a single 2290 * large block of memory acted on by a series of small allocations. 2291 * This behavior is a critical factor in sglist merging's success. 2292 * 2293 * -- nyc 2294 */ 2295 static inline void expand(struct zone *zone, struct page *page, 2296 int low, int high, int migratetype) 2297 { 2298 unsigned long size = 1 << high; 2299 2300 while (high > low) { 2301 high--; 2302 size >>= 1; 2303 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 2304 2305 /* 2306 * Mark as guard pages (or page), that will allow to 2307 * merge back to allocator when buddy will be freed. 2308 * Corresponding page table entries will not be touched, 2309 * pages will stay not present in virtual address space 2310 */ 2311 if (set_page_guard(zone, &page[size], high, migratetype)) 2312 continue; 2313 2314 add_to_free_list(&page[size], zone, high, migratetype); 2315 set_buddy_order(&page[size], high); 2316 } 2317 } 2318 2319 static void check_new_page_bad(struct page *page) 2320 { 2321 if (unlikely(page->flags & __PG_HWPOISON)) { 2322 /* Don't complain about hwpoisoned pages */ 2323 page_mapcount_reset(page); /* remove PageBuddy */ 2324 return; 2325 } 2326 2327 bad_page(page, 2328 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 2329 } 2330 2331 /* 2332 * This page is about to be returned from the page allocator 2333 */ 2334 static inline int check_new_page(struct page *page) 2335 { 2336 if (likely(page_expected_state(page, 2337 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 2338 return 0; 2339 2340 check_new_page_bad(page); 2341 return 1; 2342 } 2343 2344 #ifdef CONFIG_DEBUG_VM 2345 /* 2346 * With DEBUG_VM enabled, order-0 pages are checked for expected state when 2347 * being allocated from pcp lists. With debug_pagealloc also enabled, they are 2348 * also checked when pcp lists are refilled from the free lists. 2349 */ 2350 static inline bool check_pcp_refill(struct page *page) 2351 { 2352 if (debug_pagealloc_enabled_static()) 2353 return check_new_page(page); 2354 else 2355 return false; 2356 } 2357 2358 static inline bool check_new_pcp(struct page *page) 2359 { 2360 return check_new_page(page); 2361 } 2362 #else 2363 /* 2364 * With DEBUG_VM disabled, free order-0 pages are checked for expected state 2365 * when pcp lists are being refilled from the free lists. With debug_pagealloc 2366 * enabled, they are also checked when being allocated from the pcp lists. 
2367 */ 2368 static inline bool check_pcp_refill(struct page *page) 2369 { 2370 return check_new_page(page); 2371 } 2372 static inline bool check_new_pcp(struct page *page) 2373 { 2374 if (debug_pagealloc_enabled_static()) 2375 return check_new_page(page); 2376 else 2377 return false; 2378 } 2379 #endif /* CONFIG_DEBUG_VM */ 2380 2381 static bool check_new_pages(struct page *page, unsigned int order) 2382 { 2383 int i; 2384 for (i = 0; i < (1 << order); i++) { 2385 struct page *p = page + i; 2386 2387 if (unlikely(check_new_page(p))) 2388 return true; 2389 } 2390 2391 return false; 2392 } 2393 2394 inline void post_alloc_hook(struct page *page, unsigned int order, 2395 gfp_t gfp_flags) 2396 { 2397 set_page_private(page, 0); 2398 set_page_refcounted(page); 2399 2400 arch_alloc_page(page, order); 2401 debug_pagealloc_map_pages(page, 1 << order); 2402 2403 /* 2404 * Page unpoisoning must happen before memory initialization. 2405 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 2406 * allocations and the page unpoisoning code will complain. 2407 */ 2408 kernel_unpoison_pages(page, 1 << order); 2409 2410 /* 2411 * As memory initialization might be integrated into KASAN, 2412 * kasan_alloc_pages and kernel_init_free_pages must be 2413 * kept together to avoid discrepancies in behavior. 2414 */ 2415 if (kasan_has_integrated_init()) { 2416 kasan_alloc_pages(page, order, gfp_flags); 2417 } else { 2418 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags); 2419 2420 kasan_unpoison_pages(page, order, init); 2421 if (init) 2422 kernel_init_free_pages(page, 1 << order, 2423 gfp_flags & __GFP_ZEROTAGS); 2424 } 2425 2426 set_page_owner(page, order, gfp_flags); 2427 page_table_check_alloc(page, order); 2428 } 2429 2430 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 2431 unsigned int alloc_flags) 2432 { 2433 post_alloc_hook(page, order, gfp_flags); 2434 2435 if (order && (gfp_flags & __GFP_COMP)) 2436 prep_compound_page(page, order); 2437 2438 /* 2439 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 2440 * allocate the page. The expectation is that the caller is taking 2441 * steps that will free more memory. The caller should avoid the page 2442 * being used for !PFMEMALLOC purposes. 
2443 */ 2444 if (alloc_flags & ALLOC_NO_WATERMARKS) 2445 set_page_pfmemalloc(page); 2446 else 2447 clear_page_pfmemalloc(page); 2448 } 2449 2450 /* 2451 * Go through the free lists for the given migratetype and remove 2452 * the smallest available page from the freelists 2453 */ 2454 static __always_inline 2455 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 2456 int migratetype) 2457 { 2458 unsigned int current_order; 2459 struct free_area *area; 2460 struct page *page; 2461 2462 /* Find a page of the appropriate size in the preferred list */ 2463 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 2464 area = &(zone->free_area[current_order]); 2465 page = get_page_from_free_area(area, migratetype); 2466 if (!page) 2467 continue; 2468 del_page_from_free_list(page, zone, current_order); 2469 expand(zone, page, order, current_order, migratetype); 2470 set_pcppage_migratetype(page, migratetype); 2471 return page; 2472 } 2473 2474 return NULL; 2475 } 2476 2477 2478 /* 2479 * This array describes the order lists are fallen back to when 2480 * the free lists for the desirable migrate type are depleted 2481 */ 2482 static int fallbacks[MIGRATE_TYPES][3] = { 2483 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2484 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, 2485 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2486 #ifdef CONFIG_CMA 2487 [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */ 2488 #endif 2489 #ifdef CONFIG_MEMORY_ISOLATION 2490 [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */ 2491 #endif 2492 }; 2493 2494 #ifdef CONFIG_CMA 2495 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2496 unsigned int order) 2497 { 2498 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 2499 } 2500 #else 2501 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2502 unsigned int order) { return NULL; } 2503 #endif 2504 2505 /* 2506 * Move the free pages in a range to the freelist tail of the requested type. 2507 * Note that start_page and end_pages are not aligned on a pageblock 2508 * boundary. If alignment is required, use move_freepages_block() 2509 */ 2510 static int move_freepages(struct zone *zone, 2511 unsigned long start_pfn, unsigned long end_pfn, 2512 int migratetype, int *num_movable) 2513 { 2514 struct page *page; 2515 unsigned long pfn; 2516 unsigned int order; 2517 int pages_moved = 0; 2518 2519 for (pfn = start_pfn; pfn <= end_pfn;) { 2520 page = pfn_to_page(pfn); 2521 if (!PageBuddy(page)) { 2522 /* 2523 * We assume that pages that could be isolated for 2524 * migration are movable. But we don't actually try 2525 * isolating, as that would be expensive. 
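 * Instead, pages that are on the LRU or marked __PageMovable are simply
 * counted as movable below.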
2526 */ 2527 if (num_movable && 2528 (PageLRU(page) || __PageMovable(page))) 2529 (*num_movable)++; 2530 pfn++; 2531 continue; 2532 } 2533 2534 /* Make sure we are not inadvertently changing nodes */ 2535 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 2536 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 2537 2538 order = buddy_order(page); 2539 move_to_free_list(page, zone, order, migratetype); 2540 pfn += 1 << order; 2541 pages_moved += 1 << order; 2542 } 2543 2544 return pages_moved; 2545 } 2546 2547 int move_freepages_block(struct zone *zone, struct page *page, 2548 int migratetype, int *num_movable) 2549 { 2550 unsigned long start_pfn, end_pfn, pfn; 2551 2552 if (num_movable) 2553 *num_movable = 0; 2554 2555 pfn = page_to_pfn(page); 2556 start_pfn = pfn & ~(pageblock_nr_pages - 1); 2557 end_pfn = start_pfn + pageblock_nr_pages - 1; 2558 2559 /* Do not cross zone boundaries */ 2560 if (!zone_spans_pfn(zone, start_pfn)) 2561 start_pfn = pfn; 2562 if (!zone_spans_pfn(zone, end_pfn)) 2563 return 0; 2564 2565 return move_freepages(zone, start_pfn, end_pfn, migratetype, 2566 num_movable); 2567 } 2568 2569 static void change_pageblock_range(struct page *pageblock_page, 2570 int start_order, int migratetype) 2571 { 2572 int nr_pageblocks = 1 << (start_order - pageblock_order); 2573 2574 while (nr_pageblocks--) { 2575 set_pageblock_migratetype(pageblock_page, migratetype); 2576 pageblock_page += pageblock_nr_pages; 2577 } 2578 } 2579 2580 /* 2581 * When we are falling back to another migratetype during allocation, try to 2582 * steal extra free pages from the same pageblocks to satisfy further 2583 * allocations, instead of polluting multiple pageblocks. 2584 * 2585 * If we are stealing a relatively large buddy page, it is likely there will 2586 * be more free pages in the pageblock, so try to steal them all. For 2587 * reclaimable and unmovable allocations, we steal regardless of page size, 2588 * as fragmentation caused by those allocations polluting movable pageblocks 2589 * is worse than movable allocations stealing from unmovable and reclaimable 2590 * pageblocks. 2591 */ 2592 static bool can_steal_fallback(unsigned int order, int start_mt) 2593 { 2594 /* 2595 * Leaving this order check is intended, although there is 2596 * relaxed order check in next check. The reason is that 2597 * we can actually steal whole pageblock if this condition met, 2598 * but, below check doesn't guarantee it and that is just heuristic 2599 * so could be changed anytime. 2600 */ 2601 if (order >= pageblock_order) 2602 return true; 2603 2604 if (order >= pageblock_order / 2 || 2605 start_mt == MIGRATE_RECLAIMABLE || 2606 start_mt == MIGRATE_UNMOVABLE || 2607 page_group_by_mobility_disabled) 2608 return true; 2609 2610 return false; 2611 } 2612 2613 static inline bool boost_watermark(struct zone *zone) 2614 { 2615 unsigned long max_boost; 2616 2617 if (!watermark_boost_factor) 2618 return false; 2619 /* 2620 * Don't bother in zones that are unlikely to produce results. 2621 * On small machines, including kdump capture kernels running 2622 * in a small area, boosting the watermark can cause an out of 2623 * memory situation immediately. 2624 */ 2625 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2626 return false; 2627 2628 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2629 watermark_boost_factor, 10000); 2630 2631 /* 2632 * high watermark may be uninitialised if fragmentation occurs 2633 * very early in boot so do not boost. 
We do not fall 2634 * through and boost by pageblock_nr_pages as failing 2635 * allocations that early means that reclaim is not going 2636 * to help and it may even be impossible to reclaim the 2637 * boosted watermark resulting in a hang. 2638 */ 2639 if (!max_boost) 2640 return false; 2641 2642 max_boost = max(pageblock_nr_pages, max_boost); 2643 2644 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2645 max_boost); 2646 2647 return true; 2648 } 2649 2650 /* 2651 * This function implements actual steal behaviour. If order is large enough, 2652 * we can steal whole pageblock. If not, we first move freepages in this 2653 * pageblock to our migratetype and determine how many already-allocated pages 2654 * are there in the pageblock with a compatible migratetype. If at least half 2655 * of pages are free or compatible, we can change migratetype of the pageblock 2656 * itself, so pages freed in the future will be put on the correct free list. 2657 */ 2658 static void steal_suitable_fallback(struct zone *zone, struct page *page, 2659 unsigned int alloc_flags, int start_type, bool whole_block) 2660 { 2661 unsigned int current_order = buddy_order(page); 2662 int free_pages, movable_pages, alike_pages; 2663 int old_block_type; 2664 2665 old_block_type = get_pageblock_migratetype(page); 2666 2667 /* 2668 * This can happen due to races and we want to prevent broken 2669 * highatomic accounting. 2670 */ 2671 if (is_migrate_highatomic(old_block_type)) 2672 goto single_page; 2673 2674 /* Take ownership for orders >= pageblock_order */ 2675 if (current_order >= pageblock_order) { 2676 change_pageblock_range(page, current_order, start_type); 2677 goto single_page; 2678 } 2679 2680 /* 2681 * Boost watermarks to increase reclaim pressure to reduce the 2682 * likelihood of future fallbacks. Wake kswapd now as the node 2683 * may be balanced overall and kswapd will not wake naturally. 2684 */ 2685 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2686 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2687 2688 /* We are not allowed to try stealing from the whole block */ 2689 if (!whole_block) 2690 goto single_page; 2691 2692 free_pages = move_freepages_block(zone, page, start_type, 2693 &movable_pages); 2694 /* 2695 * Determine how many pages are compatible with our allocation. 2696 * For movable allocation, it's the number of movable pages which 2697 * we just obtained. For other types it's a bit more tricky. 2698 */ 2699 if (start_type == MIGRATE_MOVABLE) { 2700 alike_pages = movable_pages; 2701 } else { 2702 /* 2703 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2704 * to MOVABLE pageblock, consider all non-movable pages as 2705 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2706 * vice versa, be conservative since we can't distinguish the 2707 * exact migratetype of non-movable pages. 2708 */ 2709 if (old_block_type == MIGRATE_MOVABLE) 2710 alike_pages = pageblock_nr_pages 2711 - (free_pages + movable_pages); 2712 else 2713 alike_pages = 0; 2714 } 2715 2716 /* moving whole block can fail due to zone boundary conditions */ 2717 if (!free_pages) 2718 goto single_page; 2719 2720 /* 2721 * If a sufficient number of pages in the block are either free or of 2722 * comparable migratability as our allocation, claim the whole block. 
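 * "Sufficient" here means at least half a pageblock's worth of pages
 * (1 << (pageblock_order - 1)), unless grouping by mobility has been
 * disabled altogether.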
2723 */ 2724 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2725 page_group_by_mobility_disabled) 2726 set_pageblock_migratetype(page, start_type); 2727 2728 return; 2729 2730 single_page: 2731 move_to_free_list(page, zone, current_order, start_type); 2732 } 2733 2734 /* 2735 * Check whether there is a suitable fallback freepage with requested order. 2736 * If only_stealable is true, this function returns fallback_mt only if 2737 * we can steal other freepages all together. This would help to reduce 2738 * fragmentation due to mixed migratetype pages in one pageblock. 2739 */ 2740 int find_suitable_fallback(struct free_area *area, unsigned int order, 2741 int migratetype, bool only_stealable, bool *can_steal) 2742 { 2743 int i; 2744 int fallback_mt; 2745 2746 if (area->nr_free == 0) 2747 return -1; 2748 2749 *can_steal = false; 2750 for (i = 0;; i++) { 2751 fallback_mt = fallbacks[migratetype][i]; 2752 if (fallback_mt == MIGRATE_TYPES) 2753 break; 2754 2755 if (free_area_empty(area, fallback_mt)) 2756 continue; 2757 2758 if (can_steal_fallback(order, migratetype)) 2759 *can_steal = true; 2760 2761 if (!only_stealable) 2762 return fallback_mt; 2763 2764 if (*can_steal) 2765 return fallback_mt; 2766 } 2767 2768 return -1; 2769 } 2770 2771 /* 2772 * Reserve a pageblock for exclusive use of high-order atomic allocations if 2773 * there are no empty page blocks that contain a page with a suitable order 2774 */ 2775 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2776 unsigned int alloc_order) 2777 { 2778 int mt; 2779 unsigned long max_managed, flags; 2780 2781 /* 2782 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2783 * Check is race-prone but harmless. 2784 */ 2785 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 2786 if (zone->nr_reserved_highatomic >= max_managed) 2787 return; 2788 2789 spin_lock_irqsave(&zone->lock, flags); 2790 2791 /* Recheck the nr_reserved_highatomic limit under the lock */ 2792 if (zone->nr_reserved_highatomic >= max_managed) 2793 goto out_unlock; 2794 2795 /* Yoink! */ 2796 mt = get_pageblock_migratetype(page); 2797 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt) 2798 && !is_migrate_cma(mt)) { 2799 zone->nr_reserved_highatomic += pageblock_nr_pages; 2800 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2801 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 2802 } 2803 2804 out_unlock: 2805 spin_unlock_irqrestore(&zone->lock, flags); 2806 } 2807 2808 /* 2809 * Used when an allocation is about to fail under memory pressure. This 2810 * potentially hurts the reliability of high-order allocations when under 2811 * intense memory pressure but failed atomic allocations should be easier 2812 * to recover from than an OOM. 2813 * 2814 * If @force is true, try to unreserve a pageblock even though highatomic 2815 * pageblock is exhausted. 2816 */ 2817 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2818 bool force) 2819 { 2820 struct zonelist *zonelist = ac->zonelist; 2821 unsigned long flags; 2822 struct zoneref *z; 2823 struct zone *zone; 2824 struct page *page; 2825 int order; 2826 bool ret; 2827 2828 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2829 ac->nodemask) { 2830 /* 2831 * Preserve at least one pageblock unless memory pressure 2832 * is really high. 
2833 */ 2834 if (!force && zone->nr_reserved_highatomic <= 2835 pageblock_nr_pages) 2836 continue; 2837 2838 spin_lock_irqsave(&zone->lock, flags); 2839 for (order = 0; order < MAX_ORDER; order++) { 2840 struct free_area *area = &(zone->free_area[order]); 2841 2842 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 2843 if (!page) 2844 continue; 2845 2846 /* 2847 * In page freeing path, migratetype change is racy so 2848 * we can counter several free pages in a pageblock 2849 * in this loop although we changed the pageblock type 2850 * from highatomic to ac->migratetype. So we should 2851 * adjust the count once. 2852 */ 2853 if (is_migrate_highatomic_page(page)) { 2854 /* 2855 * It should never happen but changes to 2856 * locking could inadvertently allow a per-cpu 2857 * drain to add pages to MIGRATE_HIGHATOMIC 2858 * while unreserving so be safe and watch for 2859 * underflows. 2860 */ 2861 zone->nr_reserved_highatomic -= min( 2862 pageblock_nr_pages, 2863 zone->nr_reserved_highatomic); 2864 } 2865 2866 /* 2867 * Convert to ac->migratetype and avoid the normal 2868 * pageblock stealing heuristics. Minimally, the caller 2869 * is doing the work and needs the pages. More 2870 * importantly, if the block was always converted to 2871 * MIGRATE_UNMOVABLE or another type then the number 2872 * of pageblocks that cannot be completely freed 2873 * may increase. 2874 */ 2875 set_pageblock_migratetype(page, ac->migratetype); 2876 ret = move_freepages_block(zone, page, ac->migratetype, 2877 NULL); 2878 if (ret) { 2879 spin_unlock_irqrestore(&zone->lock, flags); 2880 return ret; 2881 } 2882 } 2883 spin_unlock_irqrestore(&zone->lock, flags); 2884 } 2885 2886 return false; 2887 } 2888 2889 /* 2890 * Try finding a free buddy page on the fallback list and put it on the free 2891 * list of requested migratetype, possibly along with other pages from the same 2892 * block, depending on fragmentation avoidance heuristics. Returns true if 2893 * fallback was found so that __rmqueue_smallest() can grab it. 2894 * 2895 * The use of signed ints for order and current_order is a deliberate 2896 * deviation from the rest of this file, to make the for loop 2897 * condition simpler. 2898 */ 2899 static __always_inline bool 2900 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2901 unsigned int alloc_flags) 2902 { 2903 struct free_area *area; 2904 int current_order; 2905 int min_order = order; 2906 struct page *page; 2907 int fallback_mt; 2908 bool can_steal; 2909 2910 /* 2911 * Do not steal pages from freelists belonging to other pageblocks 2912 * i.e. orders < pageblock_order. If there are no local zones free, 2913 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2914 */ 2915 if (alloc_flags & ALLOC_NOFRAGMENT) 2916 min_order = pageblock_order; 2917 2918 /* 2919 * Find the largest available free page in the other list. This roughly 2920 * approximates finding the pageblock with the most free pages, which 2921 * would be too costly to do exactly. 2922 */ 2923 for (current_order = MAX_ORDER - 1; current_order >= min_order; 2924 --current_order) { 2925 area = &(zone->free_area[current_order]); 2926 fallback_mt = find_suitable_fallback(area, current_order, 2927 start_migratetype, false, &can_steal); 2928 if (fallback_mt == -1) 2929 continue; 2930 2931 /* 2932 * We cannot steal all free pages from the pageblock and the 2933 * requested migratetype is movable. 
In that case it's better to 2934 * steal and split the smallest available page instead of the 2935 * largest available page, because even if the next movable 2936 * allocation falls back into a different pageblock than this 2937 * one, it won't cause permanent fragmentation. 2938 */ 2939 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2940 && current_order > order) 2941 goto find_smallest; 2942 2943 goto do_steal; 2944 } 2945 2946 return false; 2947 2948 find_smallest: 2949 for (current_order = order; current_order < MAX_ORDER; 2950 current_order++) { 2951 area = &(zone->free_area[current_order]); 2952 fallback_mt = find_suitable_fallback(area, current_order, 2953 start_migratetype, false, &can_steal); 2954 if (fallback_mt != -1) 2955 break; 2956 } 2957 2958 /* 2959 * This should not happen - we already found a suitable fallback 2960 * when looking for the largest page. 2961 */ 2962 VM_BUG_ON(current_order == MAX_ORDER); 2963 2964 do_steal: 2965 page = get_page_from_free_area(area, fallback_mt); 2966 2967 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2968 can_steal); 2969 2970 trace_mm_page_alloc_extfrag(page, order, current_order, 2971 start_migratetype, fallback_mt); 2972 2973 return true; 2974 2975 } 2976 2977 /* 2978 * Do the hard work of removing an element from the buddy allocator. 2979 * Call me with the zone->lock already held. 2980 */ 2981 static __always_inline struct page * 2982 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2983 unsigned int alloc_flags) 2984 { 2985 struct page *page; 2986 2987 if (IS_ENABLED(CONFIG_CMA)) { 2988 /* 2989 * Balance movable allocations between regular and CMA areas by 2990 * allocating from CMA when over half of the zone's free memory 2991 * is in the CMA area. 2992 */ 2993 if (alloc_flags & ALLOC_CMA && 2994 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2995 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2996 page = __rmqueue_cma_fallback(zone, order); 2997 if (page) 2998 goto out; 2999 } 3000 } 3001 retry: 3002 page = __rmqueue_smallest(zone, order, migratetype); 3003 if (unlikely(!page)) { 3004 if (alloc_flags & ALLOC_CMA) 3005 page = __rmqueue_cma_fallback(zone, order); 3006 3007 if (!page && __rmqueue_fallback(zone, order, migratetype, 3008 alloc_flags)) 3009 goto retry; 3010 } 3011 out: 3012 if (page) 3013 trace_mm_page_alloc_zone_locked(page, order, migratetype); 3014 return page; 3015 } 3016 3017 /* 3018 * Obtain a specified number of elements from the buddy allocator, all under 3019 * a single hold of the lock, for efficiency. Add them to the supplied list. 3020 * Returns the number of new pages which were placed at *list. 3021 */ 3022 static int rmqueue_bulk(struct zone *zone, unsigned int order, 3023 unsigned long count, struct list_head *list, 3024 int migratetype, unsigned int alloc_flags) 3025 { 3026 int i, allocated = 0; 3027 3028 /* 3029 * local_lock_irq held so equivalent to spin_lock_irqsave for 3030 * both PREEMPT_RT and non-PREEMPT_RT configurations. 3031 */ 3032 spin_lock(&zone->lock); 3033 for (i = 0; i < count; ++i) { 3034 struct page *page = __rmqueue(zone, order, migratetype, 3035 alloc_flags); 3036 if (unlikely(page == NULL)) 3037 break; 3038 3039 if (unlikely(check_pcp_refill(page))) 3040 continue; 3041 3042 /* 3043 * Split buddy pages returned by expand() are received here in 3044 * physical page order. The page is added to the tail of 3045 * caller's list. From the callers perspective, the linked list 3046 * is ordered by page number under some conditions. 
This is 3047 * useful for IO devices that can forward direction from the 3048 * head, thus also in the physical page order. This is useful 3049 * for IO devices that can merge IO requests if the physical 3050 * pages are ordered properly. 3051 */ 3052 list_add_tail(&page->lru, list); 3053 allocated++; 3054 if (is_migrate_cma(get_pcppage_migratetype(page))) 3055 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 3056 -(1 << order)); 3057 } 3058 3059 /* 3060 * i pages were removed from the buddy list even if some leak due 3061 * to check_pcp_refill failing so adjust NR_FREE_PAGES based 3062 * on i. Do not confuse with 'allocated' which is the number of 3063 * pages added to the pcp list. 3064 */ 3065 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 3066 spin_unlock(&zone->lock); 3067 return allocated; 3068 } 3069 3070 #ifdef CONFIG_NUMA 3071 /* 3072 * Called from the vmstat counter updater to drain pagesets of this 3073 * currently executing processor on remote nodes after they have 3074 * expired. 3075 * 3076 * Note that this function must be called with the thread pinned to 3077 * a single processor. 3078 */ 3079 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 3080 { 3081 unsigned long flags; 3082 int to_drain, batch; 3083 3084 local_lock_irqsave(&pagesets.lock, flags); 3085 batch = READ_ONCE(pcp->batch); 3086 to_drain = min(pcp->count, batch); 3087 if (to_drain > 0) 3088 free_pcppages_bulk(zone, to_drain, pcp); 3089 local_unlock_irqrestore(&pagesets.lock, flags); 3090 } 3091 #endif 3092 3093 /* 3094 * Drain pcplists of the indicated processor and zone. 3095 * 3096 * The processor must either be the current processor and the 3097 * thread pinned to the current processor or a processor that 3098 * is not online. 3099 */ 3100 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 3101 { 3102 unsigned long flags; 3103 struct per_cpu_pages *pcp; 3104 3105 local_lock_irqsave(&pagesets.lock, flags); 3106 3107 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 3108 if (pcp->count) 3109 free_pcppages_bulk(zone, pcp->count, pcp); 3110 3111 local_unlock_irqrestore(&pagesets.lock, flags); 3112 } 3113 3114 /* 3115 * Drain pcplists of all zones on the indicated processor. 3116 * 3117 * The processor must either be the current processor and the 3118 * thread pinned to the current processor or a processor that 3119 * is not online. 3120 */ 3121 static void drain_pages(unsigned int cpu) 3122 { 3123 struct zone *zone; 3124 3125 for_each_populated_zone(zone) { 3126 drain_pages_zone(cpu, zone); 3127 } 3128 } 3129 3130 /* 3131 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 3132 * 3133 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 3134 * the single zone's pages. 3135 */ 3136 void drain_local_pages(struct zone *zone) 3137 { 3138 int cpu = smp_processor_id(); 3139 3140 if (zone) 3141 drain_pages_zone(cpu, zone); 3142 else 3143 drain_pages(cpu); 3144 } 3145 3146 static void drain_local_pages_wq(struct work_struct *work) 3147 { 3148 struct pcpu_drain *drain; 3149 3150 drain = container_of(work, struct pcpu_drain, work); 3151 3152 /* 3153 * drain_all_pages doesn't use proper cpu hotplug protection so 3154 * we can race with cpu offline when the WQ can move this from 3155 * a cpu pinned worker to an unbound one. We can operate on a different 3156 * cpu which is alright but we also have to make sure to not move to 3157 * a different one. 
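 * The migrate_disable()/migrate_enable() pair below pins the worker to
 * whatever CPU it is currently running on for the duration of the drain.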
3158 */ 3159 migrate_disable(); 3160 drain_local_pages(drain->zone); 3161 migrate_enable(); 3162 } 3163 3164 /* 3165 * The implementation of drain_all_pages(), exposing an extra parameter to 3166 * drain on all cpus. 3167 * 3168 * drain_all_pages() is optimized to only execute on cpus where pcplists are 3169 * not empty. The check for non-emptiness can however race with a free to 3170 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 3171 * that need the guarantee that every CPU has drained can disable the 3172 * optimizing racy check. 3173 */ 3174 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 3175 { 3176 int cpu; 3177 3178 /* 3179 * Allocate in the BSS so we won't require allocation in 3180 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 3181 */ 3182 static cpumask_t cpus_with_pcps; 3183 3184 /* 3185 * Make sure nobody triggers this path before mm_percpu_wq is fully 3186 * initialized. 3187 */ 3188 if (WARN_ON_ONCE(!mm_percpu_wq)) 3189 return; 3190 3191 /* 3192 * Do not drain if one is already in progress unless it's specific to 3193 * a zone. Such callers are primarily CMA and memory hotplug and need 3194 * the drain to be complete when the call returns. 3195 */ 3196 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 3197 if (!zone) 3198 return; 3199 mutex_lock(&pcpu_drain_mutex); 3200 } 3201 3202 /* 3203 * We don't care about racing with CPU hotplug event 3204 * as offline notification will cause the notified 3205 * cpu to drain that CPU pcps and on_each_cpu_mask 3206 * disables preemption as part of its processing 3207 */ 3208 for_each_online_cpu(cpu) { 3209 struct per_cpu_pages *pcp; 3210 struct zone *z; 3211 bool has_pcps = false; 3212 3213 if (force_all_cpus) { 3214 /* 3215 * The pcp.count check is racy, some callers need a 3216 * guarantee that no cpu is missed. 3217 */ 3218 has_pcps = true; 3219 } else if (zone) { 3220 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 3221 if (pcp->count) 3222 has_pcps = true; 3223 } else { 3224 for_each_populated_zone(z) { 3225 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 3226 if (pcp->count) { 3227 has_pcps = true; 3228 break; 3229 } 3230 } 3231 } 3232 3233 if (has_pcps) 3234 cpumask_set_cpu(cpu, &cpus_with_pcps); 3235 else 3236 cpumask_clear_cpu(cpu, &cpus_with_pcps); 3237 } 3238 3239 for_each_cpu(cpu, &cpus_with_pcps) { 3240 struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu); 3241 3242 drain->zone = zone; 3243 INIT_WORK(&drain->work, drain_local_pages_wq); 3244 queue_work_on(cpu, mm_percpu_wq, &drain->work); 3245 } 3246 for_each_cpu(cpu, &cpus_with_pcps) 3247 flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work); 3248 3249 mutex_unlock(&pcpu_drain_mutex); 3250 } 3251 3252 /* 3253 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 3254 * 3255 * When zone parameter is non-NULL, spill just the single zone's pages. 3256 * 3257 * Note that this can be extremely slow as the draining happens in a workqueue. 3258 */ 3259 void drain_all_pages(struct zone *zone) 3260 { 3261 __drain_all_pages(zone, false); 3262 } 3263 3264 #ifdef CONFIG_HIBERNATION 3265 3266 /* 3267 * Touch the watchdog for every WD_PAGE_COUNT pages. 
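 * This keeps the NMI watchdog quiet while mark_free_pages() walks a large
 * zone with interrupts disabled.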
3268 */ 3269 #define WD_PAGE_COUNT (128*1024) 3270 3271 void mark_free_pages(struct zone *zone) 3272 { 3273 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; 3274 unsigned long flags; 3275 unsigned int order, t; 3276 struct page *page; 3277 3278 if (zone_is_empty(zone)) 3279 return; 3280 3281 spin_lock_irqsave(&zone->lock, flags); 3282 3283 max_zone_pfn = zone_end_pfn(zone); 3284 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 3285 if (pfn_valid(pfn)) { 3286 page = pfn_to_page(pfn); 3287 3288 if (!--page_count) { 3289 touch_nmi_watchdog(); 3290 page_count = WD_PAGE_COUNT; 3291 } 3292 3293 if (page_zone(page) != zone) 3294 continue; 3295 3296 if (!swsusp_page_is_forbidden(page)) 3297 swsusp_unset_page_free(page); 3298 } 3299 3300 for_each_migratetype_order(order, t) { 3301 list_for_each_entry(page, 3302 &zone->free_area[order].free_list[t], lru) { 3303 unsigned long i; 3304 3305 pfn = page_to_pfn(page); 3306 for (i = 0; i < (1UL << order); i++) { 3307 if (!--page_count) { 3308 touch_nmi_watchdog(); 3309 page_count = WD_PAGE_COUNT; 3310 } 3311 swsusp_set_page_free(pfn_to_page(pfn + i)); 3312 } 3313 } 3314 } 3315 spin_unlock_irqrestore(&zone->lock, flags); 3316 } 3317 #endif /* CONFIG_PM */ 3318 3319 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 3320 unsigned int order) 3321 { 3322 int migratetype; 3323 3324 if (!free_pcp_prepare(page, order)) 3325 return false; 3326 3327 migratetype = get_pfnblock_migratetype(page, pfn); 3328 set_pcppage_migratetype(page, migratetype); 3329 return true; 3330 } 3331 3332 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch) 3333 { 3334 int min_nr_free, max_nr_free; 3335 3336 /* Check for PCP disabled or boot pageset */ 3337 if (unlikely(high < batch)) 3338 return 1; 3339 3340 /* Leave at least pcp->batch pages on the list */ 3341 min_nr_free = batch; 3342 max_nr_free = high - batch; 3343 3344 /* 3345 * Double the number of pages freed each time there is subsequent 3346 * freeing of pages without any allocation. 
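 * For example, with pcp->batch of 64, back-to-back frees release 64, 128,
 * 256, ... pages per call, clamped between batch and high - batch.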
3347 */ 3348 batch <<= pcp->free_factor; 3349 if (batch < max_nr_free) 3350 pcp->free_factor++; 3351 batch = clamp(batch, min_nr_free, max_nr_free); 3352 3353 return batch; 3354 } 3355 3356 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone) 3357 { 3358 int high = READ_ONCE(pcp->high); 3359 3360 if (unlikely(!high)) 3361 return 0; 3362 3363 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 3364 return high; 3365 3366 /* 3367 * If reclaim is active, limit the number of pages that can be 3368 * stored on pcp lists 3369 */ 3370 return min(READ_ONCE(pcp->batch) << 2, high); 3371 } 3372 3373 static void free_unref_page_commit(struct page *page, unsigned long pfn, 3374 int migratetype, unsigned int order) 3375 { 3376 struct zone *zone = page_zone(page); 3377 struct per_cpu_pages *pcp; 3378 int high; 3379 int pindex; 3380 3381 __count_vm_event(PGFREE); 3382 pcp = this_cpu_ptr(zone->per_cpu_pageset); 3383 pindex = order_to_pindex(migratetype, order); 3384 list_add(&page->lru, &pcp->lists[pindex]); 3385 pcp->count += 1 << order; 3386 high = nr_pcp_high(pcp, zone); 3387 if (pcp->count >= high) { 3388 int batch = READ_ONCE(pcp->batch); 3389 3390 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch), pcp); 3391 } 3392 } 3393 3394 /* 3395 * Free a pcp page 3396 */ 3397 void free_unref_page(struct page *page, unsigned int order) 3398 { 3399 unsigned long flags; 3400 unsigned long pfn = page_to_pfn(page); 3401 int migratetype; 3402 3403 if (!free_unref_page_prepare(page, pfn, order)) 3404 return; 3405 3406 /* 3407 * We only track unmovable, reclaimable and movable on pcp lists. 3408 * Place ISOLATE pages on the isolated list because they are being 3409 * offlined but treat HIGHATOMIC as movable pages so we can get those 3410 * areas back if necessary. Otherwise, we may have to free 3411 * excessively into the page allocator 3412 */ 3413 migratetype = get_pcppage_migratetype(page); 3414 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 3415 if (unlikely(is_migrate_isolate(migratetype))) { 3416 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 3417 return; 3418 } 3419 migratetype = MIGRATE_MOVABLE; 3420 } 3421 3422 local_lock_irqsave(&pagesets.lock, flags); 3423 free_unref_page_commit(page, pfn, migratetype, order); 3424 local_unlock_irqrestore(&pagesets.lock, flags); 3425 } 3426 3427 /* 3428 * Free a list of 0-order pages 3429 */ 3430 void free_unref_page_list(struct list_head *list) 3431 { 3432 struct page *page, *next; 3433 unsigned long flags, pfn; 3434 int batch_count = 0; 3435 int migratetype; 3436 3437 /* Prepare pages for freeing */ 3438 list_for_each_entry_safe(page, next, list, lru) { 3439 pfn = page_to_pfn(page); 3440 if (!free_unref_page_prepare(page, pfn, 0)) { 3441 list_del(&page->lru); 3442 continue; 3443 } 3444 3445 /* 3446 * Free isolated pages directly to the allocator, see 3447 * comment in free_unref_page. 3448 */ 3449 migratetype = get_pcppage_migratetype(page); 3450 if (unlikely(is_migrate_isolate(migratetype))) { 3451 list_del(&page->lru); 3452 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 3453 continue; 3454 } 3455 3456 set_page_private(page, pfn); 3457 } 3458 3459 local_lock_irqsave(&pagesets.lock, flags); 3460 list_for_each_entry_safe(page, next, list, lru) { 3461 pfn = page_private(page); 3462 set_page_private(page, 0); 3463 3464 /* 3465 * Non-isolated types over MIGRATE_PCPTYPES get added 3466 * to the MIGRATE_MOVABLE pcp list. 
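 * (e.g. HIGHATOMIC pages), matching the handling in free_unref_page().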
3467 */ 3468 migratetype = get_pcppage_migratetype(page); 3469 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 3470 migratetype = MIGRATE_MOVABLE; 3471 3472 trace_mm_page_free_batched(page); 3473 free_unref_page_commit(page, pfn, migratetype, 0); 3474 3475 /* 3476 * Guard against excessive IRQ disabled times when we get 3477 * a large list of pages to free. 3478 */ 3479 if (++batch_count == SWAP_CLUSTER_MAX) { 3480 local_unlock_irqrestore(&pagesets.lock, flags); 3481 batch_count = 0; 3482 local_lock_irqsave(&pagesets.lock, flags); 3483 } 3484 } 3485 local_unlock_irqrestore(&pagesets.lock, flags); 3486 } 3487 3488 /* 3489 * split_page takes a non-compound higher-order page, and splits it into 3490 * n (1<<order) sub-pages: page[0..n] 3491 * Each sub-page must be freed individually. 3492 * 3493 * Note: this is probably too low level an operation for use in drivers. 3494 * Please consult with lkml before using this in your driver. 3495 */ 3496 void split_page(struct page *page, unsigned int order) 3497 { 3498 int i; 3499 3500 VM_BUG_ON_PAGE(PageCompound(page), page); 3501 VM_BUG_ON_PAGE(!page_count(page), page); 3502 3503 for (i = 1; i < (1 << order); i++) 3504 set_page_refcounted(page + i); 3505 split_page_owner(page, 1 << order); 3506 split_page_memcg(page, 1 << order); 3507 } 3508 EXPORT_SYMBOL_GPL(split_page); 3509 3510 int __isolate_free_page(struct page *page, unsigned int order) 3511 { 3512 unsigned long watermark; 3513 struct zone *zone; 3514 int mt; 3515 3516 BUG_ON(!PageBuddy(page)); 3517 3518 zone = page_zone(page); 3519 mt = get_pageblock_migratetype(page); 3520 3521 if (!is_migrate_isolate(mt)) { 3522 /* 3523 * Obey watermarks as if the page was being allocated. We can 3524 * emulate a high-order watermark check with a raised order-0 3525 * watermark, because we already know our high-order page 3526 * exists. 3527 */ 3528 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3529 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3530 return 0; 3531 3532 __mod_zone_freepage_state(zone, -(1UL << order), mt); 3533 } 3534 3535 /* Remove page from free list */ 3536 3537 del_page_from_free_list(page, zone, order); 3538 3539 /* 3540 * Set the pageblock if the isolated page is at least half of a 3541 * pageblock 3542 */ 3543 if (order >= pageblock_order - 1) { 3544 struct page *endpage = page + (1 << order) - 1; 3545 for (; page < endpage; page += pageblock_nr_pages) { 3546 int mt = get_pageblock_migratetype(page); 3547 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) 3548 && !is_migrate_highatomic(mt)) 3549 set_pageblock_migratetype(page, 3550 MIGRATE_MOVABLE); 3551 } 3552 } 3553 3554 3555 return 1UL << order; 3556 } 3557 3558 /** 3559 * __putback_isolated_page - Return a now-isolated page back where we got it 3560 * @page: Page that was isolated 3561 * @order: Order of the isolated page 3562 * @mt: The page's pageblock's migratetype 3563 * 3564 * This function is meant to return a page pulled from the free lists via 3565 * __isolate_free_page back to the free lists they were pulled from. 3566 */ 3567 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3568 { 3569 struct zone *zone = page_zone(page); 3570 3571 /* zone lock should be held when this function is called */ 3572 lockdep_assert_held(&zone->lock); 3573 3574 /* Return isolated page to tail of freelist. 
*/ 3575 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3576 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3577 } 3578 3579 /* 3580 * Update NUMA hit/miss statistics 3581 * 3582 * Must be called with interrupts disabled. 3583 */ 3584 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 3585 long nr_account) 3586 { 3587 #ifdef CONFIG_NUMA 3588 enum numa_stat_item local_stat = NUMA_LOCAL; 3589 3590 /* skip numa counters update if numa stats is disabled */ 3591 if (!static_branch_likely(&vm_numa_stat_key)) 3592 return; 3593 3594 if (zone_to_nid(z) != numa_node_id()) 3595 local_stat = NUMA_OTHER; 3596 3597 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3598 __count_numa_events(z, NUMA_HIT, nr_account); 3599 else { 3600 __count_numa_events(z, NUMA_MISS, nr_account); 3601 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 3602 } 3603 __count_numa_events(z, local_stat, nr_account); 3604 #endif 3605 } 3606 3607 /* Remove page from the per-cpu list, caller must protect the list */ 3608 static inline 3609 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3610 int migratetype, 3611 unsigned int alloc_flags, 3612 struct per_cpu_pages *pcp, 3613 struct list_head *list) 3614 { 3615 struct page *page; 3616 3617 do { 3618 if (list_empty(list)) { 3619 int batch = READ_ONCE(pcp->batch); 3620 int alloced; 3621 3622 /* 3623 * Scale batch relative to order if batch implies 3624 * free pages can be stored on the PCP. Batch can 3625 * be 1 for small zones or for boot pagesets which 3626 * should never store free pages as the pages may 3627 * belong to arbitrary zones. 3628 */ 3629 if (batch > 1) 3630 batch = max(batch >> order, 2); 3631 alloced = rmqueue_bulk(zone, order, 3632 batch, list, 3633 migratetype, alloc_flags); 3634 3635 pcp->count += alloced << order; 3636 if (unlikely(list_empty(list))) 3637 return NULL; 3638 } 3639 3640 page = list_first_entry(list, struct page, lru); 3641 list_del(&page->lru); 3642 pcp->count -= 1 << order; 3643 } while (check_new_pcp(page)); 3644 3645 return page; 3646 } 3647 3648 /* Lock and remove page from the per-cpu list */ 3649 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3650 struct zone *zone, unsigned int order, 3651 gfp_t gfp_flags, int migratetype, 3652 unsigned int alloc_flags) 3653 { 3654 struct per_cpu_pages *pcp; 3655 struct list_head *list; 3656 struct page *page; 3657 unsigned long flags; 3658 3659 local_lock_irqsave(&pagesets.lock, flags); 3660 3661 /* 3662 * On allocation, reduce the number of pages that are batch freed. 3663 * See nr_pcp_free() where free_factor is increased for subsequent 3664 * frees. 3665 */ 3666 pcp = this_cpu_ptr(zone->per_cpu_pageset); 3667 pcp->free_factor >>= 1; 3668 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3669 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3670 local_unlock_irqrestore(&pagesets.lock, flags); 3671 if (page) { 3672 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); 3673 zone_statistics(preferred_zone, zone, 1); 3674 } 3675 return page; 3676 } 3677 3678 /* 3679 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 
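 * Higher orders accepted by pcp_allowed_order() are served from the
 * pcplists as well.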
3680 */ 3681 static inline 3682 struct page *rmqueue(struct zone *preferred_zone, 3683 struct zone *zone, unsigned int order, 3684 gfp_t gfp_flags, unsigned int alloc_flags, 3685 int migratetype) 3686 { 3687 unsigned long flags; 3688 struct page *page; 3689 3690 if (likely(pcp_allowed_order(order))) { 3691 /* 3692 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and 3693 * we need to skip it when CMA area isn't allowed. 3694 */ 3695 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || 3696 migratetype != MIGRATE_MOVABLE) { 3697 page = rmqueue_pcplist(preferred_zone, zone, order, 3698 gfp_flags, migratetype, alloc_flags); 3699 goto out; 3700 } 3701 } 3702 3703 /* 3704 * We most definitely don't want callers attempting to 3705 * allocate greater than order-1 page units with __GFP_NOFAIL. 3706 */ 3707 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 3708 spin_lock_irqsave(&zone->lock, flags); 3709 3710 do { 3711 page = NULL; 3712 /* 3713 * order-0 request can reach here when the pcplist is skipped 3714 * due to non-CMA allocation context. HIGHATOMIC area is 3715 * reserved for high-order atomic allocation, so order-0 3716 * request should skip it. 3717 */ 3718 if (order > 0 && alloc_flags & ALLOC_HARDER) { 3719 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3720 if (page) 3721 trace_mm_page_alloc_zone_locked(page, order, migratetype); 3722 } 3723 if (!page) 3724 page = __rmqueue(zone, order, migratetype, alloc_flags); 3725 } while (page && check_new_pages(page, order)); 3726 if (!page) 3727 goto failed; 3728 3729 __mod_zone_freepage_state(zone, -(1 << order), 3730 get_pcppage_migratetype(page)); 3731 spin_unlock_irqrestore(&zone->lock, flags); 3732 3733 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3734 zone_statistics(preferred_zone, zone, 1); 3735 3736 out: 3737 /* Separate test+clear to avoid unnecessary atomics */ 3738 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { 3739 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3740 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3741 } 3742 3743 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3744 return page; 3745 3746 failed: 3747 spin_unlock_irqrestore(&zone->lock, flags); 3748 return NULL; 3749 } 3750 3751 #ifdef CONFIG_FAIL_PAGE_ALLOC 3752 3753 static struct { 3754 struct fault_attr attr; 3755 3756 bool ignore_gfp_highmem; 3757 bool ignore_gfp_reclaim; 3758 u32 min_order; 3759 } fail_page_alloc = { 3760 .attr = FAULT_ATTR_INITIALIZER, 3761 .ignore_gfp_reclaim = true, 3762 .ignore_gfp_highmem = true, 3763 .min_order = 1, 3764 }; 3765 3766 static int __init setup_fail_page_alloc(char *str) 3767 { 3768 return setup_fault_attr(&fail_page_alloc.attr, str); 3769 } 3770 __setup("fail_page_alloc=", setup_fail_page_alloc); 3771 3772 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3773 { 3774 if (order < fail_page_alloc.min_order) 3775 return false; 3776 if (gfp_mask & __GFP_NOFAIL) 3777 return false; 3778 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 3779 return false; 3780 if (fail_page_alloc.ignore_gfp_reclaim && 3781 (gfp_mask & __GFP_DIRECT_RECLAIM)) 3782 return false; 3783 3784 return should_fail(&fail_page_alloc.attr, 1 << order); 3785 } 3786 3787 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3788 3789 static int __init fail_page_alloc_debugfs(void) 3790 { 3791 umode_t mode = S_IFREG | 0600; 3792 struct dentry *dir; 3793 3794 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 3795 &fail_page_alloc.attr); 3796 3797 
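	/*
	 * On top of the generic fault_attr files created above, expose the
	 * page_alloc specific knobs. With debugfs mounted in the usual
	 * location these should show up under
	 * /sys/kernel/debug/fail_page_alloc/, e.g. (illustrative):
	 *
	 *	echo 1 > /sys/kernel/debug/fail_page_alloc/min-order
	 */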
debugfs_create_bool("ignore-gfp-wait", mode, dir, 3798 &fail_page_alloc.ignore_gfp_reclaim); 3799 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 3800 &fail_page_alloc.ignore_gfp_highmem); 3801 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); 3802 3803 return 0; 3804 } 3805 3806 late_initcall(fail_page_alloc_debugfs); 3807 3808 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3809 3810 #else /* CONFIG_FAIL_PAGE_ALLOC */ 3811 3812 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3813 { 3814 return false; 3815 } 3816 3817 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 3818 3819 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3820 { 3821 return __should_fail_alloc_page(gfp_mask, order); 3822 } 3823 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 3824 3825 static inline long __zone_watermark_unusable_free(struct zone *z, 3826 unsigned int order, unsigned int alloc_flags) 3827 { 3828 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); 3829 long unusable_free = (1 << order) - 1; 3830 3831 /* 3832 * If the caller does not have rights to ALLOC_HARDER then subtract 3833 * the high-atomic reserves. This will over-estimate the size of the 3834 * atomic reserve but it avoids a search. 3835 */ 3836 if (likely(!alloc_harder)) 3837 unusable_free += z->nr_reserved_highatomic; 3838 3839 #ifdef CONFIG_CMA 3840 /* If allocation can't use CMA areas don't use free CMA pages */ 3841 if (!(alloc_flags & ALLOC_CMA)) 3842 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3843 #endif 3844 3845 return unusable_free; 3846 } 3847 3848 /* 3849 * Return true if free base pages are above 'mark'. For high-order checks it 3850 * will return true of the order-0 watermark is reached and there is at least 3851 * one free page of a suitable size. Checking now avoids taking the zone lock 3852 * to check in the allocation paths if no pages are free. 3853 */ 3854 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3855 int highest_zoneidx, unsigned int alloc_flags, 3856 long free_pages) 3857 { 3858 long min = mark; 3859 int o; 3860 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); 3861 3862 /* free_pages may go negative - that's OK */ 3863 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3864 3865 if (alloc_flags & ALLOC_HIGH) 3866 min -= min / 2; 3867 3868 if (unlikely(alloc_harder)) { 3869 /* 3870 * OOM victims can try even harder than normal ALLOC_HARDER 3871 * users on the grounds that it's definitely going to be in 3872 * the exit path shortly and free memory. Any allocation it 3873 * makes during the free path will be small and short-lived. 3874 */ 3875 if (alloc_flags & ALLOC_OOM) 3876 min -= min / 2; 3877 else 3878 min -= min / 4; 3879 } 3880 3881 /* 3882 * Check watermarks for an order-0 allocation request. If these 3883 * are not met, then a high-order request also cannot go ahead 3884 * even if a suitable page happened to be free. 
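 *
 * As a rough illustration: with mark = 1024, no ALLOC_HIGH/ALLOC_HARDER
 * adjustment and a lowmem reserve of 0, an order-3 request is rejected
 * here unless more than 1024 usable free base pages remain, regardless
 * of how those pages are grouped into buddies.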
3885 */ 3886 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3887 return false; 3888 3889 /* If this is an order-0 request then the watermark is fine */ 3890 if (!order) 3891 return true; 3892 3893 /* For a high-order request, check at least one suitable page is free */ 3894 for (o = order; o < MAX_ORDER; o++) { 3895 struct free_area *area = &z->free_area[o]; 3896 int mt; 3897 3898 if (!area->nr_free) 3899 continue; 3900 3901 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3902 if (!free_area_empty(area, mt)) 3903 return true; 3904 } 3905 3906 #ifdef CONFIG_CMA 3907 if ((alloc_flags & ALLOC_CMA) && 3908 !free_area_empty(area, MIGRATE_CMA)) { 3909 return true; 3910 } 3911 #endif 3912 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC)) 3913 return true; 3914 } 3915 return false; 3916 } 3917 3918 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3919 int highest_zoneidx, unsigned int alloc_flags) 3920 { 3921 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3922 zone_page_state(z, NR_FREE_PAGES)); 3923 } 3924 3925 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3926 unsigned long mark, int highest_zoneidx, 3927 unsigned int alloc_flags, gfp_t gfp_mask) 3928 { 3929 long free_pages; 3930 3931 free_pages = zone_page_state(z, NR_FREE_PAGES); 3932 3933 /* 3934 * Fast check for order-0 only. If this fails then the reserves 3935 * need to be calculated. 3936 */ 3937 if (!order) { 3938 long fast_free; 3939 3940 fast_free = free_pages; 3941 fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags); 3942 if (fast_free > mark + z->lowmem_reserve[highest_zoneidx]) 3943 return true; 3944 } 3945 3946 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3947 free_pages)) 3948 return true; 3949 /* 3950 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations 3951 * when checking the min watermark. The min watermark is the 3952 * point where boosting is ignored so that kswapd is woken up 3953 * when below the low watermark. 3954 */ 3955 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost 3956 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3957 mark = z->_watermark[WMARK_MIN]; 3958 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3959 alloc_flags, free_pages); 3960 } 3961 3962 return false; 3963 } 3964 3965 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3966 unsigned long mark, int highest_zoneidx) 3967 { 3968 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3969 3970 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3971 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3972 3973 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3974 free_pages); 3975 } 3976 3977 #ifdef CONFIG_NUMA 3978 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3979 3980 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3981 { 3982 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3983 node_reclaim_distance; 3984 } 3985 #else /* CONFIG_NUMA */ 3986 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3987 { 3988 return true; 3989 } 3990 #endif /* CONFIG_NUMA */ 3991 3992 /* 3993 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3994 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3995 * premature use of a lower zone may cause lowmem pressure problems that 3996 * are worse than fragmentation. 
If the next zone is ZONE_DMA then it is 3997 * probably too small. It only makes sense to spread allocations to avoid 3998 * fragmentation between the Normal and DMA32 zones. 3999 */ 4000 static inline unsigned int 4001 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 4002 { 4003 unsigned int alloc_flags; 4004 4005 /* 4006 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4007 * to save a branch. 4008 */ 4009 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 4010 4011 #ifdef CONFIG_ZONE_DMA32 4012 if (!zone) 4013 return alloc_flags; 4014 4015 if (zone_idx(zone) != ZONE_NORMAL) 4016 return alloc_flags; 4017 4018 /* 4019 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 4020 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 4021 * on UMA that if Normal is populated then so is DMA32. 4022 */ 4023 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 4024 if (nr_online_nodes > 1 && !populated_zone(--zone)) 4025 return alloc_flags; 4026 4027 alloc_flags |= ALLOC_NOFRAGMENT; 4028 #endif /* CONFIG_ZONE_DMA32 */ 4029 return alloc_flags; 4030 } 4031 4032 /* Must be called after current_gfp_context() which can change gfp_mask */ 4033 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 4034 unsigned int alloc_flags) 4035 { 4036 #ifdef CONFIG_CMA 4037 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 4038 alloc_flags |= ALLOC_CMA; 4039 #endif 4040 return alloc_flags; 4041 } 4042 4043 /* 4044 * get_page_from_freelist goes through the zonelist trying to allocate 4045 * a page. 4046 */ 4047 static struct page * 4048 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 4049 const struct alloc_context *ac) 4050 { 4051 struct zoneref *z; 4052 struct zone *zone; 4053 struct pglist_data *last_pgdat_dirty_limit = NULL; 4054 bool no_fallback; 4055 4056 retry: 4057 /* 4058 * Scan zonelist, looking for a zone with enough free. 4059 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 4060 */ 4061 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 4062 z = ac->preferred_zoneref; 4063 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 4064 ac->nodemask) { 4065 struct page *page; 4066 unsigned long mark; 4067 4068 if (cpusets_enabled() && 4069 (alloc_flags & ALLOC_CPUSET) && 4070 !__cpuset_zone_allowed(zone, gfp_mask)) 4071 continue; 4072 /* 4073 * When allocating a page cache page for writing, we 4074 * want to get it from a node that is within its dirty 4075 * limit, such that no single node holds more than its 4076 * proportional share of globally allowed dirty pages. 4077 * The dirty limits take into account the node's 4078 * lowmem reserves and high watermark so that kswapd 4079 * should be able to balance it without having to 4080 * write pages from its LRU list. 4081 * 4082 * XXX: For now, allow allocations to potentially 4083 * exceed the per-node dirty limit in the slowpath 4084 * (spread_dirty_pages unset) before going into reclaim, 4085 * which is important when on a NUMA setup the allowed 4086 * nodes are together not big enough to reach the 4087 * global limit. The proper fix for these situations 4088 * will require awareness of nodes in the 4089 * dirty-throttling and the flusher threads. 
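 *
 * The node that last failed the dirty check is remembered in
 * last_pgdat_dirty_limit so that further zones on the same node can be
 * skipped without re-evaluating node_dirty_ok().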
4090 */ 4091 if (ac->spread_dirty_pages) { 4092 if (last_pgdat_dirty_limit == zone->zone_pgdat) 4093 continue; 4094 4095 if (!node_dirty_ok(zone->zone_pgdat)) { 4096 last_pgdat_dirty_limit = zone->zone_pgdat; 4097 continue; 4098 } 4099 } 4100 4101 if (no_fallback && nr_online_nodes > 1 && 4102 zone != ac->preferred_zoneref->zone) { 4103 int local_nid; 4104 4105 /* 4106 * If moving to a remote node, retry but allow 4107 * fragmenting fallbacks. Locality is more important 4108 * than fragmentation avoidance. 4109 */ 4110 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 4111 if (zone_to_nid(zone) != local_nid) { 4112 alloc_flags &= ~ALLOC_NOFRAGMENT; 4113 goto retry; 4114 } 4115 } 4116 4117 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 4118 if (!zone_watermark_fast(zone, order, mark, 4119 ac->highest_zoneidx, alloc_flags, 4120 gfp_mask)) { 4121 int ret; 4122 4123 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 4124 /* 4125 * Watermark failed for this zone, but see if we can 4126 * grow this zone if it contains deferred pages. 4127 */ 4128 if (static_branch_unlikely(&deferred_pages)) { 4129 if (_deferred_grow_zone(zone, order)) 4130 goto try_this_zone; 4131 } 4132 #endif 4133 /* Checked here to keep the fast path fast */ 4134 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 4135 if (alloc_flags & ALLOC_NO_WATERMARKS) 4136 goto try_this_zone; 4137 4138 if (!node_reclaim_enabled() || 4139 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 4140 continue; 4141 4142 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 4143 switch (ret) { 4144 case NODE_RECLAIM_NOSCAN: 4145 /* did not scan */ 4146 continue; 4147 case NODE_RECLAIM_FULL: 4148 /* scanned but unreclaimable */ 4149 continue; 4150 default: 4151 /* did we reclaim enough */ 4152 if (zone_watermark_ok(zone, order, mark, 4153 ac->highest_zoneidx, alloc_flags)) 4154 goto try_this_zone; 4155 4156 continue; 4157 } 4158 } 4159 4160 try_this_zone: 4161 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 4162 gfp_mask, alloc_flags, ac->migratetype); 4163 if (page) { 4164 prep_new_page(page, order, gfp_mask, alloc_flags); 4165 4166 /* 4167 * If this is a high-order atomic allocation then check 4168 * if the pageblock should be reserved for the future 4169 */ 4170 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 4171 reserve_highatomic_pageblock(page, zone, order); 4172 4173 return page; 4174 } else { 4175 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 4176 /* Try again if zone has deferred pages */ 4177 if (static_branch_unlikely(&deferred_pages)) { 4178 if (_deferred_grow_zone(zone, order)) 4179 goto try_this_zone; 4180 } 4181 #endif 4182 } 4183 } 4184 4185 /* 4186 * It's possible on a UMA machine to get through all zones that are 4187 * fragmented. If avoiding fragmentation, reset and try again. 4188 */ 4189 if (no_fallback) { 4190 alloc_flags &= ~ALLOC_NOFRAGMENT; 4191 goto retry; 4192 } 4193 4194 return NULL; 4195 } 4196 4197 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 4198 { 4199 unsigned int filter = SHOW_MEM_FILTER_NODES; 4200 4201 /* 4202 * This documents exceptions given to allocations in certain 4203 * contexts that are allowed to allocate outside current's set 4204 * of allowed nodes. 
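 * In those cases SHOW_MEM_FILTER_NODES is cleared so that show_mem()
 * reports all nodes rather than only the currently allowed ones.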
4205 */ 4206 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4207 if (tsk_is_oom_victim(current) || 4208 (current->flags & (PF_MEMALLOC | PF_EXITING))) 4209 filter &= ~SHOW_MEM_FILTER_NODES; 4210 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 4211 filter &= ~SHOW_MEM_FILTER_NODES; 4212 4213 show_mem(filter, nodemask); 4214 } 4215 4216 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 4217 { 4218 struct va_format vaf; 4219 va_list args; 4220 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 4221 4222 if ((gfp_mask & __GFP_NOWARN) || 4223 !__ratelimit(&nopage_rs) || 4224 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 4225 return; 4226 4227 va_start(args, fmt); 4228 vaf.fmt = fmt; 4229 vaf.va = &args; 4230 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 4231 current->comm, &vaf, gfp_mask, &gfp_mask, 4232 nodemask_pr_args(nodemask)); 4233 va_end(args); 4234 4235 cpuset_print_current_mems_allowed(); 4236 pr_cont("\n"); 4237 dump_stack(); 4238 warn_alloc_show_mem(gfp_mask, nodemask); 4239 } 4240 4241 static inline struct page * 4242 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 4243 unsigned int alloc_flags, 4244 const struct alloc_context *ac) 4245 { 4246 struct page *page; 4247 4248 page = get_page_from_freelist(gfp_mask, order, 4249 alloc_flags|ALLOC_CPUSET, ac); 4250 /* 4251 * fallback to ignore cpuset restriction if our nodes 4252 * are depleted 4253 */ 4254 if (!page) 4255 page = get_page_from_freelist(gfp_mask, order, 4256 alloc_flags, ac); 4257 4258 return page; 4259 } 4260 4261 static inline struct page * 4262 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 4263 const struct alloc_context *ac, unsigned long *did_some_progress) 4264 { 4265 struct oom_control oc = { 4266 .zonelist = ac->zonelist, 4267 .nodemask = ac->nodemask, 4268 .memcg = NULL, 4269 .gfp_mask = gfp_mask, 4270 .order = order, 4271 }; 4272 struct page *page; 4273 4274 *did_some_progress = 0; 4275 4276 /* 4277 * Acquire the oom lock. If that fails, somebody else is 4278 * making progress for us. 4279 */ 4280 if (!mutex_trylock(&oom_lock)) { 4281 *did_some_progress = 1; 4282 schedule_timeout_uninterruptible(1); 4283 return NULL; 4284 } 4285 4286 /* 4287 * Go through the zonelist yet one more time, keep very high watermark 4288 * here, this is only to catch a parallel oom killing, we must fail if 4289 * we're still under heavy pressure. But make sure that this reclaim 4290 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 4291 * allocation which will never fail due to oom_lock already held. 4292 */ 4293 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 4294 ~__GFP_DIRECT_RECLAIM, order, 4295 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 4296 if (page) 4297 goto out; 4298 4299 /* Coredumps can quickly deplete all memory reserves */ 4300 if (current->flags & PF_DUMPCORE) 4301 goto out; 4302 /* The OOM killer will not help higher order allocs */ 4303 if (order > PAGE_ALLOC_COSTLY_ORDER) 4304 goto out; 4305 /* 4306 * We have already exhausted all our reclaim opportunities without any 4307 * success so it is time to admit defeat. We will skip the OOM killer 4308 * because it is very likely that the caller has a more reasonable 4309 * fallback than shooting a random task. 4310 * 4311 * The OOM killer may not free memory on a specific node. 
4312 */ 4313 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 4314 goto out; 4315 /* The OOM killer does not needlessly kill tasks for lowmem */ 4316 if (ac->highest_zoneidx < ZONE_NORMAL) 4317 goto out; 4318 if (pm_suspended_storage()) 4319 goto out; 4320 /* 4321 * XXX: GFP_NOFS allocations should rather fail than rely on 4322 * other request to make a forward progress. 4323 * We are in an unfortunate situation where out_of_memory cannot 4324 * do much for this context but let's try it to at least get 4325 * access to memory reserved if the current task is killed (see 4326 * out_of_memory). Once filesystems are ready to handle allocation 4327 * failures more gracefully we should just bail out here. 4328 */ 4329 4330 /* Exhausted what can be done so it's blame time */ 4331 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { 4332 *did_some_progress = 1; 4333 4334 /* 4335 * Help non-failing allocations by giving them access to memory 4336 * reserves 4337 */ 4338 if (gfp_mask & __GFP_NOFAIL) 4339 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 4340 ALLOC_NO_WATERMARKS, ac); 4341 } 4342 out: 4343 mutex_unlock(&oom_lock); 4344 return page; 4345 } 4346 4347 /* 4348 * Maximum number of compaction retries with a progress before OOM 4349 * killer is consider as the only way to move forward. 4350 */ 4351 #define MAX_COMPACT_RETRIES 16 4352 4353 #ifdef CONFIG_COMPACTION 4354 /* Try memory compaction for high-order allocations before reclaim */ 4355 static struct page * 4356 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4357 unsigned int alloc_flags, const struct alloc_context *ac, 4358 enum compact_priority prio, enum compact_result *compact_result) 4359 { 4360 struct page *page = NULL; 4361 unsigned long pflags; 4362 unsigned int noreclaim_flag; 4363 4364 if (!order) 4365 return NULL; 4366 4367 psi_memstall_enter(&pflags); 4368 noreclaim_flag = memalloc_noreclaim_save(); 4369 4370 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 4371 prio, &page); 4372 4373 memalloc_noreclaim_restore(noreclaim_flag); 4374 psi_memstall_leave(&pflags); 4375 4376 if (*compact_result == COMPACT_SKIPPED) 4377 return NULL; 4378 /* 4379 * At least in one zone compaction wasn't deferred or skipped, so let's 4380 * count a compaction stall 4381 */ 4382 count_vm_event(COMPACTSTALL); 4383 4384 /* Prep a captured page if available */ 4385 if (page) 4386 prep_new_page(page, order, gfp_mask, alloc_flags); 4387 4388 /* Try get a page from the freelist if available */ 4389 if (!page) 4390 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4391 4392 if (page) { 4393 struct zone *zone = page_zone(page); 4394 4395 zone->compact_blockskip_flush = false; 4396 compaction_defer_reset(zone, order, true); 4397 count_vm_event(COMPACTSUCCESS); 4398 return page; 4399 } 4400 4401 /* 4402 * It's bad if compaction run occurs and fails. The most likely reason 4403 * is that pages exist, but not enough to satisfy watermarks. 
4404 */ 4405 count_vm_event(COMPACTFAIL); 4406 4407 cond_resched(); 4408 4409 return NULL; 4410 } 4411 4412 static inline bool 4413 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4414 enum compact_result compact_result, 4415 enum compact_priority *compact_priority, 4416 int *compaction_retries) 4417 { 4418 int max_retries = MAX_COMPACT_RETRIES; 4419 int min_priority; 4420 bool ret = false; 4421 int retries = *compaction_retries; 4422 enum compact_priority priority = *compact_priority; 4423 4424 if (!order) 4425 return false; 4426 4427 if (fatal_signal_pending(current)) 4428 return false; 4429 4430 if (compaction_made_progress(compact_result)) 4431 (*compaction_retries)++; 4432 4433 /* 4434 * compaction considers all the zone as desperately out of memory 4435 * so it doesn't really make much sense to retry except when the 4436 * failure could be caused by insufficient priority 4437 */ 4438 if (compaction_failed(compact_result)) 4439 goto check_priority; 4440 4441 /* 4442 * compaction was skipped because there are not enough order-0 pages 4443 * to work with, so we retry only if it looks like reclaim can help. 4444 */ 4445 if (compaction_needs_reclaim(compact_result)) { 4446 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4447 goto out; 4448 } 4449 4450 /* 4451 * make sure the compaction wasn't deferred or didn't bail out early 4452 * due to locks contention before we declare that we should give up. 4453 * But the next retry should use a higher priority if allowed, so 4454 * we don't just keep bailing out endlessly. 4455 */ 4456 if (compaction_withdrawn(compact_result)) { 4457 goto check_priority; 4458 } 4459 4460 /* 4461 * !costly requests are much more important than __GFP_RETRY_MAYFAIL 4462 * costly ones because they are de facto nofail and invoke OOM 4463 * killer to move on while costly can fail and users are ready 4464 * to cope with that. 1/4 retries is rather arbitrary but we 4465 * would need much more detailed feedback from compaction to 4466 * make a better decision. 4467 */ 4468 if (order > PAGE_ALLOC_COSTLY_ORDER) 4469 max_retries /= 4; 4470 if (*compaction_retries <= max_retries) { 4471 ret = true; 4472 goto out; 4473 } 4474 4475 /* 4476 * Make sure there are attempts at the highest priority if we exhausted 4477 * all retries or failed at the lower priorities. 4478 */ 4479 check_priority: 4480 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
4481 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4482 4483 if (*compact_priority > min_priority) { 4484 (*compact_priority)--; 4485 *compaction_retries = 0; 4486 ret = true; 4487 } 4488 out: 4489 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4490 return ret; 4491 } 4492 #else 4493 static inline struct page * 4494 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4495 unsigned int alloc_flags, const struct alloc_context *ac, 4496 enum compact_priority prio, enum compact_result *compact_result) 4497 { 4498 *compact_result = COMPACT_SKIPPED; 4499 return NULL; 4500 } 4501 4502 static inline bool 4503 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 4504 enum compact_result compact_result, 4505 enum compact_priority *compact_priority, 4506 int *compaction_retries) 4507 { 4508 struct zone *zone; 4509 struct zoneref *z; 4510 4511 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4512 return false; 4513 4514 /* 4515 * There are setups with compaction disabled which would prefer to loop 4516 * inside the allocator rather than hit the oom killer prematurely. 4517 * Let's give them a good hope and keep retrying while the order-0 4518 * watermarks are OK. 4519 */ 4520 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4521 ac->highest_zoneidx, ac->nodemask) { 4522 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4523 ac->highest_zoneidx, alloc_flags)) 4524 return true; 4525 } 4526 return false; 4527 } 4528 #endif /* CONFIG_COMPACTION */ 4529 4530 #ifdef CONFIG_LOCKDEP 4531 static struct lockdep_map __fs_reclaim_map = 4532 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4533 4534 static bool __need_reclaim(gfp_t gfp_mask) 4535 { 4536 /* no reclaim without waiting on it */ 4537 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4538 return false; 4539 4540 /* this guy won't enter reclaim */ 4541 if (current->flags & PF_MEMALLOC) 4542 return false; 4543 4544 if (gfp_mask & __GFP_NOLOCKDEP) 4545 return false; 4546 4547 return true; 4548 } 4549 4550 void __fs_reclaim_acquire(unsigned long ip) 4551 { 4552 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4553 } 4554 4555 void __fs_reclaim_release(unsigned long ip) 4556 { 4557 lock_release(&__fs_reclaim_map, ip); 4558 } 4559 4560 void fs_reclaim_acquire(gfp_t gfp_mask) 4561 { 4562 gfp_mask = current_gfp_context(gfp_mask); 4563 4564 if (__need_reclaim(gfp_mask)) { 4565 if (gfp_mask & __GFP_FS) 4566 __fs_reclaim_acquire(_RET_IP_); 4567 4568 #ifdef CONFIG_MMU_NOTIFIER 4569 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4570 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4571 #endif 4572 4573 } 4574 } 4575 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4576 4577 void fs_reclaim_release(gfp_t gfp_mask) 4578 { 4579 gfp_mask = current_gfp_context(gfp_mask); 4580 4581 if (__need_reclaim(gfp_mask)) { 4582 if (gfp_mask & __GFP_FS) 4583 __fs_reclaim_release(_RET_IP_); 4584 } 4585 } 4586 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4587 #endif 4588 4589 /* Perform direct synchronous page reclaim */ 4590 static unsigned long 4591 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4592 const struct alloc_context *ac) 4593 { 4594 unsigned int noreclaim_flag; 4595 unsigned long pflags, progress; 4596 4597 cond_resched(); 4598 4599 /* We now go into synchronous reclaim */ 4600 cpuset_memory_pressure_bump(); 4601 psi_memstall_enter(&pflags); 4602 fs_reclaim_acquire(gfp_mask); 4603 noreclaim_flag = memalloc_noreclaim_save(); 4604 4605 progress = 
try_to_free_pages(ac->zonelist, order, gfp_mask, 4606 ac->nodemask); 4607 4608 memalloc_noreclaim_restore(noreclaim_flag); 4609 fs_reclaim_release(gfp_mask); 4610 psi_memstall_leave(&pflags); 4611 4612 cond_resched(); 4613 4614 return progress; 4615 } 4616 4617 /* The really slow allocator path where we enter direct reclaim */ 4618 static inline struct page * 4619 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4620 unsigned int alloc_flags, const struct alloc_context *ac, 4621 unsigned long *did_some_progress) 4622 { 4623 struct page *page = NULL; 4624 bool drained = false; 4625 4626 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4627 if (unlikely(!(*did_some_progress))) 4628 return NULL; 4629 4630 retry: 4631 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4632 4633 /* 4634 * If an allocation failed after direct reclaim, it could be because 4635 * pages are pinned on the per-cpu lists or in high alloc reserves. 4636 * Shrink them and try again 4637 */ 4638 if (!page && !drained) { 4639 unreserve_highatomic_pageblock(ac, false); 4640 drain_all_pages(NULL); 4641 drained = true; 4642 goto retry; 4643 } 4644 4645 return page; 4646 } 4647 4648 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4649 const struct alloc_context *ac) 4650 { 4651 struct zoneref *z; 4652 struct zone *zone; 4653 pg_data_t *last_pgdat = NULL; 4654 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4655 4656 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4657 ac->nodemask) { 4658 if (last_pgdat != zone->zone_pgdat) 4659 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 4660 last_pgdat = zone->zone_pgdat; 4661 } 4662 } 4663 4664 static inline unsigned int 4665 gfp_to_alloc_flags(gfp_t gfp_mask) 4666 { 4667 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4668 4669 /* 4670 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH 4671 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4672 * to save two branches. 4673 */ 4674 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 4675 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4676 4677 /* 4678 * The caller may dip into page reserves a bit more if the caller 4679 * cannot run direct reclaim, or if the caller has realtime scheduling 4680 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4681 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 4682 */ 4683 alloc_flags |= (__force int) 4684 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4685 4686 if (gfp_mask & __GFP_ATOMIC) { 4687 /* 4688 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4689 * if it can't schedule. 4690 */ 4691 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4692 alloc_flags |= ALLOC_HARDER; 4693 /* 4694 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 4695 * comment for __cpuset_node_allowed(). 
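 *
 * As an illustrative end result: a plain GFP_ATOMIC request (__GFP_HIGH
 * | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM, without __GFP_NOMEMALLOC)
 * leaves this function with ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_KSWAPD
 * | ALLOC_HARDER and with ALLOC_CPUSET cleared.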
4696 */ 4697 alloc_flags &= ~ALLOC_CPUSET; 4698 } else if (unlikely(rt_task(current)) && in_task()) 4699 alloc_flags |= ALLOC_HARDER; 4700 4701 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4702 4703 return alloc_flags; 4704 } 4705 4706 static bool oom_reserves_allowed(struct task_struct *tsk) 4707 { 4708 if (!tsk_is_oom_victim(tsk)) 4709 return false; 4710 4711 /* 4712 * !MMU doesn't have oom reaper so give access to memory reserves 4713 * only to the thread with TIF_MEMDIE set 4714 */ 4715 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4716 return false; 4717 4718 return true; 4719 } 4720 4721 /* 4722 * Distinguish requests which really need access to full memory 4723 * reserves from oom victims which can live with a portion of it 4724 */ 4725 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4726 { 4727 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4728 return 0; 4729 if (gfp_mask & __GFP_MEMALLOC) 4730 return ALLOC_NO_WATERMARKS; 4731 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4732 return ALLOC_NO_WATERMARKS; 4733 if (!in_interrupt()) { 4734 if (current->flags & PF_MEMALLOC) 4735 return ALLOC_NO_WATERMARKS; 4736 else if (oom_reserves_allowed(current)) 4737 return ALLOC_OOM; 4738 } 4739 4740 return 0; 4741 } 4742 4743 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4744 { 4745 return !!__gfp_pfmemalloc_flags(gfp_mask); 4746 } 4747 4748 /* 4749 * Checks whether it makes sense to retry the reclaim to make a forward progress 4750 * for the given allocation request. 4751 * 4752 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4753 * without success, or when we couldn't even meet the watermark if we 4754 * reclaimed all remaining pages on the LRU lists. 4755 * 4756 * Returns true if a retry is viable or false to enter the oom path. 4757 */ 4758 static inline bool 4759 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4760 struct alloc_context *ac, int alloc_flags, 4761 bool did_some_progress, int *no_progress_loops) 4762 { 4763 struct zone *zone; 4764 struct zoneref *z; 4765 bool ret = false; 4766 4767 /* 4768 * Costly allocations might have made a progress but this doesn't mean 4769 * their order will become available due to high fragmentation so 4770 * always increment the no progress counter for them 4771 */ 4772 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4773 *no_progress_loops = 0; 4774 else 4775 (*no_progress_loops)++; 4776 4777 /* 4778 * Make sure we converge to OOM if we cannot make any progress 4779 * several times in the row. 4780 */ 4781 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 4782 /* Before OOM, exhaust highatomic_reserve */ 4783 return unreserve_highatomic_pageblock(ac, true); 4784 } 4785 4786 /* 4787 * Keep reclaiming pages while there is a chance this will lead 4788 * somewhere. If none of the target zones can satisfy our allocation 4789 * request even if all reclaimable pages are considered then we are 4790 * screwed and have to go OOM. 4791 */ 4792 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4793 ac->highest_zoneidx, ac->nodemask) { 4794 unsigned long available; 4795 unsigned long reclaimable; 4796 unsigned long min_wmark = min_wmark_pages(zone); 4797 bool wmark; 4798 4799 available = reclaimable = zone_reclaimable_pages(zone); 4800 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4801 4802 /* 4803 * Would the allocation succeed if we reclaimed all 4804 * reclaimable pages? 
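 * Note that available is the zone's reclaimable pages plus a snapshot
 * of its free pages, so this check is deliberately optimistic.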
4805 */ 4806 wmark = __zone_watermark_ok(zone, order, min_wmark, 4807 ac->highest_zoneidx, alloc_flags, available); 4808 trace_reclaim_retry_zone(z, order, reclaimable, 4809 available, min_wmark, *no_progress_loops, wmark); 4810 if (wmark) { 4811 ret = true; 4812 break; 4813 } 4814 } 4815 4816 /* 4817 * Memory allocation/reclaim might be called from a WQ context and the 4818 * current implementation of the WQ concurrency control doesn't 4819 * recognize that a particular WQ is congested if the worker thread is 4820 * looping without ever sleeping. Therefore we have to do a short sleep 4821 * here rather than calling cond_resched(). 4822 */ 4823 if (current->flags & PF_WQ_WORKER) 4824 schedule_timeout_uninterruptible(1); 4825 else 4826 cond_resched(); 4827 return ret; 4828 } 4829 4830 static inline bool 4831 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4832 { 4833 /* 4834 * It's possible that cpuset's mems_allowed and the nodemask from 4835 * mempolicy don't intersect. This should be normally dealt with by 4836 * policy_nodemask(), but it's possible to race with cpuset update in 4837 * such a way the check therein was true, and then it became false 4838 * before we got our cpuset_mems_cookie here. 4839 * This assumes that for all allocations, ac->nodemask can come only 4840 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4841 * when it does not intersect with the cpuset restrictions) or the 4842 * caller can deal with a violated nodemask. 4843 */ 4844 if (cpusets_enabled() && ac->nodemask && 4845 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4846 ac->nodemask = NULL; 4847 return true; 4848 } 4849 4850 /* 4851 * When updating a task's mems_allowed or mempolicy nodemask, it is 4852 * possible to race with parallel threads in such a way that our 4853 * allocation can fail while the mask is being updated. If we are about 4854 * to fail, check if the cpuset changed during allocation and if so, 4855 * retry. 4856 */ 4857 if (read_mems_allowed_retry(cpuset_mems_cookie)) 4858 return true; 4859 4860 return false; 4861 } 4862 4863 static inline struct page * 4864 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 4865 struct alloc_context *ac) 4866 { 4867 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 4868 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 4869 struct page *page = NULL; 4870 unsigned int alloc_flags; 4871 unsigned long did_some_progress; 4872 enum compact_priority compact_priority; 4873 enum compact_result compact_result; 4874 int compaction_retries; 4875 int no_progress_loops; 4876 unsigned int cpuset_mems_cookie; 4877 int reserve_flags; 4878 4879 /* 4880 * We also sanity check to catch abuse of atomic reserves being used by 4881 * callers that are not in atomic context. 4882 */ 4883 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 4884 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 4885 gfp_mask &= ~__GFP_ATOMIC; 4886 4887 retry_cpuset: 4888 compaction_retries = 0; 4889 no_progress_loops = 0; 4890 compact_priority = DEF_COMPACT_PRIORITY; 4891 cpuset_mems_cookie = read_mems_allowed_begin(); 4892 4893 /* 4894 * The fast path uses conservative alloc_flags to succeed only until 4895 * kswapd needs to be woken up, and to avoid the cost of setting up 4896 * alloc_flags precisely. So we do that now. 
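 * Note that gfp_to_alloc_flags() starts from ALLOC_WMARK_MIN here,
 * whereas the fast path attempt in __alloc_pages() used ALLOC_WMARK_LOW.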
4897 */ 4898 alloc_flags = gfp_to_alloc_flags(gfp_mask); 4899 4900 /* 4901 * We need to recalculate the starting point for the zonelist iterator 4902 * because we might have used different nodemask in the fast path, or 4903 * there was a cpuset modification and we are retrying - otherwise we 4904 * could end up iterating over non-eligible zones endlessly. 4905 */ 4906 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4907 ac->highest_zoneidx, ac->nodemask); 4908 if (!ac->preferred_zoneref->zone) 4909 goto nopage; 4910 4911 /* 4912 * Check for insane configurations where the cpuset doesn't contain 4913 * any suitable zone to satisfy the request - e.g. non-movable 4914 * GFP_HIGHUSER allocations from MOVABLE nodes only. 4915 */ 4916 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 4917 struct zoneref *z = first_zones_zonelist(ac->zonelist, 4918 ac->highest_zoneidx, 4919 &cpuset_current_mems_allowed); 4920 if (!z->zone) 4921 goto nopage; 4922 } 4923 4924 if (alloc_flags & ALLOC_KSWAPD) 4925 wake_all_kswapds(order, gfp_mask, ac); 4926 4927 /* 4928 * The adjusted alloc_flags might result in immediate success, so try 4929 * that first 4930 */ 4931 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4932 if (page) 4933 goto got_pg; 4934 4935 /* 4936 * For costly allocations, try direct compaction first, as it's likely 4937 * that we have enough base pages and don't need to reclaim. For non- 4938 * movable high-order allocations, do that as well, as compaction will 4939 * try prevent permanent fragmentation by migrating from blocks of the 4940 * same migratetype. 4941 * Don't try this for allocations that are allowed to ignore 4942 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 4943 */ 4944 if (can_direct_reclaim && 4945 (costly_order || 4946 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4947 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4948 page = __alloc_pages_direct_compact(gfp_mask, order, 4949 alloc_flags, ac, 4950 INIT_COMPACT_PRIORITY, 4951 &compact_result); 4952 if (page) 4953 goto got_pg; 4954 4955 /* 4956 * Checks for costly allocations with __GFP_NORETRY, which 4957 * includes some THP page fault allocations 4958 */ 4959 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4960 /* 4961 * If allocating entire pageblock(s) and compaction 4962 * failed because all zones are below low watermarks 4963 * or is prohibited because it recently failed at this 4964 * order, fail immediately unless the allocator has 4965 * requested compaction and reclaim retry. 4966 * 4967 * Reclaim is 4968 * - potentially very expensive because zones are far 4969 * below their low watermarks or this is part of very 4970 * bursty high order allocations, 4971 * - not guaranteed to help because isolate_freepages() 4972 * may not iterate over freed pages as part of its 4973 * linear scan, and 4974 * - unlikely to make entire pageblocks free on its 4975 * own. 4976 */ 4977 if (compact_result == COMPACT_SKIPPED || 4978 compact_result == COMPACT_DEFERRED) 4979 goto nopage; 4980 4981 /* 4982 * Looks like reclaim/compaction is worth trying, but 4983 * sync compaction could be very expensive, so keep 4984 * using async compaction. 
4985 */ 4986 compact_priority = INIT_COMPACT_PRIORITY; 4987 } 4988 } 4989 4990 retry: 4991 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4992 if (alloc_flags & ALLOC_KSWAPD) 4993 wake_all_kswapds(order, gfp_mask, ac); 4994 4995 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4996 if (reserve_flags) 4997 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags); 4998 4999 /* 5000 * Reset the nodemask and zonelist iterators if memory policies can be 5001 * ignored. These allocations are high priority and system rather than 5002 * user oriented. 5003 */ 5004 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 5005 ac->nodemask = NULL; 5006 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5007 ac->highest_zoneidx, ac->nodemask); 5008 } 5009 5010 /* Attempt with potentially adjusted zonelist and alloc_flags */ 5011 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 5012 if (page) 5013 goto got_pg; 5014 5015 /* Caller is not willing to reclaim, we can't balance anything */ 5016 if (!can_direct_reclaim) 5017 goto nopage; 5018 5019 /* Avoid recursion of direct reclaim */ 5020 if (current->flags & PF_MEMALLOC) 5021 goto nopage; 5022 5023 /* Try direct reclaim and then allocating */ 5024 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 5025 &did_some_progress); 5026 if (page) 5027 goto got_pg; 5028 5029 /* Try direct compaction and then allocating */ 5030 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 5031 compact_priority, &compact_result); 5032 if (page) 5033 goto got_pg; 5034 5035 /* Do not loop if specifically requested */ 5036 if (gfp_mask & __GFP_NORETRY) 5037 goto nopage; 5038 5039 /* 5040 * Do not retry costly high order allocations unless they are 5041 * __GFP_RETRY_MAYFAIL 5042 */ 5043 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) 5044 goto nopage; 5045 5046 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 5047 did_some_progress > 0, &no_progress_loops)) 5048 goto retry; 5049 5050 /* 5051 * It doesn't make any sense to retry for the compaction if the order-0 5052 * reclaim is not able to make any progress because the current 5053 * implementation of the compaction depends on the sufficient amount 5054 * of free memory (see __compaction_suitable) 5055 */ 5056 if (did_some_progress > 0 && 5057 should_compact_retry(ac, order, alloc_flags, 5058 compact_result, &compact_priority, 5059 &compaction_retries)) 5060 goto retry; 5061 5062 5063 /* Deal with possible cpuset update races before we start OOM killing */ 5064 if (check_retry_cpuset(cpuset_mems_cookie, ac)) 5065 goto retry_cpuset; 5066 5067 /* Reclaim has failed us, start killing things */ 5068 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 5069 if (page) 5070 goto got_pg; 5071 5072 /* Avoid allocations with no watermarks from looping endlessly */ 5073 if (tsk_is_oom_victim(current) && 5074 (alloc_flags & ALLOC_OOM || 5075 (gfp_mask & __GFP_NOMEMALLOC))) 5076 goto nopage; 5077 5078 /* Retry as long as the OOM killer is making progress */ 5079 if (did_some_progress) { 5080 no_progress_loops = 0; 5081 goto retry; 5082 } 5083 5084 nopage: 5085 /* Deal with possible cpuset update races before we fail */ 5086 if (check_retry_cpuset(cpuset_mems_cookie, ac)) 5087 goto retry_cpuset; 5088 5089 /* 5090 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 5091 * we always retry 5092 */ 5093 if (gfp_mask & __GFP_NOFAIL) { 5094 /* 5095 * All existing users of the __GFP_NOFAIL are blockable, so 
warn 5096 * of any new users that actually require GFP_NOWAIT 5097 */ 5098 if (WARN_ON_ONCE(!can_direct_reclaim)) 5099 goto fail; 5100 5101 /* 5102 * PF_MEMALLOC request from this context is rather bizarre 5103 * because we cannot reclaim anything and only can loop waiting 5104 * for somebody to do a work for us 5105 */ 5106 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 5107 5108 /* 5109 * non failing costly orders are a hard requirement which we 5110 * are not prepared for much so let's warn about these users 5111 * so that we can identify them and convert them to something 5112 * else. 5113 */ 5114 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); 5115 5116 /* 5117 * Help non-failing allocations by giving them access to memory 5118 * reserves but do not use ALLOC_NO_WATERMARKS because this 5119 * could deplete whole memory reserves which would just make 5120 * the situation worse 5121 */ 5122 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); 5123 if (page) 5124 goto got_pg; 5125 5126 cond_resched(); 5127 goto retry; 5128 } 5129 fail: 5130 warn_alloc(gfp_mask, ac->nodemask, 5131 "page allocation failure: order:%u", order); 5132 got_pg: 5133 return page; 5134 } 5135 5136 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 5137 int preferred_nid, nodemask_t *nodemask, 5138 struct alloc_context *ac, gfp_t *alloc_gfp, 5139 unsigned int *alloc_flags) 5140 { 5141 ac->highest_zoneidx = gfp_zone(gfp_mask); 5142 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 5143 ac->nodemask = nodemask; 5144 ac->migratetype = gfp_migratetype(gfp_mask); 5145 5146 if (cpusets_enabled()) { 5147 *alloc_gfp |= __GFP_HARDWALL; 5148 /* 5149 * When we are in the interrupt context, it is irrelevant 5150 * to the current task context. It means that any node ok. 5151 */ 5152 if (in_task() && !ac->nodemask) 5153 ac->nodemask = &cpuset_current_mems_allowed; 5154 else 5155 *alloc_flags |= ALLOC_CPUSET; 5156 } 5157 5158 fs_reclaim_acquire(gfp_mask); 5159 fs_reclaim_release(gfp_mask); 5160 5161 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 5162 5163 if (should_fail_alloc_page(gfp_mask, order)) 5164 return false; 5165 5166 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 5167 5168 /* Dirty zone balancing only done in the fast path */ 5169 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 5170 5171 /* 5172 * The preferred zone is used for statistics but crucially it is 5173 * also used as the starting point for the zonelist iterator. It 5174 * may get reset for allocations that ignore memory policies. 5175 */ 5176 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5177 ac->highest_zoneidx, ac->nodemask); 5178 5179 return true; 5180 } 5181 5182 /* 5183 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 5184 * @gfp: GFP flags for the allocation 5185 * @preferred_nid: The preferred NUMA node ID to allocate from 5186 * @nodemask: Set of nodes to allocate from, may be NULL 5187 * @nr_pages: The number of pages desired on the list or array 5188 * @page_list: Optional list to store the allocated pages 5189 * @page_array: Optional array to store the pages 5190 * 5191 * This is a batched version of the page allocator that attempts to 5192 * allocate nr_pages quickly. Pages are added to page_list if page_list 5193 * is not NULL, otherwise it is assumed that the page_array is valid. 5194 * 5195 * For lists, nr_pages is the number of pages that should be allocated. 
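 *
 * A rough sketch of list usage (illustrative only, error handling and
 * freeing of the pages omitted):
 *
 *	LIST_HEAD(pages);
 *	unsigned long nr;
 *
 *	nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL, 16,
 *				&pages, NULL);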
5196 * 5197 * For arrays, only NULL elements are populated with pages and nr_pages 5198 * is the maximum number of pages that will be stored in the array. 5199 * 5200 * Returns the number of pages on the list or array. 5201 */ 5202 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, 5203 nodemask_t *nodemask, int nr_pages, 5204 struct list_head *page_list, 5205 struct page **page_array) 5206 { 5207 struct page *page; 5208 unsigned long flags; 5209 struct zone *zone; 5210 struct zoneref *z; 5211 struct per_cpu_pages *pcp; 5212 struct list_head *pcp_list; 5213 struct alloc_context ac; 5214 gfp_t alloc_gfp; 5215 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5216 int nr_populated = 0, nr_account = 0; 5217 5218 /* 5219 * Skip populated array elements to determine if any pages need 5220 * to be allocated before disabling IRQs. 5221 */ 5222 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 5223 nr_populated++; 5224 5225 /* No pages requested? */ 5226 if (unlikely(nr_pages <= 0)) 5227 goto out; 5228 5229 /* Already populated array? */ 5230 if (unlikely(page_array && nr_pages - nr_populated == 0)) 5231 goto out; 5232 5233 /* Bulk allocator does not support memcg accounting. */ 5234 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT)) 5235 goto failed; 5236 5237 /* Use the single page allocator for one page. */ 5238 if (nr_pages - nr_populated == 1) 5239 goto failed; 5240 5241 #ifdef CONFIG_PAGE_OWNER 5242 /* 5243 * PAGE_OWNER may recurse into the allocator to allocate space to 5244 * save the stack with pagesets.lock held. Releasing/reacquiring 5245 * removes much of the performance benefit of bulk allocation so 5246 * force the caller to allocate one page at a time as it'll have 5247 * similar performance to added complexity to the bulk allocator. 5248 */ 5249 if (static_branch_unlikely(&page_owner_inited)) 5250 goto failed; 5251 #endif 5252 5253 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 5254 gfp &= gfp_allowed_mask; 5255 alloc_gfp = gfp; 5256 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 5257 goto out; 5258 gfp = alloc_gfp; 5259 5260 /* Find an allowed local zone that meets the low watermark. */ 5261 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 5262 unsigned long mark; 5263 5264 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 5265 !__cpuset_zone_allowed(zone, gfp)) { 5266 continue; 5267 } 5268 5269 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && 5270 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { 5271 goto failed; 5272 } 5273 5274 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 5275 if (zone_watermark_fast(zone, 0, mark, 5276 zonelist_zone_idx(ac.preferred_zoneref), 5277 alloc_flags, gfp)) { 5278 break; 5279 } 5280 } 5281 5282 /* 5283 * If there are no allowed local zones that meets the watermarks then 5284 * try to allocate a single page and reclaim if necessary. 
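 * The failed: path below falls back to __alloc_pages() for a single
 * page.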
5285 */ 5286 if (unlikely(!zone)) 5287 goto failed; 5288 5289 /* Attempt the batch allocation */ 5290 local_lock_irqsave(&pagesets.lock, flags); 5291 pcp = this_cpu_ptr(zone->per_cpu_pageset); 5292 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 5293 5294 while (nr_populated < nr_pages) { 5295 5296 /* Skip existing pages */ 5297 if (page_array && page_array[nr_populated]) { 5298 nr_populated++; 5299 continue; 5300 } 5301 5302 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5303 pcp, pcp_list); 5304 if (unlikely(!page)) { 5305 /* Try and get at least one page */ 5306 if (!nr_populated) 5307 goto failed_irq; 5308 break; 5309 } 5310 nr_account++; 5311 5312 prep_new_page(page, 0, gfp, 0); 5313 if (page_list) 5314 list_add(&page->lru, page_list); 5315 else 5316 page_array[nr_populated] = page; 5317 nr_populated++; 5318 } 5319 5320 local_unlock_irqrestore(&pagesets.lock, flags); 5321 5322 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 5323 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); 5324 5325 out: 5326 return nr_populated; 5327 5328 failed_irq: 5329 local_unlock_irqrestore(&pagesets.lock, flags); 5330 5331 failed: 5332 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); 5333 if (page) { 5334 if (page_list) 5335 list_add(&page->lru, page_list); 5336 else 5337 page_array[nr_populated] = page; 5338 nr_populated++; 5339 } 5340 5341 goto out; 5342 } 5343 EXPORT_SYMBOL_GPL(__alloc_pages_bulk); 5344 5345 /* 5346 * This is the 'heart' of the zoned buddy allocator. 5347 */ 5348 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, 5349 nodemask_t *nodemask) 5350 { 5351 struct page *page; 5352 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5353 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 5354 struct alloc_context ac = { }; 5355 5356 /* 5357 * There are several places where we assume that the order value is sane 5358 * so bail out early if the request is out of bound. 5359 */ 5360 if (unlikely(order >= MAX_ORDER)) { 5361 WARN_ON_ONCE(!(gfp & __GFP_NOWARN)); 5362 return NULL; 5363 } 5364 5365 gfp &= gfp_allowed_mask; 5366 /* 5367 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 5368 * resp. GFP_NOIO which has to be inherited for all allocation requests 5369 * from a particular context which has been marked by 5370 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 5371 * movable zones are not used during allocation. 5372 */ 5373 gfp = current_gfp_context(gfp); 5374 alloc_gfp = gfp; 5375 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 5376 &alloc_gfp, &alloc_flags)) 5377 return NULL; 5378 5379 /* 5380 * Forbid the first pass from falling back to types that fragment 5381 * memory until all local zones are considered. 5382 */ 5383 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 5384 5385 /* First allocation attempt */ 5386 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 5387 if (likely(page)) 5388 goto out; 5389 5390 alloc_gfp = gfp; 5391 ac.spread_dirty_pages = false; 5392 5393 /* 5394 * Restore the original nodemask if it was potentially replaced with 5395 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 
5396 */ 5397 ac.nodemask = nodemask; 5398 5399 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 5400 5401 out: 5402 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page && 5403 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 5404 __free_pages(page, order); 5405 page = NULL; 5406 } 5407 5408 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5409 5410 return page; 5411 } 5412 EXPORT_SYMBOL(__alloc_pages); 5413 5414 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 5415 nodemask_t *nodemask) 5416 { 5417 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 5418 preferred_nid, nodemask); 5419 5420 if (page && order > 1) 5421 prep_transhuge_page(page); 5422 return (struct folio *)page; 5423 } 5424 EXPORT_SYMBOL(__folio_alloc); 5425 5426 /* 5427 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5428 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5429 * you need to access high mem. 5430 */ 5431 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 5432 { 5433 struct page *page; 5434 5435 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 5436 if (!page) 5437 return 0; 5438 return (unsigned long) page_address(page); 5439 } 5440 EXPORT_SYMBOL(__get_free_pages); 5441 5442 unsigned long get_zeroed_page(gfp_t gfp_mask) 5443 { 5444 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 5445 } 5446 EXPORT_SYMBOL(get_zeroed_page); 5447 5448 /** 5449 * __free_pages - Free pages allocated with alloc_pages(). 5450 * @page: The page pointer returned from alloc_pages(). 5451 * @order: The order of the allocation. 5452 * 5453 * This function can free multi-page allocations that are not compound 5454 * pages. It does not check that the @order passed in matches that of 5455 * the allocation, so it is easy to leak memory. Freeing more memory 5456 * than was allocated will probably emit a warning. 5457 * 5458 * If the last reference to this page is speculative, it will be released 5459 * by put_page() which only frees the first page of a non-compound 5460 * allocation. To prevent the remaining pages from being leaked, we free 5461 * the subsequent pages here. If you want to use the page's reference 5462 * count to decide when to free the allocation, you should allocate a 5463 * compound page, and use put_page() instead of __free_pages(). 5464 * 5465 * Context: May be called in interrupt context or while holding a normal 5466 * spinlock, but not in NMI context or while holding a raw spinlock. 5467 */ 5468 void __free_pages(struct page *page, unsigned int order) 5469 { 5470 if (put_page_testzero(page)) 5471 free_the_page(page, order); 5472 else if (!PageHead(page)) 5473 while (order-- > 0) 5474 free_the_page(page + (1 << order), order); 5475 } 5476 EXPORT_SYMBOL(__free_pages); 5477 5478 void free_pages(unsigned long addr, unsigned int order) 5479 { 5480 if (addr != 0) { 5481 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5482 __free_pages(virt_to_page((void *)addr), order); 5483 } 5484 } 5485 5486 EXPORT_SYMBOL(free_pages); 5487 5488 /* 5489 * Page Fragment: 5490 * An arbitrary-length arbitrary-offset area of memory which resides 5491 * within a 0 or higher order page. Multiple fragments within that page 5492 * are individually refcounted, in the page's reference counter. 5493 * 5494 * The page_frag functions below provide a simple allocation framework for 5495 * page fragments. 
This is used by the network stack and network device 5496 * drivers to provide a backing region of memory for use as either an 5497 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 5498 */ 5499 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 5500 gfp_t gfp_mask) 5501 { 5502 struct page *page = NULL; 5503 gfp_t gfp = gfp_mask; 5504 5505 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5506 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 5507 __GFP_NOMEMALLOC; 5508 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 5509 PAGE_FRAG_CACHE_MAX_ORDER); 5510 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 5511 #endif 5512 if (unlikely(!page)) 5513 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 5514 5515 nc->va = page ? page_address(page) : NULL; 5516 5517 return page; 5518 } 5519 5520 void __page_frag_cache_drain(struct page *page, unsigned int count) 5521 { 5522 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 5523 5524 if (page_ref_sub_and_test(page, count)) 5525 free_the_page(page, compound_order(page)); 5526 } 5527 EXPORT_SYMBOL(__page_frag_cache_drain); 5528 5529 void *page_frag_alloc_align(struct page_frag_cache *nc, 5530 unsigned int fragsz, gfp_t gfp_mask, 5531 unsigned int align_mask) 5532 { 5533 unsigned int size = PAGE_SIZE; 5534 struct page *page; 5535 int offset; 5536 5537 if (unlikely(!nc->va)) { 5538 refill: 5539 page = __page_frag_cache_refill(nc, gfp_mask); 5540 if (!page) 5541 return NULL; 5542 5543 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5544 /* if size can vary use size else just use PAGE_SIZE */ 5545 size = nc->size; 5546 #endif 5547 /* Even if we own the page, we do not use atomic_set(). 5548 * This would break get_page_unless_zero() users. 5549 */ 5550 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 5551 5552 /* reset page count bias and offset to start of new frag */ 5553 nc->pfmemalloc = page_is_pfmemalloc(page); 5554 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5555 nc->offset = size; 5556 } 5557 5558 offset = nc->offset - fragsz; 5559 if (unlikely(offset < 0)) { 5560 page = virt_to_page(nc->va); 5561 5562 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 5563 goto refill; 5564 5565 if (unlikely(nc->pfmemalloc)) { 5566 free_the_page(page, compound_order(page)); 5567 goto refill; 5568 } 5569 5570 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5571 /* if size can vary use size else just use PAGE_SIZE */ 5572 size = nc->size; 5573 #endif 5574 /* OK, page count is 0, we can safely set it */ 5575 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 5576 5577 /* reset page count bias and offset to start of new frag */ 5578 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5579 offset = size - fragsz; 5580 } 5581 5582 nc->pagecnt_bias--; 5583 offset &= align_mask; 5584 nc->offset = offset; 5585 5586 return nc->va + offset; 5587 } 5588 EXPORT_SYMBOL(page_frag_alloc_align); 5589 5590 /* 5591 * Frees a page fragment allocated out of either a compound or order 0 page. 
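 * * A minimal usage sketch (illustrative only, not taken from an in-tree caller; the cache below is a hypothetical caller-owned object that must start out zeroed so the first allocation triggers a refill): * * static struct page_frag_cache frag_cache; * void *buf; * * buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC); * if (buf) { * ... use the 256-byte fragment ... * page_frag_free(buf); * }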
5592 */ 5593 void page_frag_free(void *addr) 5594 { 5595 struct page *page = virt_to_head_page(addr); 5596 5597 if (unlikely(put_page_testzero(page))) 5598 free_the_page(page, compound_order(page)); 5599 } 5600 EXPORT_SYMBOL(page_frag_free); 5601 5602 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5603 size_t size) 5604 { 5605 if (addr) { 5606 unsigned long alloc_end = addr + (PAGE_SIZE << order); 5607 unsigned long used = addr + PAGE_ALIGN(size); 5608 5609 split_page(virt_to_page((void *)addr), order); 5610 while (used < alloc_end) { 5611 free_page(used); 5612 used += PAGE_SIZE; 5613 } 5614 } 5615 return (void *)addr; 5616 } 5617 5618 /** 5619 * alloc_pages_exact - allocate an exact number of physically-contiguous pages. 5620 * @size: the number of bytes to allocate 5621 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5622 * 5623 * This function is similar to alloc_pages(), except that it allocates the 5624 * minimum number of pages to satisfy the request. alloc_pages() can only 5625 * allocate memory in power-of-two pages. 5626 * 5627 * This function is also limited by MAX_ORDER. 5628 * 5629 * Memory allocated by this function must be released by free_pages_exact(). 5630 * 5631 * Return: pointer to the allocated area or %NULL in case of error. 5632 */ 5633 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 5634 { 5635 unsigned int order = get_order(size); 5636 unsigned long addr; 5637 5638 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5639 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5640 5641 addr = __get_free_pages(gfp_mask, order); 5642 return make_alloc_exact(addr, order, size); 5643 } 5644 EXPORT_SYMBOL(alloc_pages_exact); 5645 5646 /** 5647 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5648 * pages on a node. 5649 * @nid: the preferred node ID where memory should be allocated 5650 * @size: the number of bytes to allocate 5651 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5652 * 5653 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5654 * back. 5655 * 5656 * Return: pointer to the allocated area or %NULL in case of error. 5657 */ 5658 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 5659 { 5660 unsigned int order = get_order(size); 5661 struct page *p; 5662 5663 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5664 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5665 5666 p = alloc_pages_node(nid, gfp_mask, order); 5667 if (!p) 5668 return NULL; 5669 return make_alloc_exact((unsigned long)page_address(p), order, size); 5670 } 5671 5672 /** 5673 * free_pages_exact - release memory allocated via alloc_pages_exact() 5674 * @virt: the value returned by alloc_pages_exact(). 5675 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5676 * 5677 * Release the memory allocated by a previous call to alloc_pages_exact(). 5678 */ 5679 void free_pages_exact(void *virt, size_t size) 5680 { 5681 unsigned long addr = (unsigned long)virt; 5682 unsigned long end = addr + PAGE_ALIGN(size); 5683 5684 while (addr < end) { 5685 free_page(addr); 5686 addr += PAGE_SIZE; 5687 } 5688 } 5689 EXPORT_SYMBOL(free_pages_exact); 5690 5691 /** 5692 * nr_free_zone_pages - count number of pages beyond high watermark 5693 * @offset: The zone index of the highest zone 5694 * 5695 * nr_free_zone_pages() counts the number of pages which are beyond the 5696 * high watermark within all zones at or below a given zone index.
For each 5697 * zone, the number of pages is calculated as: 5698 * 5699 * nr_free_zone_pages = managed_pages - high_pages 5700 * 5701 * Return: number of pages beyond high watermark. 5702 */ 5703 static unsigned long nr_free_zone_pages(int offset) 5704 { 5705 struct zoneref *z; 5706 struct zone *zone; 5707 5708 /* Just pick one node, since fallback list is circular */ 5709 unsigned long sum = 0; 5710 5711 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5712 5713 for_each_zone_zonelist(zone, z, zonelist, offset) { 5714 unsigned long size = zone_managed_pages(zone); 5715 unsigned long high = high_wmark_pages(zone); 5716 if (size > high) 5717 sum += size - high; 5718 } 5719 5720 return sum; 5721 } 5722 5723 /** 5724 * nr_free_buffer_pages - count number of pages beyond high watermark 5725 * 5726 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5727 * watermark within ZONE_DMA and ZONE_NORMAL. 5728 * 5729 * Return: number of pages beyond high watermark within ZONE_DMA and 5730 * ZONE_NORMAL. 5731 */ 5732 unsigned long nr_free_buffer_pages(void) 5733 { 5734 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5735 } 5736 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5737 5738 static inline void show_node(struct zone *zone) 5739 { 5740 if (IS_ENABLED(CONFIG_NUMA)) 5741 printk("Node %d ", zone_to_nid(zone)); 5742 } 5743 5744 long si_mem_available(void) 5745 { 5746 long available; 5747 unsigned long pagecache; 5748 unsigned long wmark_low = 0; 5749 unsigned long pages[NR_LRU_LISTS]; 5750 unsigned long reclaimable; 5751 struct zone *zone; 5752 int lru; 5753 5754 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 5755 pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 5756 5757 for_each_zone(zone) 5758 wmark_low += low_wmark_pages(zone); 5759 5760 /* 5761 * Estimate the amount of memory available for userspace allocations, 5762 * without causing swapping. 5763 */ 5764 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; 5765 5766 /* 5767 * Not all the page cache can be freed, otherwise the system will 5768 * start swapping. Assume at least half of the page cache, or the 5769 * low watermark worth of cache, needs to stay. 5770 */ 5771 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 5772 pagecache -= min(pagecache / 2, wmark_low); 5773 available += pagecache; 5774 5775 /* 5776 * Part of the reclaimable slab and other kernel memory consists of 5777 * items that are in use, and cannot be freed. Cap this estimate at the 5778 * low watermark. 
5779 */ 5780 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + 5781 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); 5782 available += reclaimable - min(reclaimable / 2, wmark_low); 5783 5784 if (available < 0) 5785 available = 0; 5786 return available; 5787 } 5788 EXPORT_SYMBOL_GPL(si_mem_available); 5789 5790 void si_meminfo(struct sysinfo *val) 5791 { 5792 val->totalram = totalram_pages(); 5793 val->sharedram = global_node_page_state(NR_SHMEM); 5794 val->freeram = global_zone_page_state(NR_FREE_PAGES); 5795 val->bufferram = nr_blockdev_pages(); 5796 val->totalhigh = totalhigh_pages(); 5797 val->freehigh = nr_free_highpages(); 5798 val->mem_unit = PAGE_SIZE; 5799 } 5800 5801 EXPORT_SYMBOL(si_meminfo); 5802 5803 #ifdef CONFIG_NUMA 5804 void si_meminfo_node(struct sysinfo *val, int nid) 5805 { 5806 int zone_type; /* needs to be signed */ 5807 unsigned long managed_pages = 0; 5808 unsigned long managed_highpages = 0; 5809 unsigned long free_highpages = 0; 5810 pg_data_t *pgdat = NODE_DATA(nid); 5811 5812 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 5813 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); 5814 val->totalram = managed_pages; 5815 val->sharedram = node_page_state(pgdat, NR_SHMEM); 5816 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 5817 #ifdef CONFIG_HIGHMEM 5818 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 5819 struct zone *zone = &pgdat->node_zones[zone_type]; 5820 5821 if (is_highmem(zone)) { 5822 managed_highpages += zone_managed_pages(zone); 5823 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 5824 } 5825 } 5826 val->totalhigh = managed_highpages; 5827 val->freehigh = free_highpages; 5828 #else 5829 val->totalhigh = managed_highpages; 5830 val->freehigh = free_highpages; 5831 #endif 5832 val->mem_unit = PAGE_SIZE; 5833 } 5834 #endif 5835 5836 /* 5837 * Determine whether the node should be displayed or not, depending on whether 5838 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 5839 */ 5840 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 5841 { 5842 if (!(flags & SHOW_MEM_FILTER_NODES)) 5843 return false; 5844 5845 /* 5846 * no node mask - aka implicit memory numa policy. Do not bother with 5847 * the synchronization - read_mems_allowed_begin - because we do not 5848 * have to be precise here. 5849 */ 5850 if (!nodemask) 5851 nodemask = &cpuset_current_mems_allowed; 5852 5853 return !node_isset(nid, *nodemask); 5854 } 5855 5856 #define K(x) ((x) << (PAGE_SHIFT-10)) 5857 5858 static void show_migration_types(unsigned char type) 5859 { 5860 static const char types[MIGRATE_TYPES] = { 5861 [MIGRATE_UNMOVABLE] = 'U', 5862 [MIGRATE_MOVABLE] = 'M', 5863 [MIGRATE_RECLAIMABLE] = 'E', 5864 [MIGRATE_HIGHATOMIC] = 'H', 5865 #ifdef CONFIG_CMA 5866 [MIGRATE_CMA] = 'C', 5867 #endif 5868 #ifdef CONFIG_MEMORY_ISOLATION 5869 [MIGRATE_ISOLATE] = 'I', 5870 #endif 5871 }; 5872 char tmp[MIGRATE_TYPES + 1]; 5873 char *p = tmp; 5874 int i; 5875 5876 for (i = 0; i < MIGRATE_TYPES; i++) { 5877 if (type & (1 << i)) 5878 *p++ = types[i]; 5879 } 5880 5881 *p = '\0'; 5882 printk(KERN_CONT "(%s) ", tmp); 5883 } 5884 5885 /* 5886 * Show free area list (used inside shift_scroll-lock stuff) 5887 * We also calculate the percentage fragmentation. We do this by counting the 5888 * memory on each free list with the exception of the first item on the list. 5889 * 5890 * Bits in @filter: 5891 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 5892 * cpuset. 
5893 */ 5894 void show_free_areas(unsigned int filter, nodemask_t *nodemask) 5895 { 5896 unsigned long free_pcp = 0; 5897 int cpu; 5898 struct zone *zone; 5899 pg_data_t *pgdat; 5900 5901 for_each_populated_zone(zone) { 5902 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5903 continue; 5904 5905 for_each_online_cpu(cpu) 5906 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 5907 } 5908 5909 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 5910 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 5911 " unevictable:%lu dirty:%lu writeback:%lu\n" 5912 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 5913 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 5914 " kernel_misc_reclaimable:%lu\n" 5915 " free:%lu free_pcp:%lu free_cma:%lu\n", 5916 global_node_page_state(NR_ACTIVE_ANON), 5917 global_node_page_state(NR_INACTIVE_ANON), 5918 global_node_page_state(NR_ISOLATED_ANON), 5919 global_node_page_state(NR_ACTIVE_FILE), 5920 global_node_page_state(NR_INACTIVE_FILE), 5921 global_node_page_state(NR_ISOLATED_FILE), 5922 global_node_page_state(NR_UNEVICTABLE), 5923 global_node_page_state(NR_FILE_DIRTY), 5924 global_node_page_state(NR_WRITEBACK), 5925 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), 5926 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), 5927 global_node_page_state(NR_FILE_MAPPED), 5928 global_node_page_state(NR_SHMEM), 5929 global_node_page_state(NR_PAGETABLE), 5930 global_zone_page_state(NR_BOUNCE), 5931 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), 5932 global_zone_page_state(NR_FREE_PAGES), 5933 free_pcp, 5934 global_zone_page_state(NR_FREE_CMA_PAGES)); 5935 5936 for_each_online_pgdat(pgdat) { 5937 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 5938 continue; 5939 5940 printk("Node %d" 5941 " active_anon:%lukB" 5942 " inactive_anon:%lukB" 5943 " active_file:%lukB" 5944 " inactive_file:%lukB" 5945 " unevictable:%lukB" 5946 " isolated(anon):%lukB" 5947 " isolated(file):%lukB" 5948 " mapped:%lukB" 5949 " dirty:%lukB" 5950 " writeback:%lukB" 5951 " shmem:%lukB" 5952 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5953 " shmem_thp: %lukB" 5954 " shmem_pmdmapped: %lukB" 5955 " anon_thp: %lukB" 5956 #endif 5957 " writeback_tmp:%lukB" 5958 " kernel_stack:%lukB" 5959 #ifdef CONFIG_SHADOW_CALL_STACK 5960 " shadow_call_stack:%lukB" 5961 #endif 5962 " pagetables:%lukB" 5963 " all_unreclaimable? %s" 5964 "\n", 5965 pgdat->node_id, 5966 K(node_page_state(pgdat, NR_ACTIVE_ANON)), 5967 K(node_page_state(pgdat, NR_INACTIVE_ANON)), 5968 K(node_page_state(pgdat, NR_ACTIVE_FILE)), 5969 K(node_page_state(pgdat, NR_INACTIVE_FILE)), 5970 K(node_page_state(pgdat, NR_UNEVICTABLE)), 5971 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 5972 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 5973 K(node_page_state(pgdat, NR_FILE_MAPPED)), 5974 K(node_page_state(pgdat, NR_FILE_DIRTY)), 5975 K(node_page_state(pgdat, NR_WRITEBACK)), 5976 K(node_page_state(pgdat, NR_SHMEM)), 5977 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5978 K(node_page_state(pgdat, NR_SHMEM_THPS)), 5979 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), 5980 K(node_page_state(pgdat, NR_ANON_THPS)), 5981 #endif 5982 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 5983 node_page_state(pgdat, NR_KERNEL_STACK_KB), 5984 #ifdef CONFIG_SHADOW_CALL_STACK 5985 node_page_state(pgdat, NR_KERNEL_SCS_KB), 5986 #endif 5987 K(node_page_state(pgdat, NR_PAGETABLE)), 5988 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
5989 "yes" : "no"); 5990 } 5991 5992 for_each_populated_zone(zone) { 5993 int i; 5994 5995 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5996 continue; 5997 5998 free_pcp = 0; 5999 for_each_online_cpu(cpu) 6000 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 6001 6002 show_node(zone); 6003 printk(KERN_CONT 6004 "%s" 6005 " free:%lukB" 6006 " boost:%lukB" 6007 " min:%lukB" 6008 " low:%lukB" 6009 " high:%lukB" 6010 " reserved_highatomic:%luKB" 6011 " active_anon:%lukB" 6012 " inactive_anon:%lukB" 6013 " active_file:%lukB" 6014 " inactive_file:%lukB" 6015 " unevictable:%lukB" 6016 " writepending:%lukB" 6017 " present:%lukB" 6018 " managed:%lukB" 6019 " mlocked:%lukB" 6020 " bounce:%lukB" 6021 " free_pcp:%lukB" 6022 " local_pcp:%ukB" 6023 " free_cma:%lukB" 6024 "\n", 6025 zone->name, 6026 K(zone_page_state(zone, NR_FREE_PAGES)), 6027 K(zone->watermark_boost), 6028 K(min_wmark_pages(zone)), 6029 K(low_wmark_pages(zone)), 6030 K(high_wmark_pages(zone)), 6031 K(zone->nr_reserved_highatomic), 6032 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 6033 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 6034 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 6035 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 6036 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 6037 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 6038 K(zone->present_pages), 6039 K(zone_managed_pages(zone)), 6040 K(zone_page_state(zone, NR_MLOCK)), 6041 K(zone_page_state(zone, NR_BOUNCE)), 6042 K(free_pcp), 6043 K(this_cpu_read(zone->per_cpu_pageset->count)), 6044 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 6045 printk("lowmem_reserve[]:"); 6046 for (i = 0; i < MAX_NR_ZONES; i++) 6047 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 6048 printk(KERN_CONT "\n"); 6049 } 6050 6051 for_each_populated_zone(zone) { 6052 unsigned int order; 6053 unsigned long nr[MAX_ORDER], flags, total = 0; 6054 unsigned char types[MAX_ORDER]; 6055 6056 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6057 continue; 6058 show_node(zone); 6059 printk(KERN_CONT "%s: ", zone->name); 6060 6061 spin_lock_irqsave(&zone->lock, flags); 6062 for (order = 0; order < MAX_ORDER; order++) { 6063 struct free_area *area = &zone->free_area[order]; 6064 int type; 6065 6066 nr[order] = area->nr_free; 6067 total += nr[order] << order; 6068 6069 types[order] = 0; 6070 for (type = 0; type < MIGRATE_TYPES; type++) { 6071 if (!free_area_empty(area, type)) 6072 types[order] |= 1 << type; 6073 } 6074 } 6075 spin_unlock_irqrestore(&zone->lock, flags); 6076 for (order = 0; order < MAX_ORDER; order++) { 6077 printk(KERN_CONT "%lu*%lukB ", 6078 nr[order], K(1UL) << order); 6079 if (nr[order]) 6080 show_migration_types(types[order]); 6081 } 6082 printk(KERN_CONT "= %lukB\n", K(total)); 6083 } 6084 6085 hugetlb_show_meminfo(); 6086 6087 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 6088 6089 show_swap_cache_info(); 6090 } 6091 6092 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 6093 { 6094 zoneref->zone = zone; 6095 zoneref->zone_idx = zone_idx(zone); 6096 } 6097 6098 /* 6099 * Builds allocation fallback zone lists. 6100 * 6101 * Add all populated zones of a node to the zonelist. 
6102 */ 6103 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 6104 { 6105 struct zone *zone; 6106 enum zone_type zone_type = MAX_NR_ZONES; 6107 int nr_zones = 0; 6108 6109 do { 6110 zone_type--; 6111 zone = pgdat->node_zones + zone_type; 6112 if (managed_zone(zone)) { 6113 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 6114 check_highest_zone(zone_type); 6115 } 6116 } while (zone_type); 6117 6118 return nr_zones; 6119 } 6120 6121 #ifdef CONFIG_NUMA 6122 6123 static int __parse_numa_zonelist_order(char *s) 6124 { 6125 /* 6126 * We used to support different zonelists modes but they turned 6127 * out to be just not useful. Let's keep the warning in place 6128 * if somebody still use the cmd line parameter so that we do 6129 * not fail it silently 6130 */ 6131 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 6132 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 6133 return -EINVAL; 6134 } 6135 return 0; 6136 } 6137 6138 char numa_zonelist_order[] = "Node"; 6139 6140 /* 6141 * sysctl handler for numa_zonelist_order 6142 */ 6143 int numa_zonelist_order_handler(struct ctl_table *table, int write, 6144 void *buffer, size_t *length, loff_t *ppos) 6145 { 6146 if (write) 6147 return __parse_numa_zonelist_order(buffer); 6148 return proc_dostring(table, write, buffer, length, ppos); 6149 } 6150 6151 6152 #define MAX_NODE_LOAD (nr_online_nodes) 6153 static int node_load[MAX_NUMNODES]; 6154 6155 /** 6156 * find_next_best_node - find the next node that should appear in a given node's fallback list 6157 * @node: node whose fallback list we're appending 6158 * @used_node_mask: nodemask_t of already used nodes 6159 * 6160 * We use a number of factors to determine which is the next node that should 6161 * appear on a given node's fallback list. The node should not have appeared 6162 * already in @node's fallback list, and it should be the next closest node 6163 * according to the distance array (which contains arbitrary distance values 6164 * from each node to each node in the system), and should also prefer nodes 6165 * with no CPUs, since presumably they'll have very little allocation pressure 6166 * on them otherwise. 6167 * 6168 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 6169 */ 6170 int find_next_best_node(int node, nodemask_t *used_node_mask) 6171 { 6172 int n, val; 6173 int min_val = INT_MAX; 6174 int best_node = NUMA_NO_NODE; 6175 6176 /* Use the local node if we haven't already */ 6177 if (!node_isset(node, *used_node_mask)) { 6178 node_set(node, *used_node_mask); 6179 return node; 6180 } 6181 6182 for_each_node_state(n, N_MEMORY) { 6183 6184 /* Don't want a node to appear more than once */ 6185 if (node_isset(n, *used_node_mask)) 6186 continue; 6187 6188 /* Use the distance array to find the distance */ 6189 val = node_distance(node, n); 6190 6191 /* Penalize nodes under us ("prefer the next node") */ 6192 val += (n < node); 6193 6194 /* Give preference to headless and unused nodes */ 6195 if (!cpumask_empty(cpumask_of_node(n))) 6196 val += PENALTY_FOR_NODE_WITH_CPUS; 6197 6198 /* Slight preference for less loaded node */ 6199 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 6200 val += node_load[n]; 6201 6202 if (val < min_val) { 6203 min_val = val; 6204 best_node = n; 6205 } 6206 } 6207 6208 if (best_node >= 0) 6209 node_set(best_node, *used_node_mask); 6210 6211 return best_node; 6212 } 6213 6214 6215 /* 6216 * Build zonelists ordered by node and zones within node. 
6217 * This results in maximum locality--normal zone overflows into local 6218 * DMA zone, if any--but risks exhausting DMA zone. 6219 */ 6220 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 6221 unsigned nr_nodes) 6222 { 6223 struct zoneref *zonerefs; 6224 int i; 6225 6226 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 6227 6228 for (i = 0; i < nr_nodes; i++) { 6229 int nr_zones; 6230 6231 pg_data_t *node = NODE_DATA(node_order[i]); 6232 6233 nr_zones = build_zonerefs_node(node, zonerefs); 6234 zonerefs += nr_zones; 6235 } 6236 zonerefs->zone = NULL; 6237 zonerefs->zone_idx = 0; 6238 } 6239 6240 /* 6241 * Build gfp_thisnode zonelists 6242 */ 6243 static void build_thisnode_zonelists(pg_data_t *pgdat) 6244 { 6245 struct zoneref *zonerefs; 6246 int nr_zones; 6247 6248 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 6249 nr_zones = build_zonerefs_node(pgdat, zonerefs); 6250 zonerefs += nr_zones; 6251 zonerefs->zone = NULL; 6252 zonerefs->zone_idx = 0; 6253 } 6254 6255 /* 6256 * Build zonelists ordered by zone and nodes within zones. 6257 * This results in conserving DMA zone[s] until all Normal memory is 6258 * exhausted, but results in overflowing to remote node while memory 6259 * may still exist in local DMA zone. 6260 */ 6261 6262 static void build_zonelists(pg_data_t *pgdat) 6263 { 6264 static int node_order[MAX_NUMNODES]; 6265 int node, load, nr_nodes = 0; 6266 nodemask_t used_mask = NODE_MASK_NONE; 6267 int local_node, prev_node; 6268 6269 /* NUMA-aware ordering of nodes */ 6270 local_node = pgdat->node_id; 6271 load = nr_online_nodes; 6272 prev_node = local_node; 6273 6274 memset(node_order, 0, sizeof(node_order)); 6275 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 6276 /* 6277 * We don't want to pressure a particular node. 6278 * So adding penalty to the first node in same 6279 * distance group to make it round-robin. 6280 */ 6281 if (node_distance(local_node, node) != 6282 node_distance(local_node, prev_node)) 6283 node_load[node] += load; 6284 6285 node_order[nr_nodes++] = node; 6286 prev_node = node; 6287 load--; 6288 } 6289 6290 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 6291 build_thisnode_zonelists(pgdat); 6292 pr_info("Fallback order for Node %d: ", local_node); 6293 for (node = 0; node < nr_nodes; node++) 6294 pr_cont("%d ", node_order[node]); 6295 pr_cont("\n"); 6296 } 6297 6298 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6299 /* 6300 * Return node id of node used for "local" allocations. 6301 * I.e., first node id of first zone in arg node's generic zonelist. 6302 * Used for initializing percpu 'numa_mem', which is used primarily 6303 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 
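 * * For example (an illustration, not a guarantee for every configuration): a memoryless node N whose nearest memory is on node M has M's zones at the head of its GFP_KERNEL zonelist, so local_memory_node(N) returns M, while a node with its own managed memory simply gets its own id back.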
6304 */ 6305 int local_memory_node(int node) 6306 { 6307 struct zoneref *z; 6308 6309 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 6310 gfp_zone(GFP_KERNEL), 6311 NULL); 6312 return zone_to_nid(z->zone); 6313 } 6314 #endif 6315 6316 static void setup_min_unmapped_ratio(void); 6317 static void setup_min_slab_ratio(void); 6318 #else /* CONFIG_NUMA */ 6319 6320 static void build_zonelists(pg_data_t *pgdat) 6321 { 6322 int node, local_node; 6323 struct zoneref *zonerefs; 6324 int nr_zones; 6325 6326 local_node = pgdat->node_id; 6327 6328 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 6329 nr_zones = build_zonerefs_node(pgdat, zonerefs); 6330 zonerefs += nr_zones; 6331 6332 /* 6333 * Now we build the zonelist so that it contains the zones 6334 * of all the other nodes. 6335 * We don't want to pressure a particular node, so when 6336 * building the zones for node N, we make sure that the 6337 * zones coming right after the local ones are those from 6338 * node N+1 (modulo N) 6339 */ 6340 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 6341 if (!node_online(node)) 6342 continue; 6343 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 6344 zonerefs += nr_zones; 6345 } 6346 for (node = 0; node < local_node; node++) { 6347 if (!node_online(node)) 6348 continue; 6349 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 6350 zonerefs += nr_zones; 6351 } 6352 6353 zonerefs->zone = NULL; 6354 zonerefs->zone_idx = 0; 6355 } 6356 6357 #endif /* CONFIG_NUMA */ 6358 6359 /* 6360 * Boot pageset table. One per cpu which is going to be used for all 6361 * zones and all nodes. The parameters will be set in such a way 6362 * that an item put on a list will immediately be handed over to 6363 * the buddy list. This is safe since pageset manipulation is done 6364 * with interrupts disabled. 6365 * 6366 * The boot_pagesets must be kept even after bootup is complete for 6367 * unused processors and/or zones. They do play a role for bootstrapping 6368 * hotplugged processors. 6369 * 6370 * zoneinfo_show() and maybe other functions do 6371 * not check if the processor is online before following the pageset pointer. 6372 * Other parts of the kernel may not check if the zone is available. 6373 */ 6374 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 6375 /* These effectively disable the pcplists in the boot pageset completely */ 6376 #define BOOT_PAGESET_HIGH 0 6377 #define BOOT_PAGESET_BATCH 1 6378 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 6379 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 6380 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 6381 6382 static void __build_all_zonelists(void *data) 6383 { 6384 int nid; 6385 int __maybe_unused cpu; 6386 pg_data_t *self = data; 6387 static DEFINE_SPINLOCK(lock); 6388 6389 spin_lock(&lock); 6390 6391 #ifdef CONFIG_NUMA 6392 memset(node_load, 0, sizeof(node_load)); 6393 #endif 6394 6395 /* 6396 * This node is hotadded and no memory is yet present. So just 6397 * building zonelists is fine - no need to touch other nodes. 6398 */ 6399 if (self && !node_online(self->node_id)) { 6400 build_zonelists(self); 6401 } else { 6402 for_each_online_node(nid) { 6403 pg_data_t *pgdat = NODE_DATA(nid); 6404 6405 build_zonelists(pgdat); 6406 } 6407 6408 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6409 /* 6410 * We now know the "local memory node" for each node-- 6411 * i.e., the node of the first zone in the generic zonelist. 
6412 * Set up numa_mem percpu variable for on-line cpus. During 6413 * boot, only the boot cpu should be on-line; we'll init the 6414 * secondary cpus' numa_mem as they come on-line. During 6415 * node/memory hotplug, we'll fixup all on-line cpus. 6416 */ 6417 for_each_online_cpu(cpu) 6418 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 6419 #endif 6420 } 6421 6422 spin_unlock(&lock); 6423 } 6424 6425 static noinline void __init 6426 build_all_zonelists_init(void) 6427 { 6428 int cpu; 6429 6430 __build_all_zonelists(NULL); 6431 6432 /* 6433 * Initialize the boot_pagesets that are going to be used 6434 * for bootstrapping processors. The real pagesets for 6435 * each zone will be allocated later when the per cpu 6436 * allocator is available. 6437 * 6438 * boot_pagesets are used also for bootstrapping offline 6439 * cpus if the system is already booted because the pagesets 6440 * are needed to initialize allocators on a specific cpu too. 6441 * F.e. the percpu allocator needs the page allocator which 6442 * needs the percpu allocator in order to allocate its pagesets 6443 * (a chicken-egg dilemma). 6444 */ 6445 for_each_possible_cpu(cpu) 6446 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 6447 6448 mminit_verify_zonelist(); 6449 cpuset_init_current_mems_allowed(); 6450 } 6451 6452 /* 6453 * The __init helper build_all_zonelists_init() is only used while 6454 * system_state == SYSTEM_BOOTING; later calls (e.g. from memory hotplug) 6455 * go through __build_all_zonelists() directly. __ref due to the call of the 6456 * __init annotated helper [protected by SYSTEM_BOOTING]. 6457 */ 6458 void __ref build_all_zonelists(pg_data_t *pgdat) 6459 { 6460 unsigned long vm_total_pages; 6461 6462 if (system_state == SYSTEM_BOOTING) { 6463 build_all_zonelists_init(); 6464 } else { 6465 __build_all_zonelists(pgdat); 6466 /* cpuset refresh routine should be here */ 6467 } 6468 /* Get the number of free pages beyond high watermark in all zones. */ 6469 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 6470 /* 6471 * Disable grouping by mobility if the number of pages in the 6472 * system is too low to allow the mechanism to work. It would be 6473 * more accurate, but expensive to check per-zone. This check is 6474 * made on memory-hotadd so a system can start with mobility 6475 * disabled and enable it later. 6476 */ 6477 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 6478 page_group_by_mobility_disabled = 1; 6479 else 6480 page_group_by_mobility_disabled = 0; 6481 6482 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 6483 nr_online_nodes, 6484 page_group_by_mobility_disabled ? "off" : "on", 6485 vm_total_pages); 6486 #ifdef CONFIG_NUMA 6487 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 6488 #endif 6489 } 6490 6491 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 6492 static bool __meminit 6493 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 6494 { 6495 static struct memblock_region *r; 6496 6497 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 6498 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 6499 for_each_mem_region(r) { 6500 if (*pfn < memblock_region_memory_end_pfn(r)) 6501 break; 6502 } 6503 } 6504 if (*pfn >= memblock_region_memory_base_pfn(r) && 6505 memblock_is_mirror(r)) { 6506 *pfn = memblock_region_memory_end_pfn(r); 6507 return true; 6508 } 6509 } 6510 return false; 6511 } 6512 6513 /* 6514 * Initially all pages are reserved - free ones are freed 6515 * up by memblock_free_all() once the early boot process is 6516 * done. Non-atomic initialization, single-pass.
6517 * 6518 * All aligned pageblocks are initialized to the specified migratetype 6519 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 6520 * zone stats (e.g., nr_isolate_pageblock) are touched. 6521 */ 6522 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, 6523 unsigned long start_pfn, unsigned long zone_end_pfn, 6524 enum meminit_context context, 6525 struct vmem_altmap *altmap, int migratetype) 6526 { 6527 unsigned long pfn, end_pfn = start_pfn + size; 6528 struct page *page; 6529 6530 if (highest_memmap_pfn < end_pfn - 1) 6531 highest_memmap_pfn = end_pfn - 1; 6532 6533 #ifdef CONFIG_ZONE_DEVICE 6534 /* 6535 * Honor reservation requested by the driver for this ZONE_DEVICE 6536 * memory. We limit the total number of pages to initialize to just 6537 * those that might contain the memory mapping. We will defer the 6538 * ZONE_DEVICE page initialization until after we have released 6539 * the hotplug lock. 6540 */ 6541 if (zone == ZONE_DEVICE) { 6542 if (!altmap) 6543 return; 6544 6545 if (start_pfn == altmap->base_pfn) 6546 start_pfn += altmap->reserve; 6547 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6548 } 6549 #endif 6550 6551 for (pfn = start_pfn; pfn < end_pfn; ) { 6552 /* 6553 * There can be holes in boot-time mem_map[]s handed to this 6554 * function. They do not exist on hotplugged memory. 6555 */ 6556 if (context == MEMINIT_EARLY) { 6557 if (overlap_memmap_init(zone, &pfn)) 6558 continue; 6559 if (defer_init(nid, pfn, zone_end_pfn)) 6560 break; 6561 } 6562 6563 page = pfn_to_page(pfn); 6564 __init_single_page(page, pfn, zone, nid); 6565 if (context == MEMINIT_HOTPLUG) 6566 __SetPageReserved(page); 6567 6568 /* 6569 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 6570 * such that unmovable allocations won't be scattered all 6571 * over the place during system boot. 6572 */ 6573 if (IS_ALIGNED(pfn, pageblock_nr_pages)) { 6574 set_pageblock_migratetype(page, migratetype); 6575 cond_resched(); 6576 } 6577 pfn++; 6578 } 6579 } 6580 6581 #ifdef CONFIG_ZONE_DEVICE 6582 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, 6583 unsigned long zone_idx, int nid, 6584 struct dev_pagemap *pgmap) 6585 { 6586 6587 __init_single_page(page, pfn, zone_idx, nid); 6588 6589 /* 6590 * Mark page reserved as it will need to wait for onlining 6591 * phase for it to be fully associated with a zone. 6592 * 6593 * We can use the non-atomic __set_bit operation for setting 6594 * the flag as we are still initializing the pages. 6595 */ 6596 __SetPageReserved(page); 6597 6598 /* 6599 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 6600 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 6601 * ever freed or placed on a driver-private list. 6602 */ 6603 page->pgmap = pgmap; 6604 page->zone_device_data = NULL; 6605 6606 /* 6607 * Mark the block movable so that blocks are reserved for 6608 * movable at startup. This will force kernel allocations 6609 * to reserve their blocks rather than leaking throughout 6610 * the address space during boot when many long-lived 6611 * kernel allocations are made. 
6612 * 6613 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap 6614 * because this is done early in section_activate() 6615 */ 6616 if (IS_ALIGNED(pfn, pageblock_nr_pages)) { 6617 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 6618 cond_resched(); 6619 } 6620 } 6621 6622 static void __ref memmap_init_compound(struct page *head, 6623 unsigned long head_pfn, 6624 unsigned long zone_idx, int nid, 6625 struct dev_pagemap *pgmap, 6626 unsigned long nr_pages) 6627 { 6628 unsigned long pfn, end_pfn = head_pfn + nr_pages; 6629 unsigned int order = pgmap->vmemmap_shift; 6630 6631 __SetPageHead(head); 6632 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { 6633 struct page *page = pfn_to_page(pfn); 6634 6635 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 6636 prep_compound_tail(head, pfn - head_pfn); 6637 set_page_count(page, 0); 6638 6639 /* 6640 * The first tail page stores compound_mapcount_ptr() and 6641 * compound_order() and the second tail page stores 6642 * compound_pincount_ptr(). Call prep_compound_head() after 6643 * the first and second tail pages have been initialized to 6644 * not have the data overwritten. 6645 */ 6646 if (pfn == head_pfn + 2) 6647 prep_compound_head(head, order); 6648 } 6649 } 6650 6651 void __ref memmap_init_zone_device(struct zone *zone, 6652 unsigned long start_pfn, 6653 unsigned long nr_pages, 6654 struct dev_pagemap *pgmap) 6655 { 6656 unsigned long pfn, end_pfn = start_pfn + nr_pages; 6657 struct pglist_data *pgdat = zone->zone_pgdat; 6658 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 6659 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); 6660 unsigned long zone_idx = zone_idx(zone); 6661 unsigned long start = jiffies; 6662 int nid = pgdat->node_id; 6663 6664 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) 6665 return; 6666 6667 /* 6668 * The call to memmap_init should have already taken care 6669 * of the pages reserved for the memmap, so we can just jump to 6670 * the end of that region and start processing the device pages. 6671 */ 6672 if (altmap) { 6673 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6674 nr_pages = end_pfn - start_pfn; 6675 } 6676 6677 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { 6678 struct page *page = pfn_to_page(pfn); 6679 6680 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 6681 6682 if (pfns_per_compound == 1) 6683 continue; 6684 6685 memmap_init_compound(page, pfn, zone_idx, nid, pgmap, 6686 pfns_per_compound); 6687 } 6688 6689 pr_info("%s initialised %lu pages in %ums\n", __func__, 6690 nr_pages, jiffies_to_msecs(jiffies - start)); 6691 } 6692 6693 #endif 6694 static void __meminit zone_init_free_lists(struct zone *zone) 6695 { 6696 unsigned int order, t; 6697 for_each_migratetype_order(order, t) { 6698 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 6699 zone->free_area[order].nr_free = 0; 6700 } 6701 } 6702 6703 /* 6704 * Only struct pages that correspond to ranges defined by memblock.memory 6705 * are zeroed and initialized by going through __init_single_page() during 6706 * memmap_init_zone_range(). 6707 * 6708 * But, there could be struct pages that correspond to holes in 6709 * memblock.memory. 
This can happen because of the following reasons: 6710 * - physical memory bank size is not necessarily the exact multiple of the 6711 * arbitrary section size 6712 * - early reserved memory may not be listed in memblock.memory 6713 * - memory layouts defined with memmap= kernel parameter may not align 6714 * nicely with memmap sections 6715 * 6716 * Explicitly initialize those struct pages so that: 6717 * - PG_Reserved is set 6718 * - zone and node links point to zone and node that span the page if the 6719 * hole is in the middle of a zone 6720 * - zone and node links point to adjacent zone/node if the hole falls on 6721 * the zone boundary; the pages in such holes will be prepended to the 6722 * zone/node above the hole except for the trailing pages in the last 6723 * section that will be appended to the zone/node below. 6724 */ 6725 static void __init init_unavailable_range(unsigned long spfn, 6726 unsigned long epfn, 6727 int zone, int node) 6728 { 6729 unsigned long pfn; 6730 u64 pgcnt = 0; 6731 6732 for (pfn = spfn; pfn < epfn; pfn++) { 6733 if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) { 6734 pfn = ALIGN_DOWN(pfn, pageblock_nr_pages) 6735 + pageblock_nr_pages - 1; 6736 continue; 6737 } 6738 __init_single_page(pfn_to_page(pfn), pfn, zone, node); 6739 __SetPageReserved(pfn_to_page(pfn)); 6740 pgcnt++; 6741 } 6742 6743 if (pgcnt) 6744 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", 6745 node, zone_names[zone], pgcnt); 6746 } 6747 6748 static void __init memmap_init_zone_range(struct zone *zone, 6749 unsigned long start_pfn, 6750 unsigned long end_pfn, 6751 unsigned long *hole_pfn) 6752 { 6753 unsigned long zone_start_pfn = zone->zone_start_pfn; 6754 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; 6755 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); 6756 6757 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); 6758 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); 6759 6760 if (start_pfn >= end_pfn) 6761 return; 6762 6763 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, 6764 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); 6765 6766 if (*hole_pfn < start_pfn) 6767 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); 6768 6769 *hole_pfn = end_pfn; 6770 } 6771 6772 static void __init memmap_init(void) 6773 { 6774 unsigned long start_pfn, end_pfn; 6775 unsigned long hole_pfn = 0; 6776 int i, j, zone_id = 0, nid; 6777 6778 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 6779 struct pglist_data *node = NODE_DATA(nid); 6780 6781 for (j = 0; j < MAX_NR_ZONES; j++) { 6782 struct zone *zone = node->node_zones + j; 6783 6784 if (!populated_zone(zone)) 6785 continue; 6786 6787 memmap_init_zone_range(zone, start_pfn, end_pfn, 6788 &hole_pfn); 6789 zone_id = j; 6790 } 6791 } 6792 6793 #ifdef CONFIG_SPARSEMEM 6794 /* 6795 * Initialize the memory map for hole in the range [memory_end, 6796 * section_end]. 6797 * Append the pages in this hole to the highest zone in the last 6798 * node. 
6799 * The call to init_unavailable_range() is outside the ifdef to 6800 * silence the compiler warning about zone_id set but not used; 6801 * for FLATMEM it is a nop anyway. 6802 */ 6803 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); 6804 if (hole_pfn < end_pfn) 6805 #endif 6806 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); 6807 } 6808 6809 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 6810 phys_addr_t min_addr, int nid, bool exact_nid) 6811 { 6812 void *ptr; 6813 6814 if (exact_nid) 6815 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 6816 MEMBLOCK_ALLOC_ACCESSIBLE, 6817 nid); 6818 else 6819 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 6820 MEMBLOCK_ALLOC_ACCESSIBLE, 6821 nid); 6822 6823 if (ptr && size > 0) 6824 page_init_poison(ptr, size); 6825 6826 return ptr; 6827 } 6828 6829 static int zone_batchsize(struct zone *zone) 6830 { 6831 #ifdef CONFIG_MMU 6832 int batch; 6833 6834 /* 6835 * The number of pages to batch allocate is either ~0.1% 6836 * of the zone or 1MB, whichever is smaller. The batch 6837 * size is striking a balance between allocation latency 6838 * and zone lock contention. 6839 */ 6840 batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE); 6841 batch /= 4; /* We effectively *= 4 below */ 6842 if (batch < 1) 6843 batch = 1; 6844 6845 /* 6846 * Clamp the batch to a 2^n - 1 value. Having a power 6847 * of 2 value was found to be more likely to have 6848 * suboptimal cache aliasing properties in some cases. 6849 * 6850 * For example if 2 tasks are alternately allocating 6851 * batches of pages, one task can end up with a lot 6852 * of pages of one half of the possible page colors 6853 * and the other with pages of the other colors. 6854 */ 6855 batch = rounddown_pow_of_two(batch + batch/2) - 1; 6856 6857 return batch; 6858 6859 #else 6860 /* The deferral and batching of frees should be suppressed under NOMMU 6861 * conditions. 6862 * 6863 * The problem is that NOMMU needs to be able to allocate large chunks 6864 * of contiguous memory as there's no hardware page translation to 6865 * assemble apparent contiguous memory from discontiguous pages. 6866 * 6867 * Queueing large contiguous runs of pages for batching, however, 6868 * causes the pages to actually be freed in smaller chunks. As there 6869 * can be a significant delay between the individual batches being 6870 * recycled, this leads to the once large chunks of space being 6871 * fragmented and becoming unavailable for high-order allocations. 6872 */ 6873 return 0; 6874 #endif 6875 } 6876 6877 static int zone_highsize(struct zone *zone, int batch, int cpu_online) 6878 { 6879 #ifdef CONFIG_MMU 6880 int high; 6881 int nr_split_cpus; 6882 unsigned long total_pages; 6883 6884 if (!percpu_pagelist_high_fraction) { 6885 /* 6886 * By default, the high value of the pcp is based on the zone 6887 * low watermark so that if they are full then background 6888 * reclaim will not be started prematurely. 6889 */ 6890 total_pages = low_wmark_pages(zone); 6891 } else { 6892 /* 6893 * If percpu_pagelist_high_fraction is configured, the high 6894 * value is based on a fraction of the managed pages in the 6895 * zone. 6896 */ 6897 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; 6898 } 6899 6900 /* 6901 * Split the high value across all online CPUs local to the zone. Note 6902 * that early in boot CPUs may not be online yet, and that during 6903 * CPU hotplug the cpumask is not yet updated when a CPU is being 6904 * onlined.
For memory nodes that have no CPUs, split pcp->high across 6905 * all online CPUs to mitigate the risk that reclaim is triggered 6906 * prematurely due to pages stored on pcp lists. 6907 */ 6908 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 6909 if (!nr_split_cpus) 6910 nr_split_cpus = num_online_cpus(); 6911 high = total_pages / nr_split_cpus; 6912 6913 /* 6914 * Ensure high is at least batch*4. The multiple is based on the 6915 * historical relationship between high and batch. 6916 */ 6917 high = max(high, batch << 2); 6918 6919 return high; 6920 #else 6921 return 0; 6922 #endif 6923 } 6924 6925 /* 6926 * pcp->high and pcp->batch values are related and generally batch is lower 6927 * than high. They are also related to pcp->count such that count is lower 6928 * than high, and as soon as it reaches high, the pcplist is flushed. 6929 * 6930 * However, guaranteeing these relations at all times would require e.g. write 6931 * barriers here but also careful usage of read barriers at the read side, and 6932 * thus be prone to error and bad for performance. Thus the update only prevents 6933 * store tearing. Any new users of pcp->batch and pcp->high should ensure they 6934 * can cope with those fields changing asynchronously, and fully trust only the 6935 * pcp->count field on the local CPU with interrupts disabled. 6936 * 6937 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 6938 * outside of boot time (or some other assurance that no concurrent updaters 6939 * exist). 6940 */ 6941 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 6942 unsigned long batch) 6943 { 6944 WRITE_ONCE(pcp->batch, batch); 6945 WRITE_ONCE(pcp->high, high); 6946 } 6947 6948 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 6949 { 6950 int pindex; 6951 6952 memset(pcp, 0, sizeof(*pcp)); 6953 memset(pzstats, 0, sizeof(*pzstats)); 6954 6955 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 6956 INIT_LIST_HEAD(&pcp->lists[pindex]); 6957 6958 /* 6959 * Set batch and high values safe for a boot pageset. A true percpu 6960 * pageset's initialization will update them subsequently. Here we don't 6961 * need to be as careful as pageset_update() as nobody can access the 6962 * pageset yet. 6963 */ 6964 pcp->high = BOOT_PAGESET_HIGH; 6965 pcp->batch = BOOT_PAGESET_BATCH; 6966 pcp->free_factor = 0; 6967 } 6968 6969 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 6970 unsigned long batch) 6971 { 6972 struct per_cpu_pages *pcp; 6973 int cpu; 6974 6975 for_each_possible_cpu(cpu) { 6976 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6977 pageset_update(pcp, high, batch); 6978 } 6979 } 6980 6981 /* 6982 * Calculate and set new high and batch values for all per-cpu pagesets of a 6983 * zone based on the zone's size. 
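 * * A worked example (illustrative, assuming 4KiB pages): for a zone with roughly 1M managed pages (~4GiB), zone_batchsize() computes min(1M >> 10, 256) / 4 = 64, and rounddown_pow_of_two(64 + 32) - 1 = 63, so batch becomes 63; with the default percpu_pagelist_high_fraction of 0, zone_highsize() then derives high from the zone's low watermark split across the CPUs local to the zone, clamped to at least batch * 4.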
6984 */ 6985 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 6986 { 6987 int new_high, new_batch; 6988 6989 new_batch = max(1, zone_batchsize(zone)); 6990 new_high = zone_highsize(zone, new_batch, cpu_online); 6991 6992 if (zone->pageset_high == new_high && 6993 zone->pageset_batch == new_batch) 6994 return; 6995 6996 zone->pageset_high = new_high; 6997 zone->pageset_batch = new_batch; 6998 6999 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 7000 } 7001 7002 void __meminit setup_zone_pageset(struct zone *zone) 7003 { 7004 int cpu; 7005 7006 /* Size may be 0 on !SMP && !NUMA */ 7007 if (sizeof(struct per_cpu_zonestat) > 0) 7008 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 7009 7010 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 7011 for_each_possible_cpu(cpu) { 7012 struct per_cpu_pages *pcp; 7013 struct per_cpu_zonestat *pzstats; 7014 7015 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 7016 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 7017 per_cpu_pages_init(pcp, pzstats); 7018 } 7019 7020 zone_set_pageset_high_and_batch(zone, 0); 7021 } 7022 7023 /* 7024 * Allocate per cpu pagesets and initialize them. 7025 * Before this call only boot pagesets were available. 7026 */ 7027 void __init setup_per_cpu_pageset(void) 7028 { 7029 struct pglist_data *pgdat; 7030 struct zone *zone; 7031 int __maybe_unused cpu; 7032 7033 for_each_populated_zone(zone) 7034 setup_zone_pageset(zone); 7035 7036 #ifdef CONFIG_NUMA 7037 /* 7038 * Unpopulated zones continue using the boot pagesets. 7039 * The numa stats for these pagesets need to be reset. 7040 * Otherwise, they will end up skewing the stats of 7041 * the nodes these zones are associated with. 7042 */ 7043 for_each_possible_cpu(cpu) { 7044 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 7045 memset(pzstats->vm_numa_event, 0, 7046 sizeof(pzstats->vm_numa_event)); 7047 } 7048 #endif 7049 7050 for_each_online_pgdat(pgdat) 7051 pgdat->per_cpu_nodestats = 7052 alloc_percpu(struct per_cpu_nodestat); 7053 } 7054 7055 static __meminit void zone_pcp_init(struct zone *zone) 7056 { 7057 /* 7058 * per cpu subsystem is not up at this point. The following code 7059 * relies on the ability of the linker to provide the 7060 * offset of a (static) per cpu variable into the per cpu area. 7061 */ 7062 zone->per_cpu_pageset = &boot_pageset; 7063 zone->per_cpu_zonestats = &boot_zonestats; 7064 zone->pageset_high = BOOT_PAGESET_HIGH; 7065 zone->pageset_batch = BOOT_PAGESET_BATCH; 7066 7067 if (populated_zone(zone)) 7068 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 7069 zone->present_pages, zone_batchsize(zone)); 7070 } 7071 7072 void __meminit init_currently_empty_zone(struct zone *zone, 7073 unsigned long zone_start_pfn, 7074 unsigned long size) 7075 { 7076 struct pglist_data *pgdat = zone->zone_pgdat; 7077 int zone_idx = zone_idx(zone) + 1; 7078 7079 if (zone_idx > pgdat->nr_zones) 7080 pgdat->nr_zones = zone_idx; 7081 7082 zone->zone_start_pfn = zone_start_pfn; 7083 7084 mminit_dprintk(MMINIT_TRACE, "memmap_init", 7085 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 7086 pgdat->node_id, 7087 (unsigned long)zone_idx(zone), 7088 zone_start_pfn, (zone_start_pfn + size)); 7089 7090 zone_init_free_lists(zone); 7091 zone->initialized = 1; 7092 } 7093 7094 /** 7095 * get_pfn_range_for_nid - Return the start and end page frames for a node 7096 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 
7097 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 7098 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 7099 * 7100 * It returns the start and end page frame of a node based on information 7101 * provided by memblock_set_node(). If called for a node 7102 * with no available memory, a warning is printed and the start and end 7103 * PFNs will be 0. 7104 */ 7105 void __init get_pfn_range_for_nid(unsigned int nid, 7106 unsigned long *start_pfn, unsigned long *end_pfn) 7107 { 7108 unsigned long this_start_pfn, this_end_pfn; 7109 int i; 7110 7111 *start_pfn = -1UL; 7112 *end_pfn = 0; 7113 7114 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 7115 *start_pfn = min(*start_pfn, this_start_pfn); 7116 *end_pfn = max(*end_pfn, this_end_pfn); 7117 } 7118 7119 if (*start_pfn == -1UL) 7120 *start_pfn = 0; 7121 } 7122 7123 /* 7124 * This finds a zone that can be used for ZONE_MOVABLE pages. The 7125 * assumption is made that zones within a node are ordered in monotonic 7126 * increasing memory addresses so that the "highest" populated zone is used 7127 */ 7128 static void __init find_usable_zone_for_movable(void) 7129 { 7130 int zone_index; 7131 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 7132 if (zone_index == ZONE_MOVABLE) 7133 continue; 7134 7135 if (arch_zone_highest_possible_pfn[zone_index] > 7136 arch_zone_lowest_possible_pfn[zone_index]) 7137 break; 7138 } 7139 7140 VM_BUG_ON(zone_index == -1); 7141 movable_zone = zone_index; 7142 } 7143 7144 /* 7145 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 7146 * because it is sized independent of architecture. Unlike the other zones, 7147 * the starting point for ZONE_MOVABLE is not fixed. It may be different 7148 * in each node depending on the size of each node and how evenly kernelcore 7149 * is distributed. This helper function adjusts the zone ranges 7150 * provided by the architecture for a given node by using the end of the 7151 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that 7152 * zones within a node are ordered by monotonically increasing memory addresses. 7153 */ 7154 static void __init adjust_zone_range_for_zone_movable(int nid, 7155 unsigned long zone_type, 7156 unsigned long node_start_pfn, 7157 unsigned long node_end_pfn, 7158 unsigned long *zone_start_pfn, 7159 unsigned long *zone_end_pfn) 7160 { 7161 /* Only adjust if ZONE_MOVABLE is on this node */ 7162 if (zone_movable_pfn[nid]) { 7163 /* Size ZONE_MOVABLE */ 7164 if (zone_type == ZONE_MOVABLE) { 7165 *zone_start_pfn = zone_movable_pfn[nid]; 7166 *zone_end_pfn = min(node_end_pfn, 7167 arch_zone_highest_possible_pfn[movable_zone]); 7168 7169 /* Adjust for ZONE_MOVABLE starting within this range */ 7170 } else if (!mirrored_kernelcore && 7171 *zone_start_pfn < zone_movable_pfn[nid] && 7172 *zone_end_pfn > zone_movable_pfn[nid]) { 7173 *zone_end_pfn = zone_movable_pfn[nid]; 7174 7175 /* Check if this whole range is within ZONE_MOVABLE */ 7176 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 7177 *zone_start_pfn = *zone_end_pfn; 7178 } 7179 } 7180 7181 /* 7182 * Return the number of pages a zone spans in a node, including holes. 7183 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 7184 */ 7185 static unsigned long __init zone_spanned_pages_in_node(int nid, 7186 unsigned long zone_type, 7187 unsigned long node_start_pfn, 7188 unsigned long node_end_pfn, 7189 unsigned long *zone_start_pfn, 7190 unsigned long *zone_end_pfn) 7191 { 7192 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 7193 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 7194 /* When hotadd a new node from cpu_up(), the node should be empty */ 7195 if (!node_start_pfn && !node_end_pfn) 7196 return 0; 7197 7198 /* Get the start and end of the zone */ 7199 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 7200 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 7201 adjust_zone_range_for_zone_movable(nid, zone_type, 7202 node_start_pfn, node_end_pfn, 7203 zone_start_pfn, zone_end_pfn); 7204 7205 /* Check that this node has pages within the zone's required range */ 7206 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 7207 return 0; 7208 7209 /* Move the zone boundaries inside the node if necessary */ 7210 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 7211 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 7212 7213 /* Return the spanned pages */ 7214 return *zone_end_pfn - *zone_start_pfn; 7215 } 7216 7217 /* 7218 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 7219 * then all holes in the requested range will be accounted for. 7220 */ 7221 unsigned long __init __absent_pages_in_range(int nid, 7222 unsigned long range_start_pfn, 7223 unsigned long range_end_pfn) 7224 { 7225 unsigned long nr_absent = range_end_pfn - range_start_pfn; 7226 unsigned long start_pfn, end_pfn; 7227 int i; 7228 7229 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 7230 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 7231 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 7232 nr_absent -= end_pfn - start_pfn; 7233 } 7234 return nr_absent; 7235 } 7236 7237 /** 7238 * absent_pages_in_range - Return number of page frames in holes within a range 7239 * @start_pfn: The start PFN to start searching for holes 7240 * @end_pfn: The end PFN to stop searching for holes 7241 * 7242 * Return: the number of page frames in memory holes within a range.
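 * * Example (illustrative): if memblock.memory covers PFNs [0, 600) and [700, 1000), then absent_pages_in_range(0, 1000) returns 100, the size of the [600, 700) hole.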
7243 */ 7244 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 7245 unsigned long end_pfn) 7246 { 7247 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 7248 } 7249 7250 /* Return the number of page frames in holes in a zone on a node */ 7251 static unsigned long __init zone_absent_pages_in_node(int nid, 7252 unsigned long zone_type, 7253 unsigned long node_start_pfn, 7254 unsigned long node_end_pfn) 7255 { 7256 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 7257 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 7258 unsigned long zone_start_pfn, zone_end_pfn; 7259 unsigned long nr_absent; 7260 7261 /* When hotadd a new node from cpu_up(), the node should be empty */ 7262 if (!node_start_pfn && !node_end_pfn) 7263 return 0; 7264 7265 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 7266 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 7267 7268 adjust_zone_range_for_zone_movable(nid, zone_type, 7269 node_start_pfn, node_end_pfn, 7270 &zone_start_pfn, &zone_end_pfn); 7271 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 7272 7273 /* 7274 * ZONE_MOVABLE handling. 7275 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 7276 * and vice versa. 7277 */ 7278 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 7279 unsigned long start_pfn, end_pfn; 7280 struct memblock_region *r; 7281 7282 for_each_mem_region(r) { 7283 start_pfn = clamp(memblock_region_memory_base_pfn(r), 7284 zone_start_pfn, zone_end_pfn); 7285 end_pfn = clamp(memblock_region_memory_end_pfn(r), 7286 zone_start_pfn, zone_end_pfn); 7287 7288 if (zone_type == ZONE_MOVABLE && 7289 memblock_is_mirror(r)) 7290 nr_absent += end_pfn - start_pfn; 7291 7292 if (zone_type == ZONE_NORMAL && 7293 !memblock_is_mirror(r)) 7294 nr_absent += end_pfn - start_pfn; 7295 } 7296 } 7297 7298 return nr_absent; 7299 } 7300 7301 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 7302 unsigned long node_start_pfn, 7303 unsigned long node_end_pfn) 7304 { 7305 unsigned long realtotalpages = 0, totalpages = 0; 7306 enum zone_type i; 7307 7308 for (i = 0; i < MAX_NR_ZONES; i++) { 7309 struct zone *zone = pgdat->node_zones + i; 7310 unsigned long zone_start_pfn, zone_end_pfn; 7311 unsigned long spanned, absent; 7312 unsigned long size, real_size; 7313 7314 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 7315 node_start_pfn, 7316 node_end_pfn, 7317 &zone_start_pfn, 7318 &zone_end_pfn); 7319 absent = zone_absent_pages_in_node(pgdat->node_id, i, 7320 node_start_pfn, 7321 node_end_pfn); 7322 7323 size = spanned; 7324 real_size = size - absent; 7325 7326 if (size) 7327 zone->zone_start_pfn = zone_start_pfn; 7328 else 7329 zone->zone_start_pfn = 0; 7330 zone->spanned_pages = size; 7331 zone->present_pages = real_size; 7332 #if defined(CONFIG_MEMORY_HOTPLUG) 7333 zone->present_early_pages = real_size; 7334 #endif 7335 7336 totalpages += size; 7337 realtotalpages += real_size; 7338 } 7339 7340 pgdat->node_spanned_pages = totalpages; 7341 pgdat->node_present_pages = realtotalpages; 7342 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 7343 } 7344 7345 #ifndef CONFIG_SPARSEMEM 7346 /* 7347 * Calculate the size of the zone->blockflags rounded to an unsigned long 7348 * Start by making sure zonesize is a multiple of pageblock_order by rounding 7349 * up. 
Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 7350 * round what is now in bits to nearest long in bits, then return it in 7351 * bytes. 7352 */ 7353 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 7354 { 7355 unsigned long usemapsize; 7356 7357 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 7358 usemapsize = roundup(zonesize, pageblock_nr_pages); 7359 usemapsize = usemapsize >> pageblock_order; 7360 usemapsize *= NR_PAGEBLOCK_BITS; 7361 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 7362 7363 return usemapsize / 8; 7364 } 7365 7366 static void __ref setup_usemap(struct zone *zone) 7367 { 7368 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, 7369 zone->spanned_pages); 7370 zone->pageblock_flags = NULL; 7371 if (usemapsize) { 7372 zone->pageblock_flags = 7373 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 7374 zone_to_nid(zone)); 7375 if (!zone->pageblock_flags) 7376 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 7377 usemapsize, zone->name, zone_to_nid(zone)); 7378 } 7379 } 7380 #else 7381 static inline void setup_usemap(struct zone *zone) {} 7382 #endif /* CONFIG_SPARSEMEM */ 7383 7384 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 7385 7386 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 7387 void __init set_pageblock_order(void) 7388 { 7389 unsigned int order; 7390 7391 /* Check that pageblock_nr_pages has not already been setup */ 7392 if (pageblock_order) 7393 return; 7394 7395 if (HPAGE_SHIFT > PAGE_SHIFT) 7396 order = HUGETLB_PAGE_ORDER; 7397 else 7398 order = MAX_ORDER - 1; 7399 7400 /* 7401 * Assume the largest contiguous order of interest is a huge page. 7402 * This value may be variable depending on boot parameters on IA64 and 7403 * powerpc. 7404 */ 7405 pageblock_order = order; 7406 } 7407 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 7408 7409 /* 7410 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 7411 * is unused as pageblock_order is set at compile-time. See 7412 * include/linux/pageblock-flags.h for the values of pageblock_order based on 7413 * the kernel config 7414 */ 7415 void __init set_pageblock_order(void) 7416 { 7417 } 7418 7419 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 7420 7421 static unsigned long __init calc_memmap_size(unsigned long spanned_pages, 7422 unsigned long present_pages) 7423 { 7424 unsigned long pages = spanned_pages; 7425 7426 /* 7427 * Provide a more accurate estimation if there are holes within 7428 * the zone and SPARSEMEM is in use. If there are holes within the 7429 * zone, each populated memory region may cost us one or two extra 7430 * memmap pages due to alignment because memmap pages for each 7431 * populated regions may not be naturally aligned on page boundary. 7432 * So the (present_pages >> 4) heuristic is a tradeoff for that. 
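 *
 * Illustrative numbers (assumed, not measured): with spanned_pages =
 * 1,048,576 and present_pages = 786,432, present + (present >> 4) is
 * 835,584, which is below spanned, so the memmap estimate is based on
 * present_pages; with present_pages = 1,000,000 instead, present +
 * (present >> 4) is 1,062,500, which exceeds spanned, so the
 * spanned-based estimate is kept.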
7433 */ 7434 if (spanned_pages > present_pages + (present_pages >> 4) && 7435 IS_ENABLED(CONFIG_SPARSEMEM)) 7436 pages = present_pages; 7437 7438 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 7439 } 7440 7441 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 7442 static void pgdat_init_split_queue(struct pglist_data *pgdat) 7443 { 7444 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 7445 7446 spin_lock_init(&ds_queue->split_queue_lock); 7447 INIT_LIST_HEAD(&ds_queue->split_queue); 7448 ds_queue->split_queue_len = 0; 7449 } 7450 #else 7451 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 7452 #endif 7453 7454 #ifdef CONFIG_COMPACTION 7455 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 7456 { 7457 init_waitqueue_head(&pgdat->kcompactd_wait); 7458 } 7459 #else 7460 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 7461 #endif 7462 7463 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 7464 { 7465 int i; 7466 7467 pgdat_resize_init(pgdat); 7468 7469 pgdat_init_split_queue(pgdat); 7470 pgdat_init_kcompactd(pgdat); 7471 7472 init_waitqueue_head(&pgdat->kswapd_wait); 7473 init_waitqueue_head(&pgdat->pfmemalloc_wait); 7474 7475 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 7476 init_waitqueue_head(&pgdat->reclaim_wait[i]); 7477 7478 pgdat_page_ext_init(pgdat); 7479 lruvec_init(&pgdat->__lruvec); 7480 } 7481 7482 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 7483 unsigned long remaining_pages) 7484 { 7485 atomic_long_set(&zone->managed_pages, remaining_pages); 7486 zone_set_nid(zone, nid); 7487 zone->name = zone_names[idx]; 7488 zone->zone_pgdat = NODE_DATA(nid); 7489 spin_lock_init(&zone->lock); 7490 zone_seqlock_init(zone); 7491 zone_pcp_init(zone); 7492 } 7493 7494 /* 7495 * Set up the zone data structures 7496 * - init pgdat internals 7497 * - init all zones belonging to this node 7498 * 7499 * NOTE: this function is only called during memory hotplug 7500 */ 7501 #ifdef CONFIG_MEMORY_HOTPLUG 7502 void __ref free_area_init_core_hotplug(int nid) 7503 { 7504 enum zone_type z; 7505 pg_data_t *pgdat = NODE_DATA(nid); 7506 7507 pgdat_init_internals(pgdat); 7508 for (z = 0; z < MAX_NR_ZONES; z++) 7509 zone_init_internals(&pgdat->node_zones[z], z, nid, 0); 7510 } 7511 #endif 7512 7513 /* 7514 * Set up the zone data structures: 7515 * - mark all pages reserved 7516 * - mark all memory queues empty 7517 * - clear the memory bitmaps 7518 * 7519 * NOTE: pgdat should get zeroed by caller. 7520 * NOTE: this function is only called during early init. 7521 */ 7522 static void __init free_area_init_core(struct pglist_data *pgdat) 7523 { 7524 enum zone_type j; 7525 int nid = pgdat->node_id; 7526 7527 pgdat_init_internals(pgdat); 7528 pgdat->per_cpu_nodestats = &boot_nodestats; 7529 7530 for (j = 0; j < MAX_NR_ZONES; j++) { 7531 struct zone *zone = pgdat->node_zones + j; 7532 unsigned long size, freesize, memmap_pages; 7533 7534 size = zone->spanned_pages; 7535 freesize = zone->present_pages; 7536 7537 /* 7538 * Adjust freesize so that it accounts for how much memory 7539 * is used by this zone for memmap. 
This affects the watermark 7540 * and per-cpu initialisations 7541 */ 7542 memmap_pages = calc_memmap_size(size, freesize); 7543 if (!is_highmem_idx(j)) { 7544 if (freesize >= memmap_pages) { 7545 freesize -= memmap_pages; 7546 if (memmap_pages) 7547 pr_debug(" %s zone: %lu pages used for memmap\n", 7548 zone_names[j], memmap_pages); 7549 } else 7550 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", 7551 zone_names[j], memmap_pages, freesize); 7552 } 7553 7554 /* Account for reserved pages */ 7555 if (j == 0 && freesize > dma_reserve) { 7556 freesize -= dma_reserve; 7557 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); 7558 } 7559 7560 if (!is_highmem_idx(j)) 7561 nr_kernel_pages += freesize; 7562 /* Charge for highmem memmap if there are enough kernel pages */ 7563 else if (nr_kernel_pages > memmap_pages * 2) 7564 nr_kernel_pages -= memmap_pages; 7565 nr_all_pages += freesize; 7566 7567 /* 7568 * Set an approximate value for lowmem here, it will be adjusted 7569 * when the bootmem allocator frees pages into the buddy system. 7570 * And all highmem pages will be managed by the buddy system. 7571 */ 7572 zone_init_internals(zone, j, nid, freesize); 7573 7574 if (!size) 7575 continue; 7576 7577 set_pageblock_order(); 7578 setup_usemap(zone); 7579 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 7580 } 7581 } 7582 7583 #ifdef CONFIG_FLATMEM 7584 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 7585 { 7586 unsigned long __maybe_unused start = 0; 7587 unsigned long __maybe_unused offset = 0; 7588 7589 /* Skip empty nodes */ 7590 if (!pgdat->node_spanned_pages) 7591 return; 7592 7593 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 7594 offset = pgdat->node_start_pfn - start; 7595 /* ia64 gets its own node_mem_map, before this, without bootmem */ 7596 if (!pgdat->node_mem_map) { 7597 unsigned long size, end; 7598 struct page *map; 7599 7600 /* 7601 * The zone's endpoints aren't required to be MAX_ORDER 7602 * aligned but the node_mem_map endpoints must be in order 7603 * for the buddy allocator to function correctly. 
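 *
 * Illustrative example (the PFNs and a MAX_ORDER_NR_PAGES of 1024 are
 * assumptions): for node_start_pfn = 0x1405, start above is rounded down
 * to 0x1400 and offset is 5; if pgdat_end_pfn() is 0x2403, end below is
 * aligned up to 0x2800, so (0x2800 - 0x1400) struct pages are allocated
 * and node_mem_map is set to map + 5.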
7604 */ 7605 end = pgdat_end_pfn(pgdat); 7606 end = ALIGN(end, MAX_ORDER_NR_PAGES); 7607 size = (end - start) * sizeof(struct page); 7608 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 7609 pgdat->node_id, false); 7610 if (!map) 7611 panic("Failed to allocate %ld bytes for node %d memory map\n", 7612 size, pgdat->node_id); 7613 pgdat->node_mem_map = map + offset; 7614 } 7615 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 7616 __func__, pgdat->node_id, (unsigned long)pgdat, 7617 (unsigned long)pgdat->node_mem_map); 7618 #ifndef CONFIG_NUMA 7619 /* 7620 * With no DISCONTIG, the global mem_map is just set as node 0's 7621 */ 7622 if (pgdat == NODE_DATA(0)) { 7623 mem_map = NODE_DATA(0)->node_mem_map; 7624 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 7625 mem_map -= offset; 7626 } 7627 #endif 7628 } 7629 #else 7630 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 7631 #endif /* CONFIG_FLATMEM */ 7632 7633 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 7634 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 7635 { 7636 pgdat->first_deferred_pfn = ULONG_MAX; 7637 } 7638 #else 7639 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 7640 #endif 7641 7642 static void __init free_area_init_node(int nid) 7643 { 7644 pg_data_t *pgdat = NODE_DATA(nid); 7645 unsigned long start_pfn = 0; 7646 unsigned long end_pfn = 0; 7647 7648 /* pg_data_t should be reset to zero when it's allocated */ 7649 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 7650 7651 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 7652 7653 pgdat->node_id = nid; 7654 pgdat->node_start_pfn = start_pfn; 7655 pgdat->per_cpu_nodestats = NULL; 7656 7657 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 7658 (u64)start_pfn << PAGE_SHIFT, 7659 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 7660 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 7661 7662 alloc_node_mem_map(pgdat); 7663 pgdat_set_deferred_range(pgdat); 7664 7665 free_area_init_core(pgdat); 7666 } 7667 7668 void __init free_area_init_memoryless_node(int nid) 7669 { 7670 free_area_init_node(nid); 7671 } 7672 7673 #if MAX_NUMNODES > 1 7674 /* 7675 * Figure out the number of possible node ids. 7676 */ 7677 void __init setup_nr_node_ids(void) 7678 { 7679 unsigned int highest; 7680 7681 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 7682 nr_node_ids = highest + 1; 7683 } 7684 #endif 7685 7686 /** 7687 * node_map_pfn_alignment - determine the maximum internode alignment 7688 * 7689 * This function should be called after node map is populated and sorted. 7690 * It calculates the maximum power of two alignment which can distinguish 7691 * all the nodes. 7692 * 7693 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 7694 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 7695 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 7696 * shifted, 1GiB is enough and this function will indicate so. 7697 * 7698 * This is used to test whether pfn -> nid mapping of the chosen memory 7699 * model has fine enough granularity to avoid incorrect mapping for the 7700 * populated node map. 7701 * 7702 * Return: the determined alignment in pfn's. 0 if there is no alignment 7703 * requirement (single node). 
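 *
 * Illustrative walk-through (4 KiB pages and the PFNs below are
 * assumptions): with node 0 covering PFNs [0x10000, 0x50000) and node 1
 * starting at PFN 0x50000, the initial mask clears the low 16 bits (the
 * lowest set bit of 0x50000); widening it once would place the containing
 * block at PFN 0x40000, below node 0's end, so the mask is not widened
 * and the function returns 0x10000 PFNs, i.e. 256MiB alignment.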
7704 */ 7705 unsigned long __init node_map_pfn_alignment(void) 7706 { 7707 unsigned long accl_mask = 0, last_end = 0; 7708 unsigned long start, end, mask; 7709 int last_nid = NUMA_NO_NODE; 7710 int i, nid; 7711 7712 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 7713 if (!start || last_nid < 0 || last_nid == nid) { 7714 last_nid = nid; 7715 last_end = end; 7716 continue; 7717 } 7718 7719 /* 7720 * Start with a mask granular enough to pin-point to the 7721 * start pfn and tick off bits one-by-one until it becomes 7722 * too coarse to separate the current node from the last. 7723 */ 7724 mask = ~((1 << __ffs(start)) - 1); 7725 while (mask && last_end <= (start & (mask << 1))) 7726 mask <<= 1; 7727 7728 /* accumulate all internode masks */ 7729 accl_mask |= mask; 7730 } 7731 7732 /* convert mask to number of pages */ 7733 return ~accl_mask + 1; 7734 } 7735 7736 /** 7737 * find_min_pfn_with_active_regions - Find the minimum PFN registered 7738 * 7739 * Return: the minimum PFN based on information provided via 7740 * memblock_set_node(). 7741 */ 7742 unsigned long __init find_min_pfn_with_active_regions(void) 7743 { 7744 return PHYS_PFN(memblock_start_of_DRAM()); 7745 } 7746 7747 /* 7748 * early_calculate_totalpages() 7749 * Sum pages in active regions for movable zone. 7750 * Populate N_MEMORY for calculating usable_nodes. 7751 */ 7752 static unsigned long __init early_calculate_totalpages(void) 7753 { 7754 unsigned long totalpages = 0; 7755 unsigned long start_pfn, end_pfn; 7756 int i, nid; 7757 7758 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 7759 unsigned long pages = end_pfn - start_pfn; 7760 7761 totalpages += pages; 7762 if (pages) 7763 node_set_state(nid, N_MEMORY); 7764 } 7765 return totalpages; 7766 } 7767 7768 /* 7769 * Find the PFN the Movable zone begins in each node. Kernel memory 7770 * is spread evenly between nodes as long as the nodes have enough 7771 * memory. When they don't, some nodes will have more kernelcore than 7772 * others 7773 */ 7774 static void __init find_zone_movable_pfns_for_nodes(void) 7775 { 7776 int i, nid; 7777 unsigned long usable_startpfn; 7778 unsigned long kernelcore_node, kernelcore_remaining; 7779 /* save the state before borrow the nodemask */ 7780 nodemask_t saved_node_state = node_states[N_MEMORY]; 7781 unsigned long totalpages = early_calculate_totalpages(); 7782 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 7783 struct memblock_region *r; 7784 7785 /* Need to find movable_zone earlier when movable_node is specified. */ 7786 find_usable_zone_for_movable(); 7787 7788 /* 7789 * If movable_node is specified, ignore kernelcore and movablecore 7790 * options. 7791 */ 7792 if (movable_node_is_enabled()) { 7793 for_each_mem_region(r) { 7794 if (!memblock_is_hotpluggable(r)) 7795 continue; 7796 7797 nid = memblock_get_region_node(r); 7798 7799 usable_startpfn = PFN_DOWN(r->base); 7800 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 
7801 min(usable_startpfn, zone_movable_pfn[nid]) : 7802 usable_startpfn; 7803 } 7804 7805 goto out2; 7806 } 7807 7808 /* 7809 * If kernelcore=mirror is specified, ignore movablecore option 7810 */ 7811 if (mirrored_kernelcore) { 7812 bool mem_below_4gb_not_mirrored = false; 7813 7814 for_each_mem_region(r) { 7815 if (memblock_is_mirror(r)) 7816 continue; 7817 7818 nid = memblock_get_region_node(r); 7819 7820 usable_startpfn = memblock_region_memory_base_pfn(r); 7821 7822 if (usable_startpfn < 0x100000) { 7823 mem_below_4gb_not_mirrored = true; 7824 continue; 7825 } 7826 7827 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 7828 min(usable_startpfn, zone_movable_pfn[nid]) : 7829 usable_startpfn; 7830 } 7831 7832 if (mem_below_4gb_not_mirrored) 7833 pr_warn("This configuration results in unmirrored kernel memory.\n"); 7834 7835 goto out2; 7836 } 7837 7838 /* 7839 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 7840 * amount of necessary memory. 7841 */ 7842 if (required_kernelcore_percent) 7843 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 7844 10000UL; 7845 if (required_movablecore_percent) 7846 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 7847 10000UL; 7848 7849 /* 7850 * If movablecore= was specified, calculate what size of 7851 * kernelcore that corresponds so that memory usable for 7852 * any allocation type is evenly spread. If both kernelcore 7853 * and movablecore are specified, then the value of kernelcore 7854 * will be used for required_kernelcore if it's greater than 7855 * what movablecore would have allowed. 7856 */ 7857 if (required_movablecore) { 7858 unsigned long corepages; 7859 7860 /* 7861 * Round-up so that ZONE_MOVABLE is at least as large as what 7862 * was requested by the user 7863 */ 7864 required_movablecore = 7865 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 7866 required_movablecore = min(totalpages, required_movablecore); 7867 corepages = totalpages - required_movablecore; 7868 7869 required_kernelcore = max(required_kernelcore, corepages); 7870 } 7871 7872 /* 7873 * If kernelcore was not specified or kernelcore size is larger 7874 * than totalpages, there is no ZONE_MOVABLE. 7875 */ 7876 if (!required_kernelcore || required_kernelcore >= totalpages) 7877 goto out; 7878 7879 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 7880 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 7881 7882 restart: 7883 /* Spread kernelcore memory as evenly as possible throughout nodes */ 7884 kernelcore_node = required_kernelcore / usable_nodes; 7885 for_each_node_state(nid, N_MEMORY) { 7886 unsigned long start_pfn, end_pfn; 7887 7888 /* 7889 * Recalculate kernelcore_node if the division per node 7890 * now exceeds what is necessary to satisfy the requested 7891 * amount of memory for the kernel 7892 */ 7893 if (required_kernelcore < kernelcore_node) 7894 kernelcore_node = required_kernelcore / usable_nodes; 7895 7896 /* 7897 * As the map is walked, we track how much memory is usable 7898 * by the kernel using kernelcore_remaining. 
When it is 7899 * 0, the rest of the node is usable by ZONE_MOVABLE 7900 */ 7901 kernelcore_remaining = kernelcore_node; 7902 7903 /* Go through each range of PFNs within this node */ 7904 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 7905 unsigned long size_pages; 7906 7907 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 7908 if (start_pfn >= end_pfn) 7909 continue; 7910 7911 /* Account for what is only usable for kernelcore */ 7912 if (start_pfn < usable_startpfn) { 7913 unsigned long kernel_pages; 7914 kernel_pages = min(end_pfn, usable_startpfn) 7915 - start_pfn; 7916 7917 kernelcore_remaining -= min(kernel_pages, 7918 kernelcore_remaining); 7919 required_kernelcore -= min(kernel_pages, 7920 required_kernelcore); 7921 7922 /* Continue if range is now fully accounted */ 7923 if (end_pfn <= usable_startpfn) { 7924 7925 /* 7926 * Push zone_movable_pfn to the end so 7927 * that if we have to rebalance 7928 * kernelcore across nodes, we will 7929 * not double account here 7930 */ 7931 zone_movable_pfn[nid] = end_pfn; 7932 continue; 7933 } 7934 start_pfn = usable_startpfn; 7935 } 7936 7937 /* 7938 * The usable PFN range for ZONE_MOVABLE is from 7939 * start_pfn->end_pfn. Calculate size_pages as the 7940 * number of pages used as kernelcore 7941 */ 7942 size_pages = end_pfn - start_pfn; 7943 if (size_pages > kernelcore_remaining) 7944 size_pages = kernelcore_remaining; 7945 zone_movable_pfn[nid] = start_pfn + size_pages; 7946 7947 /* 7948 * Some kernelcore has been met, update counts and 7949 * break if the kernelcore for this node has been 7950 * satisfied 7951 */ 7952 required_kernelcore -= min(required_kernelcore, 7953 size_pages); 7954 kernelcore_remaining -= size_pages; 7955 if (!kernelcore_remaining) 7956 break; 7957 } 7958 } 7959 7960 /* 7961 * If there is still required_kernelcore, we do another pass with one 7962 * less node in the count. This will push zone_movable_pfn[nid] further 7963 * along on the nodes that still have memory until kernelcore is 7964 * satisfied 7965 */ 7966 usable_nodes--; 7967 if (usable_nodes && required_kernelcore > usable_nodes) 7968 goto restart; 7969 7970 out2: 7971 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 7972 for (nid = 0; nid < MAX_NUMNODES; nid++) 7973 zone_movable_pfn[nid] = 7974 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 7975 7976 out: 7977 /* restore the node_state */ 7978 node_states[N_MEMORY] = saved_node_state; 7979 } 7980 7981 /* Any regular or high memory on that node ? */ 7982 static void check_for_memory(pg_data_t *pgdat, int nid) 7983 { 7984 enum zone_type zone_type; 7985 7986 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 7987 struct zone *zone = &pgdat->node_zones[zone_type]; 7988 if (populated_zone(zone)) { 7989 if (IS_ENABLED(CONFIG_HIGHMEM)) 7990 node_set_state(nid, N_HIGH_MEMORY); 7991 if (zone_type <= ZONE_NORMAL) 7992 node_set_state(nid, N_NORMAL_MEMORY); 7993 break; 7994 } 7995 } 7996 } 7997 7998 /* 7999 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For 8000 * such cases we allow max_zone_pfn sorted in the descending order 8001 */ 8002 bool __weak arch_has_descending_max_zone_pfns(void) 8003 { 8004 return false; 8005 } 8006 8007 /** 8008 * free_area_init - Initialise all pg_data_t and zone data 8009 * @max_zone_pfn: an array of max PFNs for each zone 8010 * 8011 * This will call free_area_init_node() for each active node in the system. 
8012 * Using the page ranges provided by memblock_set_node(), the size of each 8013 * zone in each node and their holes is calculated. If the maximum PFN 8014 * between two adjacent zones match, it is assumed that the zone is empty. 8015 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 8016 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 8017 * starts where the previous one ended. For example, ZONE_DMA32 starts 8018 * at arch_max_dma_pfn. 8019 */ 8020 void __init free_area_init(unsigned long *max_zone_pfn) 8021 { 8022 unsigned long start_pfn, end_pfn; 8023 int i, nid, zone; 8024 bool descending; 8025 8026 /* Record where the zone boundaries are */ 8027 memset(arch_zone_lowest_possible_pfn, 0, 8028 sizeof(arch_zone_lowest_possible_pfn)); 8029 memset(arch_zone_highest_possible_pfn, 0, 8030 sizeof(arch_zone_highest_possible_pfn)); 8031 8032 start_pfn = find_min_pfn_with_active_regions(); 8033 descending = arch_has_descending_max_zone_pfns(); 8034 8035 for (i = 0; i < MAX_NR_ZONES; i++) { 8036 if (descending) 8037 zone = MAX_NR_ZONES - i - 1; 8038 else 8039 zone = i; 8040 8041 if (zone == ZONE_MOVABLE) 8042 continue; 8043 8044 end_pfn = max(max_zone_pfn[zone], start_pfn); 8045 arch_zone_lowest_possible_pfn[zone] = start_pfn; 8046 arch_zone_highest_possible_pfn[zone] = end_pfn; 8047 8048 start_pfn = end_pfn; 8049 } 8050 8051 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 8052 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 8053 find_zone_movable_pfns_for_nodes(); 8054 8055 /* Print out the zone ranges */ 8056 pr_info("Zone ranges:\n"); 8057 for (i = 0; i < MAX_NR_ZONES; i++) { 8058 if (i == ZONE_MOVABLE) 8059 continue; 8060 pr_info(" %-8s ", zone_names[i]); 8061 if (arch_zone_lowest_possible_pfn[i] == 8062 arch_zone_highest_possible_pfn[i]) 8063 pr_cont("empty\n"); 8064 else 8065 pr_cont("[mem %#018Lx-%#018Lx]\n", 8066 (u64)arch_zone_lowest_possible_pfn[i] 8067 << PAGE_SHIFT, 8068 ((u64)arch_zone_highest_possible_pfn[i] 8069 << PAGE_SHIFT) - 1); 8070 } 8071 8072 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 8073 pr_info("Movable zone start for each node\n"); 8074 for (i = 0; i < MAX_NUMNODES; i++) { 8075 if (zone_movable_pfn[i]) 8076 pr_info(" Node %d: %#018Lx\n", i, 8077 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 8078 } 8079 8080 /* 8081 * Print out the early node map, and initialize the 8082 * subsection-map relative to active online memory ranges to 8083 * enable future "sub-section" extensions of the memory map. 
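 *
 * A typical line printed below looks like (values are illustrative only):
 *   node   0: [mem 0x0000000000001000-0x000000003fffffff]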
8084 */ 8085 pr_info("Early memory node ranges\n"); 8086 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 8087 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 8088 (u64)start_pfn << PAGE_SHIFT, 8089 ((u64)end_pfn << PAGE_SHIFT) - 1); 8090 subsection_map_init(start_pfn, end_pfn - start_pfn); 8091 } 8092 8093 /* Initialise every node */ 8094 mminit_verify_pageflags_layout(); 8095 setup_nr_node_ids(); 8096 for_each_online_node(nid) { 8097 pg_data_t *pgdat = NODE_DATA(nid); 8098 free_area_init_node(nid); 8099 8100 /* Any memory on that node */ 8101 if (pgdat->node_present_pages) 8102 node_set_state(nid, N_MEMORY); 8103 check_for_memory(pgdat, nid); 8104 } 8105 8106 memmap_init(); 8107 } 8108 8109 static int __init cmdline_parse_core(char *p, unsigned long *core, 8110 unsigned long *percent) 8111 { 8112 unsigned long long coremem; 8113 char *endptr; 8114 8115 if (!p) 8116 return -EINVAL; 8117 8118 /* Value may be a percentage of total memory, otherwise bytes */ 8119 coremem = simple_strtoull(p, &endptr, 0); 8120 if (*endptr == '%') { 8121 /* Paranoid check for percent values greater than 100 */ 8122 WARN_ON(coremem > 100); 8123 8124 *percent = coremem; 8125 } else { 8126 coremem = memparse(p, &p); 8127 /* Paranoid check that UL is enough for the coremem value */ 8128 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 8129 8130 *core = coremem >> PAGE_SHIFT; 8131 *percent = 0UL; 8132 } 8133 return 0; 8134 } 8135 8136 /* 8137 * kernelcore=size sets the amount of memory for use for allocations that 8138 * cannot be reclaimed or migrated. 8139 */ 8140 static int __init cmdline_parse_kernelcore(char *p) 8141 { 8142 /* parse kernelcore=mirror */ 8143 if (parse_option_str(p, "mirror")) { 8144 mirrored_kernelcore = true; 8145 return 0; 8146 } 8147 8148 return cmdline_parse_core(p, &required_kernelcore, 8149 &required_kernelcore_percent); 8150 } 8151 8152 /* 8153 * movablecore=size sets the amount of memory for use for allocations that 8154 * can be reclaimed or migrated. 8155 */ 8156 static int __init cmdline_parse_movablecore(char *p) 8157 { 8158 return cmdline_parse_core(p, &required_movablecore, 8159 &required_movablecore_percent); 8160 } 8161 8162 early_param("kernelcore", cmdline_parse_kernelcore); 8163 early_param("movablecore", cmdline_parse_movablecore); 8164 8165 void adjust_managed_page_count(struct page *page, long count) 8166 { 8167 atomic_long_add(count, &page_zone(page)->managed_pages); 8168 totalram_pages_add(count); 8169 #ifdef CONFIG_HIGHMEM 8170 if (PageHighMem(page)) 8171 totalhigh_pages_add(count); 8172 #endif 8173 } 8174 EXPORT_SYMBOL(adjust_managed_page_count); 8175 8176 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 8177 { 8178 void *pos; 8179 unsigned long pages = 0; 8180 8181 start = (void *)PAGE_ALIGN((unsigned long)start); 8182 end = (void *)((unsigned long)end & PAGE_MASK); 8183 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 8184 struct page *page = virt_to_page(pos); 8185 void *direct_map_addr; 8186 8187 /* 8188 * 'direct_map_addr' might be different from 'pos' 8189 * because some architectures' virt_to_page() 8190 * work with aliases. Getting the direct map 8191 * address ensures that we get a _writeable_ 8192 * alias for the memset(). 8193 */ 8194 direct_map_addr = page_address(page); 8195 /* 8196 * Perform a kasan-unchecked memset() since this memory 8197 * has not been initialized. 
8198 */ 8199 direct_map_addr = kasan_reset_tag(direct_map_addr); 8200 if ((unsigned int)poison <= 0xFF) 8201 memset(direct_map_addr, poison, PAGE_SIZE); 8202 8203 free_reserved_page(page); 8204 } 8205 8206 if (pages && s) 8207 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 8208 8209 return pages; 8210 } 8211 8212 void __init mem_init_print_info(void) 8213 { 8214 unsigned long physpages, codesize, datasize, rosize, bss_size; 8215 unsigned long init_code_size, init_data_size; 8216 8217 physpages = get_num_physpages(); 8218 codesize = _etext - _stext; 8219 datasize = _edata - _sdata; 8220 rosize = __end_rodata - __start_rodata; 8221 bss_size = __bss_stop - __bss_start; 8222 init_data_size = __init_end - __init_begin; 8223 init_code_size = _einittext - _sinittext; 8224 8225 /* 8226 * Detect special cases and adjust section sizes accordingly: 8227 * 1) .init.* may be embedded into .data sections 8228 * 2) .init.text.* may be out of [__init_begin, __init_end], 8229 * please refer to arch/tile/kernel/vmlinux.lds.S. 8230 * 3) .rodata.* may be embedded into .text or .data sections. 8231 */ 8232 #define adj_init_size(start, end, size, pos, adj) \ 8233 do { \ 8234 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ 8235 size -= adj; \ 8236 } while (0) 8237 8238 adj_init_size(__init_begin, __init_end, init_data_size, 8239 _sinittext, init_code_size); 8240 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 8241 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 8242 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 8243 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 8244 8245 #undef adj_init_size 8246 8247 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 8248 #ifdef CONFIG_HIGHMEM 8249 ", %luK highmem" 8250 #endif 8251 ")\n", 8252 K(nr_free_pages()), K(physpages), 8253 codesize >> 10, datasize >> 10, rosize >> 10, 8254 (init_data_size + init_code_size) >> 10, bss_size >> 10, 8255 K(physpages - totalram_pages() - totalcma_pages), 8256 K(totalcma_pages) 8257 #ifdef CONFIG_HIGHMEM 8258 , K(totalhigh_pages()) 8259 #endif 8260 ); 8261 } 8262 8263 /** 8264 * set_dma_reserve - set the specified number of pages reserved in the first zone 8265 * @new_dma_reserve: The number of pages to mark reserved 8266 * 8267 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 8268 * In the DMA zone, a significant percentage may be consumed by kernel image 8269 * and other unfreeable allocations which can skew the watermarks badly. This 8270 * function may optionally be used to account for unfreeable pages in the 8271 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 8272 * smaller per-cpu batchsize. 8273 */ 8274 void __init set_dma_reserve(unsigned long new_dma_reserve) 8275 { 8276 dma_reserve = new_dma_reserve; 8277 } 8278 8279 static int page_alloc_cpu_dead(unsigned int cpu) 8280 { 8281 struct zone *zone; 8282 8283 lru_add_drain_cpu(cpu); 8284 drain_pages(cpu); 8285 8286 /* 8287 * Spill the event counters of the dead processor 8288 * into the current processors event counters. 8289 * This artificially elevates the count of the current 8290 * processor. 8291 */ 8292 vm_events_fold_cpu(cpu); 8293 8294 /* 8295 * Zero the differential counters of the dead processor 8296 * so that the vm statistics are consistent. 
8297 * 8298 * This is only okay since the processor is dead and cannot 8299 * race with what we are doing. 8300 */ 8301 cpu_vm_stats_fold(cpu); 8302 8303 for_each_populated_zone(zone) 8304 zone_pcp_update(zone, 0); 8305 8306 return 0; 8307 } 8308 8309 static int page_alloc_cpu_online(unsigned int cpu) 8310 { 8311 struct zone *zone; 8312 8313 for_each_populated_zone(zone) 8314 zone_pcp_update(zone, 1); 8315 return 0; 8316 } 8317 8318 #ifdef CONFIG_NUMA 8319 int hashdist = HASHDIST_DEFAULT; 8320 8321 static int __init set_hashdist(char *str) 8322 { 8323 if (!str) 8324 return 0; 8325 hashdist = simple_strtoul(str, &str, 0); 8326 return 1; 8327 } 8328 __setup("hashdist=", set_hashdist); 8329 #endif 8330 8331 void __init page_alloc_init(void) 8332 { 8333 int ret; 8334 8335 #ifdef CONFIG_NUMA 8336 if (num_node_state(N_MEMORY) == 1) 8337 hashdist = 0; 8338 #endif 8339 8340 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 8341 "mm/page_alloc:pcp", 8342 page_alloc_cpu_online, 8343 page_alloc_cpu_dead); 8344 WARN_ON(ret < 0); 8345 } 8346 8347 /* 8348 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 8349 * or min_free_kbytes changes. 8350 */ 8351 static void calculate_totalreserve_pages(void) 8352 { 8353 struct pglist_data *pgdat; 8354 unsigned long reserve_pages = 0; 8355 enum zone_type i, j; 8356 8357 for_each_online_pgdat(pgdat) { 8358 8359 pgdat->totalreserve_pages = 0; 8360 8361 for (i = 0; i < MAX_NR_ZONES; i++) { 8362 struct zone *zone = pgdat->node_zones + i; 8363 long max = 0; 8364 unsigned long managed_pages = zone_managed_pages(zone); 8365 8366 /* Find valid and maximum lowmem_reserve in the zone */ 8367 for (j = i; j < MAX_NR_ZONES; j++) { 8368 if (zone->lowmem_reserve[j] > max) 8369 max = zone->lowmem_reserve[j]; 8370 } 8371 8372 /* we treat the high watermark as reserved pages. */ 8373 max += high_wmark_pages(zone); 8374 8375 if (max > managed_pages) 8376 max = managed_pages; 8377 8378 pgdat->totalreserve_pages += max; 8379 8380 reserve_pages += max; 8381 } 8382 } 8383 totalreserve_pages = reserve_pages; 8384 } 8385 8386 /* 8387 * setup_per_zone_lowmem_reserve - called whenever 8388 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 8389 * has a correct pages reserved value, so an adequate number of 8390 * pages are left in the zone after a successful __alloc_pages(). 
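 *
 * Illustrative figures (zone layout, ratio and sizes are assumptions):
 * with sysctl_lowmem_reserve_ratio[ZONE_DMA32] = 256 and a ZONE_NORMAL
 * above it managing 4,194,304 pages, ZONE_DMA32 ends up with
 * lowmem_reserve[ZONE_NORMAL] = 4,194,304 / 256 = 16,384 pages that a
 * ZONE_NORMAL-targeted allocation must leave free before it is allowed
 * to fall back into ZONE_DMA32.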
8391 */ 8392 static void setup_per_zone_lowmem_reserve(void) 8393 { 8394 struct pglist_data *pgdat; 8395 enum zone_type i, j; 8396 8397 for_each_online_pgdat(pgdat) { 8398 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 8399 struct zone *zone = &pgdat->node_zones[i]; 8400 int ratio = sysctl_lowmem_reserve_ratio[i]; 8401 bool clear = !ratio || !zone_managed_pages(zone); 8402 unsigned long managed_pages = 0; 8403 8404 for (j = i + 1; j < MAX_NR_ZONES; j++) { 8405 struct zone *upper_zone = &pgdat->node_zones[j]; 8406 8407 managed_pages += zone_managed_pages(upper_zone); 8408 8409 if (clear) 8410 zone->lowmem_reserve[j] = 0; 8411 else 8412 zone->lowmem_reserve[j] = managed_pages / ratio; 8413 } 8414 } 8415 } 8416 8417 /* update totalreserve_pages */ 8418 calculate_totalreserve_pages(); 8419 } 8420 8421 static void __setup_per_zone_wmarks(void) 8422 { 8423 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 8424 unsigned long lowmem_pages = 0; 8425 struct zone *zone; 8426 unsigned long flags; 8427 8428 /* Calculate total number of !ZONE_HIGHMEM pages */ 8429 for_each_zone(zone) { 8430 if (!is_highmem(zone)) 8431 lowmem_pages += zone_managed_pages(zone); 8432 } 8433 8434 for_each_zone(zone) { 8435 u64 tmp; 8436 8437 spin_lock_irqsave(&zone->lock, flags); 8438 tmp = (u64)pages_min * zone_managed_pages(zone); 8439 do_div(tmp, lowmem_pages); 8440 if (is_highmem(zone)) { 8441 /* 8442 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 8443 * need highmem pages, so cap pages_min to a small 8444 * value here. 8445 * 8446 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 8447 * deltas control async page reclaim, and so should 8448 * not be capped for highmem. 8449 */ 8450 unsigned long min_pages; 8451 8452 min_pages = zone_managed_pages(zone) / 1024; 8453 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 8454 zone->_watermark[WMARK_MIN] = min_pages; 8455 } else { 8456 /* 8457 * If it's a lowmem zone, reserve a number of pages 8458 * proportionate to the zone's size. 8459 */ 8460 zone->_watermark[WMARK_MIN] = tmp; 8461 } 8462 8463 /* 8464 * Set the kswapd watermarks distance according to the 8465 * scale factor in proportion to available memory, but 8466 * ensure a minimum size on small systems. 8467 */ 8468 tmp = max_t(u64, tmp >> 2, 8469 mult_frac(zone_managed_pages(zone), 8470 watermark_scale_factor, 10000)); 8471 8472 zone->watermark_boost = 0; 8473 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 8474 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; 8475 8476 spin_unlock_irqrestore(&zone->lock, flags); 8477 } 8478 8479 /* update totalreserve_pages */ 8480 calculate_totalreserve_pages(); 8481 } 8482 8483 /** 8484 * setup_per_zone_wmarks - called when min_free_kbytes changes 8485 * or when memory is hot-{added|removed} 8486 * 8487 * Ensures that the watermark[min,low,high] values for each zone are set 8488 * correctly with respect to min_free_kbytes. 8489 */ 8490 void setup_per_zone_wmarks(void) 8491 { 8492 struct zone *zone; 8493 static DEFINE_SPINLOCK(lock); 8494 8495 spin_lock(&lock); 8496 __setup_per_zone_wmarks(); 8497 spin_unlock(&lock); 8498 8499 /* 8500 * The watermark size have changed so update the pcpu batch 8501 * and high limits or the limits may be inappropriate. 8502 */ 8503 for_each_zone(zone) 8504 zone_pcp_update(zone, 0); 8505 } 8506 8507 /* 8508 * Initialise min_free_kbytes. 8509 * 8510 * For small machines we want it small (128k min). For large machines 8511 * we want it large (256MB max). 
But it is not linear, because network 8512 * bandwidth does not increase linearly with machine size. We use 8513 * 8514 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 8515 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 8516 * 8517 * which yields 8518 * 8519 * 16MB: 512k 8520 * 32MB: 724k 8521 * 64MB: 1024k 8522 * 128MB: 1448k 8523 * 256MB: 2048k 8524 * 512MB: 2896k 8525 * 1024MB: 4096k 8526 * 2048MB: 5792k 8527 * 4096MB: 8192k 8528 * 8192MB: 11584k 8529 * 16384MB: 16384k 8530 */ 8531 void calculate_min_free_kbytes(void) 8532 { 8533 unsigned long lowmem_kbytes; 8534 int new_min_free_kbytes; 8535 8536 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 8537 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 8538 8539 if (new_min_free_kbytes > user_min_free_kbytes) 8540 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 8541 else 8542 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 8543 new_min_free_kbytes, user_min_free_kbytes); 8544 8545 } 8546 8547 int __meminit init_per_zone_wmark_min(void) 8548 { 8549 calculate_min_free_kbytes(); 8550 setup_per_zone_wmarks(); 8551 refresh_zone_stat_thresholds(); 8552 setup_per_zone_lowmem_reserve(); 8553 8554 #ifdef CONFIG_NUMA 8555 setup_min_unmapped_ratio(); 8556 setup_min_slab_ratio(); 8557 #endif 8558 8559 khugepaged_min_free_kbytes_update(); 8560 8561 return 0; 8562 } 8563 postcore_initcall(init_per_zone_wmark_min) 8564 8565 /* 8566 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 8567 * that we can call two helper functions whenever min_free_kbytes 8568 * changes. 8569 */ 8570 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 8571 void *buffer, size_t *length, loff_t *ppos) 8572 { 8573 int rc; 8574 8575 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8576 if (rc) 8577 return rc; 8578 8579 if (write) { 8580 user_min_free_kbytes = min_free_kbytes; 8581 setup_per_zone_wmarks(); 8582 } 8583 return 0; 8584 } 8585 8586 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 8587 void *buffer, size_t *length, loff_t *ppos) 8588 { 8589 int rc; 8590 8591 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8592 if (rc) 8593 return rc; 8594 8595 if (write) 8596 setup_per_zone_wmarks(); 8597 8598 return 0; 8599 } 8600 8601 #ifdef CONFIG_NUMA 8602 static void setup_min_unmapped_ratio(void) 8603 { 8604 pg_data_t *pgdat; 8605 struct zone *zone; 8606 8607 for_each_online_pgdat(pgdat) 8608 pgdat->min_unmapped_pages = 0; 8609 8610 for_each_zone(zone) 8611 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 8612 sysctl_min_unmapped_ratio) / 100; 8613 } 8614 8615 8616 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 8617 void *buffer, size_t *length, loff_t *ppos) 8618 { 8619 int rc; 8620 8621 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8622 if (rc) 8623 return rc; 8624 8625 setup_min_unmapped_ratio(); 8626 8627 return 0; 8628 } 8629 8630 static void setup_min_slab_ratio(void) 8631 { 8632 pg_data_t *pgdat; 8633 struct zone *zone; 8634 8635 for_each_online_pgdat(pgdat) 8636 pgdat->min_slab_pages = 0; 8637 8638 for_each_zone(zone) 8639 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 8640 sysctl_min_slab_ratio) / 100; 8641 } 8642 8643 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 8644 void *buffer, size_t *length, loff_t *ppos) 8645 { 8646 int rc; 8647 8648 rc = 
proc_dointvec_minmax(table, write, buffer, length, ppos); 8649 if (rc) 8650 return rc; 8651 8652 setup_min_slab_ratio(); 8653 8654 return 0; 8655 } 8656 #endif 8657 8658 /* 8659 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 8660 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 8661 * whenever sysctl_lowmem_reserve_ratio changes. 8662 * 8663 * The reserve ratio obviously has absolutely no relation with the 8664 * minimum watermarks. The lowmem reserve ratio can only make sense 8665 * if in function of the boot time zone sizes. 8666 */ 8667 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, 8668 void *buffer, size_t *length, loff_t *ppos) 8669 { 8670 int i; 8671 8672 proc_dointvec_minmax(table, write, buffer, length, ppos); 8673 8674 for (i = 0; i < MAX_NR_ZONES; i++) { 8675 if (sysctl_lowmem_reserve_ratio[i] < 1) 8676 sysctl_lowmem_reserve_ratio[i] = 0; 8677 } 8678 8679 setup_per_zone_lowmem_reserve(); 8680 return 0; 8681 } 8682 8683 /* 8684 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 8685 * cpu. It is the fraction of total pages in each zone that a hot per cpu 8686 * pagelist can have before it gets flushed back to buddy allocator. 8687 */ 8688 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, 8689 int write, void *buffer, size_t *length, loff_t *ppos) 8690 { 8691 struct zone *zone; 8692 int old_percpu_pagelist_high_fraction; 8693 int ret; 8694 8695 mutex_lock(&pcp_batch_high_lock); 8696 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 8697 8698 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 8699 if (!write || ret < 0) 8700 goto out; 8701 8702 /* Sanity checking to avoid pcp imbalance */ 8703 if (percpu_pagelist_high_fraction && 8704 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 8705 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 8706 ret = -EINVAL; 8707 goto out; 8708 } 8709 8710 /* No change? */ 8711 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 8712 goto out; 8713 8714 for_each_populated_zone(zone) 8715 zone_set_pageset_high_and_batch(zone, 0); 8716 out: 8717 mutex_unlock(&pcp_batch_high_lock); 8718 return ret; 8719 } 8720 8721 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES 8722 /* 8723 * Returns the number of pages that arch has reserved but 8724 * is not known to alloc_large_system_hash(). 8725 */ 8726 static unsigned long __init arch_reserved_kernel_pages(void) 8727 { 8728 return 0; 8729 } 8730 #endif 8731 8732 /* 8733 * Adaptive scale is meant to reduce sizes of hash tables on large memory 8734 * machines. As memory size is increased the scale is also increased but at 8735 * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory 8736 * quadruples the scale is increased by one, which means the size of hash table 8737 * only doubles, instead of quadrupling as well. 8738 * Because 32-bit systems cannot have large physical memory, where this scaling 8739 * makes sense, it is disabled on such platforms. 
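 *
 * Illustrative arithmetic (4 KiB pages assumed, and no explicit high_limit
 * given): a 256 GiB machine is one quadrupling past the 64 GiB base, so
 * the loop in alloc_large_system_hash() below bumps "scale" once and the
 * resulting hash table gets twice as many entries as on a 64 GiB machine,
 * rather than four times as many.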
8740 */ 8741 #if __BITS_PER_LONG > 32 8742 #define ADAPT_SCALE_BASE (64ul << 30) 8743 #define ADAPT_SCALE_SHIFT 2 8744 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 8745 #endif 8746 8747 /* 8748 * allocate a large system hash table from bootmem 8749 * - it is assumed that the hash table must contain an exact power-of-2 8750 * quantity of entries 8751 * - limit is the number of hash buckets, not the total allocation size 8752 */ 8753 void *__init alloc_large_system_hash(const char *tablename, 8754 unsigned long bucketsize, 8755 unsigned long numentries, 8756 int scale, 8757 int flags, 8758 unsigned int *_hash_shift, 8759 unsigned int *_hash_mask, 8760 unsigned long low_limit, 8761 unsigned long high_limit) 8762 { 8763 unsigned long long max = high_limit; 8764 unsigned long log2qty, size; 8765 void *table = NULL; 8766 gfp_t gfp_flags; 8767 bool virt; 8768 bool huge; 8769 8770 /* allow the kernel cmdline to have a say */ 8771 if (!numentries) { 8772 /* round applicable memory size up to nearest megabyte */ 8773 numentries = nr_kernel_pages; 8774 numentries -= arch_reserved_kernel_pages(); 8775 8776 /* It isn't necessary when PAGE_SIZE >= 1MB */ 8777 if (PAGE_SHIFT < 20) 8778 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 8779 8780 #if __BITS_PER_LONG > 32 8781 if (!high_limit) { 8782 unsigned long adapt; 8783 8784 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 8785 adapt <<= ADAPT_SCALE_SHIFT) 8786 scale++; 8787 } 8788 #endif 8789 8790 /* limit to 1 bucket per 2^scale bytes of low memory */ 8791 if (scale > PAGE_SHIFT) 8792 numentries >>= (scale - PAGE_SHIFT); 8793 else 8794 numentries <<= (PAGE_SHIFT - scale); 8795 8796 /* Make sure we've got at least a 0-order allocation.. */ 8797 if (unlikely(flags & HASH_SMALL)) { 8798 /* Makes no sense without HASH_EARLY */ 8799 WARN_ON(!(flags & HASH_EARLY)); 8800 if (!(numentries >> *_hash_shift)) { 8801 numentries = 1UL << *_hash_shift; 8802 BUG_ON(!numentries); 8803 } 8804 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 8805 numentries = PAGE_SIZE / bucketsize; 8806 } 8807 numentries = roundup_pow_of_two(numentries); 8808 8809 /* limit allocation size to 1/16 total memory by default */ 8810 if (max == 0) { 8811 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 8812 do_div(max, bucketsize); 8813 } 8814 max = min(max, 0x80000000ULL); 8815 8816 if (numentries < low_limit) 8817 numentries = low_limit; 8818 if (numentries > max) 8819 numentries = max; 8820 8821 log2qty = ilog2(numentries); 8822 8823 gfp_flags = (flags & HASH_ZERO) ? 
GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 8824 do { 8825 virt = false; 8826 size = bucketsize << log2qty; 8827 if (flags & HASH_EARLY) { 8828 if (flags & HASH_ZERO) 8829 table = memblock_alloc(size, SMP_CACHE_BYTES); 8830 else 8831 table = memblock_alloc_raw(size, 8832 SMP_CACHE_BYTES); 8833 } else if (get_order(size) >= MAX_ORDER || hashdist) { 8834 table = __vmalloc(size, gfp_flags); 8835 virt = true; 8836 if (table) 8837 huge = is_vm_area_hugepages(table); 8838 } else { 8839 /* 8840 * If bucketsize is not a power-of-two, we may free 8841 * some pages at the end of hash table which 8842 * alloc_pages_exact() automatically does 8843 */ 8844 table = alloc_pages_exact(size, gfp_flags); 8845 kmemleak_alloc(table, size, 1, gfp_flags); 8846 } 8847 } while (!table && size > PAGE_SIZE && --log2qty); 8848 8849 if (!table) 8850 panic("Failed to allocate %s hash table\n", tablename); 8851 8852 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 8853 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, 8854 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); 8855 8856 if (_hash_shift) 8857 *_hash_shift = log2qty; 8858 if (_hash_mask) 8859 *_hash_mask = (1 << log2qty) - 1; 8860 8861 return table; 8862 } 8863 8864 /* 8865 * This function checks whether pageblock includes unmovable pages or not. 8866 * 8867 * PageLRU check without isolation or lru_lock could race so that 8868 * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable 8869 * check without lock_page also may miss some movable non-lru pages at 8870 * race condition. So you can't expect this function should be exact. 8871 * 8872 * Returns a page without holding a reference. If the caller wants to 8873 * dereference that page (e.g., dumping), it has to make sure that it 8874 * cannot get removed (e.g., via memory unplug) concurrently. 8875 * 8876 */ 8877 struct page *has_unmovable_pages(struct zone *zone, struct page *page, 8878 int migratetype, int flags) 8879 { 8880 unsigned long iter = 0; 8881 unsigned long pfn = page_to_pfn(page); 8882 unsigned long offset = pfn % pageblock_nr_pages; 8883 8884 if (is_migrate_cma_page(page)) { 8885 /* 8886 * CMA allocations (alloc_contig_range) really need to mark 8887 * isolate CMA pageblocks even when they are not movable in fact 8888 * so consider them movable here. 8889 */ 8890 if (is_migrate_cma(migratetype)) 8891 return NULL; 8892 8893 return page; 8894 } 8895 8896 for (; iter < pageblock_nr_pages - offset; iter++) { 8897 page = pfn_to_page(pfn + iter); 8898 8899 /* 8900 * Both, bootmem allocations and memory holes are marked 8901 * PG_reserved and are unmovable. We can even have unmovable 8902 * allocations inside ZONE_MOVABLE, for example when 8903 * specifying "movablecore". 8904 */ 8905 if (PageReserved(page)) 8906 return page; 8907 8908 /* 8909 * If the zone is movable and we have ruled out all reserved 8910 * pages then it should be reasonably safe to assume the rest 8911 * is movable. 8912 */ 8913 if (zone_idx(zone) == ZONE_MOVABLE) 8914 continue; 8915 8916 /* 8917 * Hugepages are not in LRU lists, but they're movable. 8918 * THPs are on the LRU, but need to be counted as #small pages. 8919 * We need not scan over tail pages because we don't 8920 * handle each tail page individually in migration. 
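 *
 * Illustrative example (an order-9, 512-page THP is assumed): if the scan
 * lands 100 pages past the compound head, compound_nr(head) is 512, so
 * skip_pages is 412 and iter advances past the remainder of the compound
 * page in one step.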
8921 */ 8922 if (PageHuge(page) || PageTransCompound(page)) { 8923 struct page *head = compound_head(page); 8924 unsigned int skip_pages; 8925 8926 if (PageHuge(page)) { 8927 if (!hugepage_migration_supported(page_hstate(head))) 8928 return page; 8929 } else if (!PageLRU(head) && !__PageMovable(head)) { 8930 return page; 8931 } 8932 8933 skip_pages = compound_nr(head) - (page - head); 8934 iter += skip_pages - 1; 8935 continue; 8936 } 8937 8938 /* 8939 * We can't use page_count without pin a page 8940 * because another CPU can free compound page. 8941 * This check already skips compound tails of THP 8942 * because their page->_refcount is zero at all time. 8943 */ 8944 if (!page_ref_count(page)) { 8945 if (PageBuddy(page)) 8946 iter += (1 << buddy_order(page)) - 1; 8947 continue; 8948 } 8949 8950 /* 8951 * The HWPoisoned page may be not in buddy system, and 8952 * page_count() is not 0. 8953 */ 8954 if ((flags & MEMORY_OFFLINE) && PageHWPoison(page)) 8955 continue; 8956 8957 /* 8958 * We treat all PageOffline() pages as movable when offlining 8959 * to give drivers a chance to decrement their reference count 8960 * in MEM_GOING_OFFLINE in order to indicate that these pages 8961 * can be offlined as there are no direct references anymore. 8962 * For actually unmovable PageOffline() where the driver does 8963 * not support this, we will fail later when trying to actually 8964 * move these pages that still have a reference count > 0. 8965 * (false negatives in this function only) 8966 */ 8967 if ((flags & MEMORY_OFFLINE) && PageOffline(page)) 8968 continue; 8969 8970 if (__PageMovable(page) || PageLRU(page)) 8971 continue; 8972 8973 /* 8974 * If there are RECLAIMABLE pages, we need to check 8975 * it. But now, memory offline itself doesn't call 8976 * shrink_node_slabs() and it still to be fixed. 8977 */ 8978 return page; 8979 } 8980 return NULL; 8981 } 8982 8983 #ifdef CONFIG_CONTIG_ALLOC 8984 static unsigned long pfn_max_align_down(unsigned long pfn) 8985 { 8986 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, 8987 pageblock_nr_pages) - 1); 8988 } 8989 8990 static unsigned long pfn_max_align_up(unsigned long pfn) 8991 { 8992 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, 8993 pageblock_nr_pages)); 8994 } 8995 8996 #if defined(CONFIG_DYNAMIC_DEBUG) || \ 8997 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) 8998 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 8999 static void alloc_contig_dump_pages(struct list_head *page_list) 9000 { 9001 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 9002 9003 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 9004 struct page *page; 9005 9006 dump_stack(); 9007 list_for_each_entry(page, page_list, lru) 9008 dump_page(page, "migration failure"); 9009 } 9010 } 9011 #else 9012 static inline void alloc_contig_dump_pages(struct list_head *page_list) 9013 { 9014 } 9015 #endif 9016 9017 /* [start, end) must belong to a single zone. */ 9018 static int __alloc_contig_migrate_range(struct compact_control *cc, 9019 unsigned long start, unsigned long end) 9020 { 9021 /* This function is based on compact_zone() from compaction.c. 
*/ 9022 unsigned int nr_reclaimed; 9023 unsigned long pfn = start; 9024 unsigned int tries = 0; 9025 int ret = 0; 9026 struct migration_target_control mtc = { 9027 .nid = zone_to_nid(cc->zone), 9028 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 9029 }; 9030 9031 lru_cache_disable(); 9032 9033 while (pfn < end || !list_empty(&cc->migratepages)) { 9034 if (fatal_signal_pending(current)) { 9035 ret = -EINTR; 9036 break; 9037 } 9038 9039 if (list_empty(&cc->migratepages)) { 9040 cc->nr_migratepages = 0; 9041 ret = isolate_migratepages_range(cc, pfn, end); 9042 if (ret && ret != -EAGAIN) 9043 break; 9044 pfn = cc->migrate_pfn; 9045 tries = 0; 9046 } else if (++tries == 5) { 9047 ret = -EBUSY; 9048 break; 9049 } 9050 9051 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 9052 &cc->migratepages); 9053 cc->nr_migratepages -= nr_reclaimed; 9054 9055 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 9056 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 9057 9058 /* 9059 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 9060 * to retry again over this error, so do the same here. 9061 */ 9062 if (ret == -ENOMEM) 9063 break; 9064 } 9065 9066 lru_cache_enable(); 9067 if (ret < 0) { 9068 if (ret == -EBUSY) 9069 alloc_contig_dump_pages(&cc->migratepages); 9070 putback_movable_pages(&cc->migratepages); 9071 return ret; 9072 } 9073 return 0; 9074 } 9075 9076 /** 9077 * alloc_contig_range() -- tries to allocate given range of pages 9078 * @start: start PFN to allocate 9079 * @end: one-past-the-last PFN to allocate 9080 * @migratetype: migratetype of the underlying pageblocks (either 9081 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 9082 * in range must have the same migratetype and it must 9083 * be either of the two. 9084 * @gfp_mask: GFP mask to use during compaction 9085 * 9086 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES 9087 * aligned. The PFN range must belong to a single zone. 9088 * 9089 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 9090 * pageblocks in the range. Once isolated, the pageblocks should not 9091 * be modified by others. 9092 * 9093 * Return: zero on success or negative error code. On success all 9094 * pages which PFN is in [start, end) are allocated for the caller and 9095 * need to be freed with free_contig_range(). 9096 */ 9097 int alloc_contig_range(unsigned long start, unsigned long end, 9098 unsigned migratetype, gfp_t gfp_mask) 9099 { 9100 unsigned long outer_start, outer_end; 9101 unsigned int order; 9102 int ret = 0; 9103 9104 struct compact_control cc = { 9105 .nr_migratepages = 0, 9106 .order = -1, 9107 .zone = page_zone(pfn_to_page(start)), 9108 .mode = MIGRATE_SYNC, 9109 .ignore_skip_hint = true, 9110 .no_set_skip_hint = true, 9111 .gfp_mask = current_gfp_context(gfp_mask), 9112 .alloc_contig = true, 9113 }; 9114 INIT_LIST_HEAD(&cc.migratepages); 9115 9116 /* 9117 * What we do here is we mark all pageblocks in range as 9118 * MIGRATE_ISOLATE. Because pageblock and max order pages may 9119 * have different sizes, and due to the way page allocator 9120 * work, we align the range to biggest of the two pages so 9121 * that page allocator won't try to merge buddies from 9122 * different pageblocks and change MIGRATE_ISOLATE to some 9123 * other migration type. 9124 * 9125 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 9126 * migrate the pages from an unaligned range (ie. pages that 9127 * we are interested in). 

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned.  The PFN range must belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range.  Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	unsigned int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is mark all pageblocks in range as
	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the bigger of the two sizes so
	 * that the page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from the unaligned range (ie. the pages that
	 * we are interested in).  This will put all the pages in
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from the page
	 * allocator, removing them from the buddy system.  This way
	 * the page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype, 0);
	if (ret)
		return ret;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes the
	 * problem.  So, just fall through.  test_pages_isolated() has a
	 * tracepoint which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated.  So, if we fall through be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to the caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
	 * more, all pages in [start, end) are free in the page allocator.
	 * What we are going to do is allocate all pages from
	 * [start, end) (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with pages that
	 * the page allocator holds, ie. they can be part of higher order
	 * pages.  Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = buddy_order(pfn_to_page(outer_start));

		/*
		 * The outer_start page could be a small order buddy page
		 * that doesn't include the start page.  Adjust outer_start
		 * in this case to report the failed page properly
		 * on the tracepoint in test_pages_isolated().
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}
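
	/*
	 * Worked example (editorial note, not in the original source):
	 * assume start is pfn 0x1234 and the free buddy page covering it
	 * is an order-4 page whose head is pfn 0x1230.  The loop above
	 * aligns outer_start down to a 1 << order boundary on each pass
	 * (0x1234, 0x1234, 0x1230) until PageBuddy() is seen at 0x1230.
	 * Since 0x1230 + (1 << 4) = 0x1240 > 0x1234, that buddy really
	 * does include start, so outer_start stays at 0x1230.  Had the
	 * buddy at 0x1230 only been order 1 (0x1230 + 2 <= 0x1234),
	 * outer_start would have been reset to start just above.
	 */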

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range);
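
/*
 * Hypothetical caller (editorial illustration, not part of this file):
 * a CMA-style user that wants the 1024 pages at [pfn, pfn + 1024) out of
 * a MIGRATE_CMA region might do, error handling elided:
 *
 *	ret = alloc_contig_range(pfn, pfn + 1024, MIGRATE_CMA, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...use pfn_to_page(pfn)...
 *	free_contig_range(pfn, 1024);
 *
 * The pageblocks spanning [pfn, pfn + 1024) must already be MIGRATE_CMA,
 * as the kerneldoc above requires.
 */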

static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}

/**
 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask to limit search and used during compaction
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range(). It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range(). This routine is intended
 * for allocation requests which cannot be fulfilled with the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary. If nr_pages is a
 * power of two, then the allocated range is also guaranteed to be aligned to
 * nr_pages (e.g. a 1GB request would be aligned to 1GB).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point.  If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							   gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%lu pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);

/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void zone_pcp_update(struct zone *zone, int cpu_online)
{
	mutex_lock(&pcp_batch_high_lock);
	zone_set_pageset_high_and_batch(zone, cpu_online);
	mutex_unlock(&pcp_batch_high_lock);
}

/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all cpus. A concurrent page freeing on another CPU that's about
 * to put the page on the pcplist will either finish before the drain and the
 * page will be drained, or observe the new high limit and skip the pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}
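
/*
 * Editorial usage sketch (not part of the original file): a caller that
 * needs the buddy freelists to be authoritative for a zone, for example
 * while isolating a range, is expected to bracket the operation:
 *
 *	zone_pcp_disable(zone);
 *	...operate on the zone's free lists...
 *	zone_pcp_enable(zone);
 *
 * The pcp_batch_high_lock mutex is taken in zone_pcp_disable() and only
 * released in zone_pcp_enable(), which is what makes the pairing mandatory.
 */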

void zone_pcp_reset(struct zone *zone)
{
	int cpu;
	struct per_cpu_zonestat *pzstats;

	if (zone->per_cpu_pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
			drain_zonestat(zone, pzstats);
		}
		free_percpu(zone->per_cpu_pageset);
		free_percpu(zone->per_cpu_zonestats);
		zone->per_cpu_pageset = &boot_pageset;
		zone->per_cpu_zonestats = &boot_zonestats;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone, must not contain holes,
 * must span full sections, and must be isolated before calling this function.
 */
void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;
	unsigned long flags;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

/*
 * This function returns a stable result only if called under the zone lock.
 */
bool is_free_buddy_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned int order;

	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) &&
		    buddy_order_unsafe(page_head) >= order)
			break;
	}

	return order < MAX_ORDER;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Break down a higher-order page into sub-pages, and keep our target page
 * out of the buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy, *next_page;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			next_page = page + size;
			current_buddy = page;
		} else {
			next_page = page;
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high, migratetype))
			continue;

		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high, migratetype);
			set_buddy_order(current_buddy, high);
			page = next_page;
		}
	}
}

/*
 * Take a page that will be marked as poisoned off the buddy allocator.
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
					       page_order, migratetype);
			SetPageHWPoisonTakenOff(page);
			if (!is_migrate_isolate(migratetype))
				__mod_zone_freepage_state(zone, -1, migratetype);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
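
/*
 * Editorial sketch (not part of the original file): these helpers exist for
 * the CONFIG_MEMORY_FAILURE handling of a poisoned free page.  A hypothetical
 * caller would pull the page out of the allocator and, if the poisoning is
 * later undone, hand it back:
 *
 *	if (take_page_off_buddy(page)) {
 *		...the pfn can no longer be handed out by the buddy...
 *		if (unpoisoning_the_page)
 *			put_page_back_buddy(page);
 *	}
 *
 * put_page_back_buddy() is defined just below; "unpoisoning_the_page" is a
 * placeholder condition, not a real symbol.
 */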

/*
 * Cancel takeoff done by take_page_off_buddy().
 */
bool put_page_back_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	int migratetype = get_pfnblock_migratetype(page, pfn);
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	if (put_page_testzero(page)) {
		ClearPageHWPoisonTakenOff(page);
		__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
		if (TestClearPageHWPoison(page)) {
			num_poisoned_pages_dec();
			ret = true;
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return ret;
}
#endif

#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void)
{
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat) {
		struct zone *zone = &pgdat->node_zones[ZONE_DMA];

		if (managed_zone(zone))
			return true;
	}
	return false;
}
#endif /* CONFIG_ZONE_DMA */
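
/*
 * Editorial usage sketch (not part of the original file): has_managed_dma()
 * reports whether any online node has managed pages in ZONE_DMA, so a
 * hypothetical caller could use it to drop an unsatisfiable GFP_DMA hint:
 *
 *	if ((gfp & GFP_DMA) && !has_managed_dma())
 *		gfp &= ~GFP_DMA;
 *
 * The caller and its fallback policy above are illustrative only.
 */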