// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/delayacct.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
#include "swap.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * Return value should be used with equivalent unlock helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_trylock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock(&_ret->member)) {				\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_trylock(ptr)						\
	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)
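/*
 * Illustrative caller pattern (a sketch, not compiled here; the fallback
 * helper is hypothetical): pcp_spin_trylock() is paired with
 * pcp_spin_unlock(), with the buddy path as the fallback when the trylock
 * fails:
 *
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 *	if (!pcp)
 *		return slow_buddy_path(zone);	<-- hypothetical fallback
 *	... operate on pcp->lists ...
 *	pcp_spin_unlock(pcp);
 *
 * The task stays pinned to its CPU between the this_cpu_ptr() lookup and the
 * unlock, so the pcp pointer cannot go stale due to migration in between.
 */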
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_high_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
EXPORT_SYMBOL(init_on_free);

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
#ifdef CONFIG_ZONE_DEVICE
	"Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_boost_factor __read_mostly = 15000;
int watermark_scale_factor = 10;

bool mirrored_kernelcore __initdata_memblock;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

static bool page_contains_unaccepted(struct page *page, unsigned int order);
static void accept_page(struct page *page, unsigned int order);
static bool try_to_accept_memory(struct zone *zone, unsigned int order);
static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper: it avoids the
 * section-mismatch warning and ensures that the function body still gets
 * unloaded with the rest of the init text.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}
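/*
 * Worked example (assuming pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4,
 * the common x86-64 configuration): within a section, pfn 0x2400 belongs to
 * pageblock 0x2400 >> 9 == 18, so its flags start at bit index 18 * 4 == 72,
 * i.e. word 1, bit 8 on a 64-bit kernel. Because the 4 bits per pageblock
 * (3 migratetype bits plus the skip bit) start at a multiple of 4, they
 * always land in a single word, which is what lets
 * __get_pfnblock_flags_mask() below read them with one READ_ONCE().
 */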
static __always_inline
unsigned long __get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, mask);
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}
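/*
 * The empty-bodied loop above is the standard lock-free read-modify-write
 * idiom: on failure, try_cmpxchg() refreshes 'word' with the current value,
 * so recomputing "(word & ~mask) | flags" in the condition is all the retry
 * needs. A minimal sketch of the same idiom on an arbitrary word (names here
 * are illustrative only):
 *
 *	unsigned long old, new;
 *
 *	old = READ_ONCE(*word);
 *	do {
 *		new = (old & ~mask) | flags;
 *	} while (!try_cmpxchg(word, &old, new));
 *
 * Racing readers only ever observe either the complete old or the complete
 * new word, which is why __get_pfnblock_flags_mask() gets away with a plain
 * READ_ONCE() and no lock.
 */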
void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (zone != page_zone(page))
		return 0;

	return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page state: %lu messages suppressed\n",
				 nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	int base = order;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		return NR_LOWORDER_PCP_LISTS;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * base) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}
static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page_folio(page));
	free_the_page(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

void destroy_large_folio(struct folio *folio)
{
	enum compound_dtor_id dtor = folio->_folio_dtor;

	VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio);
	compound_page_dtors[dtor](&folio->page);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static int __init early_debug_pagealloc(char *buf)
{
	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	__SetPageGuard(page);
	INIT_LIST_HEAD(&page->buddy_list);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return;

	__ClearPageGuard(page);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}
/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. If that is
 * happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 1)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merging between freepages on
			 * pageblocks without fallbacks and normal pageblocks.
			 * Without this, pageblock isolation could cause
			 * incorrect freepage or CMA accounting, or incorrect
			 * HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page is changed, otherwise 0
 *
 * It is used when the free page crosses two pageblocks with different
 * migratetypes at split_pfn_offset within the page. The split free pages will
 * be put into separate migratetype lists afterwards. Otherwise, the function
 * achieves nothing.
 */
int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset)
{
	struct zone *zone = page_zone(free_page);
	unsigned long free_page_pfn = page_to_pfn(free_page);
	unsigned long pfn;
	unsigned long flags;
	int free_page_order;
	int mt;
	int ret = 0;

	if (split_pfn_offset == 0)
		return ret;

	spin_lock_irqsave(&zone->lock, flags);

	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
		ret = -ENOENT;
		goto out;
	}

	mt = get_pageblock_migratetype(free_page);
	if (likely(!is_migrate_isolate(mt)))
		__mod_zone_freepage_state(zone, -(1UL << order), mt);

	del_page_from_free_list(free_page, zone, order);
	for (pfn = free_page_pfn;
	     pfn < free_page_pfn + (1UL << order);) {
		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

		free_page_order = min_t(unsigned int,
					pfn ? __ffs(pfn) : order,
					__fls(split_pfn_offset));
		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
				mt, FPI_NONE);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= (1UL << free_page_order);
		/* we have done the first part, now switch to second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}
static int free_tail_page_prepare(struct page *head_page, struct page *page)
{
	struct folio *folio = (struct folio *)head_page;
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!static_branch_unlikely(&check_pages_enabled)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: these may be in place of ->mapping */
		if (unlikely(folio_entire_mapcount(folio))) {
			bad_page(page, "nonzero entire_mapcount");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
			bad_page(page, "nonzero nr_pages_mapped");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_pincount))) {
			bad_page(page, "nonzero pincount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. For generic KASAN: deferred memory initialization has not yet completed.
 *    Tag-based KASAN modes skip pages freed via deferred memory initialization
 *    using page tags instead (see below).
 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
 *    that error detection is disabled for accesses via the page address.
 *
 * Pages will have match-all tags in the following circumstances:
 *
 * 1. Pages are being initialized for the first time, including during deferred
 *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
 *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
 * 3. The allocation was excluded from being checked due to sampling,
 *    see the call to kasan_unpoison_pages.
 *
 * Poisoning pages during deferred memory init would greatly lengthen the
 * process and cause problems on large-memory systems, as deferred page
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return deferred_pages_enabled();

	return page_kasan_tag(page) == 0xff;
}

static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, fpi_t fpi_flags)
{
	int bad = 0;
	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
	bool init = want_init_on_free();

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy.
		 * Untie memcg state and reset page's owner.
		 */
		if (memcg_kmem_online() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageHasHWPoisoned(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_page_prepare(page, page + i);
			if (is_check_pages_enabled()) {
				if (free_page_is_bad(page + i)) {
					bad++;
					continue;
				}
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_online() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (is_check_pages_enabled()) {
		if (free_page_is_bad(page))
			bad++;
		if (bad)
			return false;
	}

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!skip_kasan_poison) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	unsigned long flags;
	int min_pindex = 0;
	int max_pindex = NR_PCP_LISTS - 1;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure a proper count is passed; otherwise the list-draining loop
	 * below would never find a non-empty list and would spin forever.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion. */
		do {
			if (++pindex > max_pindex)
				pindex = min_pindex;
			list = &pcp->lists[pindex];
			if (!list_empty(list))
				break;

			if (pindex == max_pindex)
				max_pindex--;
			if (pindex == min_pindex)
				min_pindex++;
		} while (1);

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order, fpi_flags))
		return;

	/*
	 * Look up the pageblock migratetype before taking zone->lock, so the
	 * lookup does not add to the lock hold time; it is re-checked under
	 * the lock only when isolation may have changed it.
	 */
	migratetype = get_pfnblock_migratetype(page, pfn);

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	if (page_contains_unaccepted(page, order)) {
		if (order == MAX_ORDER && __free_unaccepted(page))
			return;

		accept_page(page, order);
	}

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with compaction's migration or free scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0,
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 *
 * Note: the function may return non-NULL struct page even for a page block
 * which contains a memory hole (i.e. there is no physical memory for a subset
 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
 * will fall into 2 sub-sections, the end pfn of the pageblock may be a hole
 * even though the start pfn is online and valid. This should be safe most of
 * the time because struct pages are still initialized via
 * init_unavailable_range() and pfn walkers shouldn't touch any physical memory
 * range for which they do not recognize any specific metadata in struct pages.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = pageblock_end_pfn(block_start_pfn);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			 block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
		cond_resched();
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}
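/*
 * Illustrative caller pattern for the check above (a sketch of how the
 * compaction scanners and set_zone_contiguous() walk a zone; the loop body
 * is a placeholder):
 *
 *	for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone);
 *	     pfn += pageblock_nr_pages) {
 *		page = pageblock_pfn_to_page(pfn, pfn + pageblock_nr_pages,
 *					     zone);
 *		if (!page)
 *			continue;	<-- pageblock unusable, skip it
 *		... scan the pageblock starting at page ...
 *	}
 *
 * pageblock_pfn_to_page() (mm/internal.h) is the wrapper that skips the
 * checks entirely once zone->contiguous is set.
 */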
/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), which allows the remainder
		 * to merge back into the allocator when the buddy is freed.
		 * The corresponding page table entries will not be touched;
		 * the pages stay not-present in the virtual address space.
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		add_to_free_list(&page[size], zone, high, migratetype);
		set_buddy_order(&page[size], high);
	}
}
static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator.
 */
static int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static inline bool check_new_pages(struct page *page, unsigned int order)
{
	if (is_check_pages_enabled()) {
		for (int i = 0; i < (1 << order); i++) {
			struct page *p = page + i;

			if (check_new_page(p))
				return true;
		}
	}

	return false;
}

static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
	/* Don't skip if a software KASAN mode is enabled. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return false;

	/* Skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return true;

	/*
	 * With hardware tag-based KASAN enabled, skip if this has been
	 * requested via __GFP_SKIP_KASAN.
	 */
	return flags & __GFP_SKIP_KASAN;
}

static inline bool should_skip_init(gfp_t flags)
{
	/* Don't skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return false;

	/* For hardware tag-based KASAN, skip if requested. */
	return (flags & __GFP_SKIP_ZERO);
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
			!should_skip_init(gfp_flags);
	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN unpoisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 */

	/*
	 * If memory tags should be zeroed
	 * (which happens only when memory should be initialized as well).
	 */
	if (zero_tags) {
		/* Initialize both memory and memory tags. */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);

		/* Take note that memory was initialized by the loop above. */
		init = false;
	}
	if (!should_skip_kasan_unpoison(gfp_flags) &&
	    kasan_unpoison_pages(page, order, init)) {
		/* Take note that memory was initialized by KASAN. */
		if (kasan_has_integrated_init())
			init = false;
	} else {
		/*
		 * If memory tags have not been set by KASAN, reset the page
		 * tags to ensure page_address() dereferencing does not fault.
		 */
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}
	/* If memory is still not initialized, initialize it now. */
	if (init)
		kernel_init_pages(page, 1 << order);

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists.
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order);
		expand(zone, page, order, current_order, migratetype);
		set_pcppage_migratetype(page, migratetype);
		trace_mm_page_alloc_zone_locked(page, order, migratetype,
				pcp_allowed_order(order) &&
				migratetype < MIGRATE_PCPTYPES);
		return page;
	}

	return NULL;
}
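/*
 * Sketch of __rmqueue_smallest() in action (values are illustrative): with
 * free MOVABLE blocks only at orders 3 and 5, a request for an order-1
 * MOVABLE page scans orders 1 and 2 in vain, takes the order-3 block off its
 * free list, and lets expand() hand back the order-2 and order-1 remainders,
 * returning the lowest-addressed order-1 piece. The search is strictly
 * smallest-first, so larger blocks are only fragmented when nothing smaller
 * is available.
 */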
1804 */ 1805 static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = { 1806 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1807 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1808 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1809 }; 1810 1811 #ifdef CONFIG_CMA 1812 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1813 unsigned int order) 1814 { 1815 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1816 } 1817 #else 1818 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1819 unsigned int order) { return NULL; } 1820 #endif 1821 1822 /* 1823 * Move the free pages in a range to the freelist tail of the requested type. 1824 * Note that start_pfn and end_pfn are not required to be aligned on a 1825 * pageblock boundary. If alignment is required, use move_freepages_block() 1826 */ 1827 static int move_freepages(struct zone *zone, 1828 unsigned long start_pfn, unsigned long end_pfn, 1829 int migratetype, int *num_movable) 1830 { 1831 struct page *page; 1832 unsigned long pfn; 1833 unsigned int order; 1834 int pages_moved = 0; 1835 1836 for (pfn = start_pfn; pfn <= end_pfn;) { 1837 page = pfn_to_page(pfn); 1838 if (!PageBuddy(page)) { 1839 /* 1840 * We assume that pages that could be isolated for 1841 * migration are movable. But we don't actually try 1842 * isolating, as that would be expensive. 1843 */ 1844 if (num_movable && 1845 (PageLRU(page) || __PageMovable(page))) 1846 (*num_movable)++; 1847 pfn++; 1848 continue; 1849 } 1850 1851 /* Make sure we are not inadvertently changing nodes */ 1852 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1853 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1854 1855 order = buddy_order(page); 1856 move_to_free_list(page, zone, order, migratetype); 1857 pfn += 1 << order; 1858 pages_moved += 1 << order; 1859 } 1860 1861 return pages_moved; 1862 } 1863 1864 int move_freepages_block(struct zone *zone, struct page *page, 1865 int migratetype, int *num_movable) 1866 { 1867 unsigned long start_pfn, end_pfn, pfn; 1868 1869 if (num_movable) 1870 *num_movable = 0; 1871 1872 pfn = page_to_pfn(page); 1873 start_pfn = pageblock_start_pfn(pfn); 1874 end_pfn = pageblock_end_pfn(pfn) - 1; 1875 1876 /* Do not cross zone boundaries */ 1877 if (!zone_spans_pfn(zone, start_pfn)) 1878 start_pfn = pfn; 1879 if (!zone_spans_pfn(zone, end_pfn)) 1880 return 0; 1881 1882 return move_freepages(zone, start_pfn, end_pfn, migratetype, 1883 num_movable); 1884 } 1885 1886 static void change_pageblock_range(struct page *pageblock_page, 1887 int start_order, int migratetype) 1888 { 1889 int nr_pageblocks = 1 << (start_order - pageblock_order); 1890 1891 while (nr_pageblocks--) { 1892 set_pageblock_migratetype(pageblock_page, migratetype); 1893 pageblock_page += pageblock_nr_pages; 1894 } 1895 } 1896 1897 /* 1898 * When we are falling back to another migratetype during allocation, try to 1899 * steal extra free pages from the same pageblocks to satisfy further 1900 * allocations, instead of polluting multiple pageblocks. 1901 * 1902 * If we are stealing a relatively large buddy page, it is likely there will 1903 * be more free pages in the pageblock, so try to steal them all. For 1904 * reclaimable and unmovable allocations, we steal regardless of page size, 1905 * as fragmentation caused by those allocations polluting movable pageblocks 1906 * is worse than movable allocations stealing from unmovable and reclaimable 1907 * pageblocks.
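 * For example, under these rules any RECLAIMABLE or UNMOVABLE fallback may steal extra pages regardless of the buddy's size, while a MOVABLE fallback steals beyond the buddy itself only when the stolen page has order >= pageblock_order / 2 (see can_steal_fallback() below).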
1908 */ 1909 static bool can_steal_fallback(unsigned int order, int start_mt) 1910 { 1911 /* 1912 * This order check is intentional, even though there is a more 1913 * relaxed order check below. The reason is that we can steal the 1914 * whole pageblock if this condition is met, whereas the check 1915 * below does not guarantee it and is only a heuristic, so it 1916 * could be changed at any time. 1917 */ 1918 if (order >= pageblock_order) 1919 return true; 1920 1921 if (order >= pageblock_order / 2 || 1922 start_mt == MIGRATE_RECLAIMABLE || 1923 start_mt == MIGRATE_UNMOVABLE || 1924 page_group_by_mobility_disabled) 1925 return true; 1926 1927 return false; 1928 } 1929 1930 static inline bool boost_watermark(struct zone *zone) 1931 { 1932 unsigned long max_boost; 1933 1934 if (!watermark_boost_factor) 1935 return false; 1936 /* 1937 * Don't bother in zones that are unlikely to produce results. 1938 * On small machines, including kdump capture kernels running 1939 * in a small area, boosting the watermark can cause an out of 1940 * memory situation immediately. 1941 */ 1942 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 1943 return false; 1944 1945 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 1946 watermark_boost_factor, 10000); 1947 1948 /* 1949 * The high watermark may be uninitialised if fragmentation occurs 1950 * very early in boot, so do not boost. We do not fall 1951 * through and boost by pageblock_nr_pages, as failing 1952 * allocations that early means that reclaim is not going 1953 * to help and it may even be impossible to reclaim the 1954 * boosted watermark, resulting in a hang. 1955 */ 1956 if (!max_boost) 1957 return false; 1958 1959 max_boost = max(pageblock_nr_pages, max_boost); 1960 1961 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 1962 max_boost); 1963 1964 return true; 1965 } 1966 1967 /* 1968 * This function implements the actual steal behaviour. If the order is large 1969 * enough, we can steal the whole pageblock. If not, we first move the free 1970 * pages in this pageblock to our migratetype and determine how many 1971 * already-allocated pages there are in the pageblock with a compatible 1972 * migratetype. If at least half of the pages are free or compatible, we can 1973 * change the migratetype of the pageblock itself, so pages freed in the 1974 * future will be put on the correct free list. 1975 */ 1976 static void steal_suitable_fallback(struct zone *zone, struct page *page, 1977 unsigned int alloc_flags, int start_type, bool whole_block) 1977 { 1978 unsigned int current_order = buddy_order(page); 1979 int free_pages, movable_pages, alike_pages; 1980 int old_block_type; 1981 1982 old_block_type = get_pageblock_migratetype(page); 1983 1984 /* 1985 * This can happen due to races and we want to prevent broken 1986 * highatomic accounting. 1987 */ 1988 if (is_migrate_highatomic(old_block_type)) 1989 goto single_page; 1990 1991 /* Take ownership for orders >= pageblock_order */ 1992 if (current_order >= pageblock_order) { 1993 change_pageblock_range(page, current_order, start_type); 1994 goto single_page; 1995 } 1996 1997 /* 1998 * Boost watermarks to increase reclaim pressure to reduce the 1999 * likelihood of future fallbacks. Wake kswapd now as the node 2000 * may be balanced overall and kswapd will not wake naturally.
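 * As a worked example, assuming watermark_boost_factor retains its usual default of 15000: a zone with a high watermark of 10000 pages gets max_boost = mult_frac(10000, 15000, 10000) = 15000 pages, and each fallback event below raises watermark_boost by pageblock_nr_pages until that cap is reached.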
2001 */ 2002 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2003 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2004 2005 /* We are not allowed to try stealing from the whole block */ 2006 if (!whole_block) 2007 goto single_page; 2008 2009 free_pages = move_freepages_block(zone, page, start_type, 2010 &movable_pages); 2011 /* 2012 * Determine how many pages are compatible with our allocation. 2013 * For movable allocation, it's the number of movable pages which 2014 * we just obtained. For other types it's a bit more tricky. 2015 */ 2016 if (start_type == MIGRATE_MOVABLE) { 2017 alike_pages = movable_pages; 2018 } else { 2019 /* 2020 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2021 * to MOVABLE pageblock, consider all non-movable pages as 2022 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2023 * vice versa, be conservative since we can't distinguish the 2024 * exact migratetype of non-movable pages. 2025 */ 2026 if (old_block_type == MIGRATE_MOVABLE) 2027 alike_pages = pageblock_nr_pages 2028 - (free_pages + movable_pages); 2029 else 2030 alike_pages = 0; 2031 } 2032 2033 /* moving whole block can fail due to zone boundary conditions */ 2034 if (!free_pages) 2035 goto single_page; 2036 2037 /* 2038 * If a sufficient number of pages in the block are either free or of 2039 * comparable migratability as our allocation, claim the whole block. 2040 */ 2041 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2042 page_group_by_mobility_disabled) 2043 set_pageblock_migratetype(page, start_type); 2044 2045 return; 2046 2047 single_page: 2048 move_to_free_list(page, zone, current_order, start_type); 2049 } 2050 2051 /* 2052 * Check whether there is a suitable fallback freepage with requested order. 2053 * If only_stealable is true, this function returns fallback_mt only if 2054 * we can steal other freepages all together. This would help to reduce 2055 * fragmentation due to mixed migratetype pages in one pageblock. 2056 */ 2057 int find_suitable_fallback(struct free_area *area, unsigned int order, 2058 int migratetype, bool only_stealable, bool *can_steal) 2059 { 2060 int i; 2061 int fallback_mt; 2062 2063 if (area->nr_free == 0) 2064 return -1; 2065 2066 *can_steal = false; 2067 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 2068 fallback_mt = fallbacks[migratetype][i]; 2069 if (free_area_empty(area, fallback_mt)) 2070 continue; 2071 2072 if (can_steal_fallback(order, migratetype)) 2073 *can_steal = true; 2074 2075 if (!only_stealable) 2076 return fallback_mt; 2077 2078 if (*can_steal) 2079 return fallback_mt; 2080 } 2081 2082 return -1; 2083 } 2084 2085 /* 2086 * Reserve a pageblock for exclusive use of high-order atomic allocations if 2087 * there are no empty page blocks that contain a page with a suitable order 2088 */ 2089 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2090 unsigned int alloc_order) 2091 { 2092 int mt; 2093 unsigned long max_managed, flags; 2094 2095 /* 2096 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2097 * Check is race-prone but harmless. 2098 */ 2099 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 2100 if (zone->nr_reserved_highatomic >= max_managed) 2101 return; 2102 2103 spin_lock_irqsave(&zone->lock, flags); 2104 2105 /* Recheck the nr_reserved_highatomic limit under the lock */ 2106 if (zone->nr_reserved_highatomic >= max_managed) 2107 goto out_unlock; 2108 2109 /* Yoink! 
*/ 2110 mt = get_pageblock_migratetype(page); 2111 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 2112 if (migratetype_is_mergeable(mt)) { 2113 zone->nr_reserved_highatomic += pageblock_nr_pages; 2114 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2115 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 2116 } 2117 2118 out_unlock: 2119 spin_unlock_irqrestore(&zone->lock, flags); 2120 } 2121 2122 /* 2123 * Used when an allocation is about to fail under memory pressure. This 2124 * potentially hurts the reliability of high-order allocations when under 2125 * intense memory pressure but failed atomic allocations should be easier 2126 * to recover from than an OOM. 2127 * 2128 * If @force is true, try to unreserve a pageblock even if it is the last 2129 * reserved highatomic pageblock. 2130 */ 2131 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2132 bool force) 2133 { 2134 struct zonelist *zonelist = ac->zonelist; 2135 unsigned long flags; 2136 struct zoneref *z; 2137 struct zone *zone; 2138 struct page *page; 2139 int order; 2140 bool ret; 2141 2142 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2143 ac->nodemask) { 2144 /* 2145 * Preserve at least one pageblock unless memory pressure 2146 * is really high. 2147 */ 2148 if (!force && zone->nr_reserved_highatomic <= 2149 pageblock_nr_pages) 2150 continue; 2151 2152 spin_lock_irqsave(&zone->lock, flags); 2153 for (order = 0; order <= MAX_ORDER; order++) { 2154 struct free_area *area = &(zone->free_area[order]); 2155 2156 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 2157 if (!page) 2158 continue; 2159 2160 /* 2161 * In the page freeing path, the migratetype change is 2162 * racy, so we can encounter several free pages in a 2163 * pageblock in this loop even though we changed the 2164 * pageblock type from highatomic to ac->migratetype. 2165 * So we should only adjust the count once. 2166 */ 2167 if (is_migrate_highatomic_page(page)) { 2168 /* 2169 * It should never happen but changes to 2170 * locking could inadvertently allow a per-cpu 2171 * drain to add pages to MIGRATE_HIGHATOMIC 2172 * while unreserving so be safe and watch for 2173 * underflows. 2174 */ 2175 zone->nr_reserved_highatomic -= min( 2176 pageblock_nr_pages, 2177 zone->nr_reserved_highatomic); 2178 } 2179 2180 /* 2181 * Convert to ac->migratetype and avoid the normal 2182 * pageblock stealing heuristics. Minimally, the caller 2183 * is doing the work and needs the pages. More 2184 * importantly, if the block was always converted to 2185 * MIGRATE_UNMOVABLE or another type then the number 2186 * of pageblocks that cannot be completely freed 2187 * may increase. 2188 */ 2189 set_pageblock_migratetype(page, ac->migratetype); 2190 ret = move_freepages_block(zone, page, ac->migratetype, 2191 NULL); 2192 if (ret) { 2193 spin_unlock_irqrestore(&zone->lock, flags); 2194 return ret; 2195 } 2196 } 2197 spin_unlock_irqrestore(&zone->lock, flags); 2198 } 2199 2200 return false; 2201 } 2202 2203 /* 2204 * Try finding a free buddy page on the fallback list and put it on the free 2205 * list of the requested migratetype, possibly along with other pages from the 2206 * same block, depending on fragmentation avoidance heuristics. Returns true if 2207 * fallback was found so that __rmqueue_smallest() can grab it. 2208 * 2209 * The use of signed ints for order and current_order is a deliberate 2210 * deviation from the rest of this file, to make the for loop 2211 * condition simpler.
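 * (With an unsigned type, the downward scan in the first loop below could never terminate for min_order == 0: the condition current_order >= min_order would stay true because decrementing past zero wraps around instead of going negative.)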
2212 */ 2213 static __always_inline bool 2214 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2215 unsigned int alloc_flags) 2216 { 2217 struct free_area *area; 2218 int current_order; 2219 int min_order = order; 2220 struct page *page; 2221 int fallback_mt; 2222 bool can_steal; 2223 2224 /* 2225 * Do not steal pages from freelists belonging to other pageblocks 2226 * i.e. orders < pageblock_order. If there are no local zones free, 2227 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2228 */ 2229 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2230 min_order = pageblock_order; 2231 2232 /* 2233 * Find the largest available free page in the other list. This roughly 2234 * approximates finding the pageblock with the most free pages, which 2235 * would be too costly to do exactly. 2236 */ 2237 for (current_order = MAX_ORDER; current_order >= min_order; 2238 --current_order) { 2239 area = &(zone->free_area[current_order]); 2240 fallback_mt = find_suitable_fallback(area, current_order, 2241 start_migratetype, false, &can_steal); 2242 if (fallback_mt == -1) 2243 continue; 2244 2245 /* 2246 * We cannot steal all free pages from the pageblock and the 2247 * requested migratetype is movable. In that case it's better to 2248 * steal and split the smallest available page instead of the 2249 * largest available page, because even if the next movable 2250 * allocation falls back into a different pageblock than this 2251 * one, it won't cause permanent fragmentation. 2252 */ 2253 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2254 && current_order > order) 2255 goto find_smallest; 2256 2257 goto do_steal; 2258 } 2259 2260 return false; 2261 2262 find_smallest: 2263 for (current_order = order; current_order <= MAX_ORDER; 2264 current_order++) { 2265 area = &(zone->free_area[current_order]); 2266 fallback_mt = find_suitable_fallback(area, current_order, 2267 start_migratetype, false, &can_steal); 2268 if (fallback_mt != -1) 2269 break; 2270 } 2271 2272 /* 2273 * This should not happen - we already found a suitable fallback 2274 * when looking for the largest page. 2275 */ 2276 VM_BUG_ON(current_order > MAX_ORDER); 2277 2278 do_steal: 2279 page = get_page_from_free_area(area, fallback_mt); 2280 2281 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2282 can_steal); 2283 2284 trace_mm_page_alloc_extfrag(page, order, current_order, 2285 start_migratetype, fallback_mt); 2286 2287 return true; 2288 2289 } 2290 2291 /* 2292 * Do the hard work of removing an element from the buddy allocator. 2293 * Call me with the zone->lock already held. 2294 */ 2295 static __always_inline struct page * 2296 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2297 unsigned int alloc_flags) 2298 { 2299 struct page *page; 2300 2301 if (IS_ENABLED(CONFIG_CMA)) { 2302 /* 2303 * Balance movable allocations between regular and CMA areas by 2304 * allocating from CMA when over half of the zone's free memory 2305 * is in the CMA area. 
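 * For example, in a zone with 1000 free pages of which 600 are in the CMA area, a movable request with ALLOC_CMA set is steered to CMA first (600 > 1000 / 2), keeping the smaller regular area available for allocations that cannot use CMA.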
2306 */ 2307 if (alloc_flags & ALLOC_CMA && 2308 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2309 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2310 page = __rmqueue_cma_fallback(zone, order); 2311 if (page) 2312 return page; 2313 } 2314 } 2315 retry: 2316 page = __rmqueue_smallest(zone, order, migratetype); 2317 if (unlikely(!page)) { 2318 if (alloc_flags & ALLOC_CMA) 2319 page = __rmqueue_cma_fallback(zone, order); 2320 2321 if (!page && __rmqueue_fallback(zone, order, migratetype, 2322 alloc_flags)) 2323 goto retry; 2324 } 2325 return page; 2326 } 2327 2328 /* 2329 * Obtain a specified number of elements from the buddy allocator, all under 2330 * a single hold of the lock, for efficiency. Add them to the supplied list. 2331 * Returns the number of new pages which were placed at *list. 2332 */ 2333 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2334 unsigned long count, struct list_head *list, 2335 int migratetype, unsigned int alloc_flags) 2336 { 2337 unsigned long flags; 2338 int i; 2339 2340 spin_lock_irqsave(&zone->lock, flags); 2341 for (i = 0; i < count; ++i) { 2342 struct page *page = __rmqueue(zone, order, migratetype, 2343 alloc_flags); 2344 if (unlikely(page == NULL)) 2345 break; 2346 2347 /* 2348 * Split buddy pages returned by expand() are received here in 2349 * physical page order. The page is added to the tail of the 2350 * caller's list. From the caller's perspective, the linked list 2351 * is therefore ordered by page number under some conditions, 2352 * which is useful for IO devices that can forward from the 2353 * head of the list and merge IO requests when the physical 2354 * pages are ordered properly. 2355 */ 2356 2357 list_add_tail(&page->pcp_list, list); 2358 if (is_migrate_cma(get_pcppage_migratetype(page))) 2359 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2360 -(1 << order)); 2361 } 2362 2363 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2364 spin_unlock_irqrestore(&zone->lock, flags); 2365 2366 return i; 2367 } 2368 2369 #ifdef CONFIG_NUMA 2370 /* 2371 * Called from the vmstat counter updater to drain pagesets of the 2372 * currently executing processor on remote nodes after they have 2373 * expired. 2374 */ 2375 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2376 { 2377 int to_drain, batch; 2378 2379 batch = READ_ONCE(pcp->batch); 2380 to_drain = min(pcp->count, batch); 2381 if (to_drain > 0) { 2382 spin_lock(&pcp->lock); 2383 free_pcppages_bulk(zone, to_drain, pcp, 0); 2384 spin_unlock(&pcp->lock); 2385 } 2386 } 2387 #endif 2388 2389 /* 2390 * Drain pcplists of the indicated processor and zone. 2391 */ 2392 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2393 { 2394 struct per_cpu_pages *pcp; 2395 2396 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2397 if (pcp->count) { 2398 spin_lock(&pcp->lock); 2399 free_pcppages_bulk(zone, pcp->count, pcp, 0); 2400 spin_unlock(&pcp->lock); 2401 } 2402 } 2403 2404 /* 2405 * Drain pcplists of all zones on the indicated processor. 2406 */ 2407 static void drain_pages(unsigned int cpu) 2408 { 2409 struct zone *zone; 2410 2411 for_each_populated_zone(zone) { 2412 drain_pages_zone(cpu, zone); 2413 } 2414 } 2415 2416 /* 2417 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
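 * Note that smp_processor_id() below selects the pageset, so this drains whichever CPU the task is currently running on; a caller that needs a specific CPU drained must prevent migration around the call.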
2418 */ 2419 void drain_local_pages(struct zone *zone) 2420 { 2421 int cpu = smp_processor_id(); 2422 2423 if (zone) 2424 drain_pages_zone(cpu, zone); 2425 else 2426 drain_pages(cpu); 2427 } 2428 2429 /* 2430 * The implementation of drain_all_pages(), exposing an extra parameter to 2431 * drain on all cpus. 2432 * 2433 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2434 * not empty. The check for non-emptiness can however race with a free to 2435 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2436 * that need the guarantee that every CPU has drained can disable the 2437 * optimizing racy check. 2438 */ 2439 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2440 { 2441 int cpu; 2442 2443 /* 2444 * Allocate in the BSS so we won't require allocation in 2445 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2446 */ 2447 static cpumask_t cpus_with_pcps; 2448 2449 /* 2450 * Do not drain if one is already in progress unless it's specific to 2451 * a zone. Such callers are primarily CMA and memory hotplug and need 2452 * the drain to be complete when the call returns. 2453 */ 2454 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2455 if (!zone) 2456 return; 2457 mutex_lock(&pcpu_drain_mutex); 2458 } 2459 2460 /* 2461 * We don't care about racing with CPU hotplug event 2462 * as offline notification will cause the notified 2463 * cpu to drain that CPU pcps and on_each_cpu_mask 2464 * disables preemption as part of its processing 2465 */ 2466 for_each_online_cpu(cpu) { 2467 struct per_cpu_pages *pcp; 2468 struct zone *z; 2469 bool has_pcps = false; 2470 2471 if (force_all_cpus) { 2472 /* 2473 * The pcp.count check is racy, some callers need a 2474 * guarantee that no cpu is missed. 2475 */ 2476 has_pcps = true; 2477 } else if (zone) { 2478 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2479 if (pcp->count) 2480 has_pcps = true; 2481 } else { 2482 for_each_populated_zone(z) { 2483 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2484 if (pcp->count) { 2485 has_pcps = true; 2486 break; 2487 } 2488 } 2489 } 2490 2491 if (has_pcps) 2492 cpumask_set_cpu(cpu, &cpus_with_pcps); 2493 else 2494 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2495 } 2496 2497 for_each_cpu(cpu, &cpus_with_pcps) { 2498 if (zone) 2499 drain_pages_zone(cpu, zone); 2500 else 2501 drain_pages(cpu); 2502 } 2503 2504 mutex_unlock(&pcpu_drain_mutex); 2505 } 2506 2507 /* 2508 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2509 * 2510 * When zone parameter is non-NULL, spill just the single zone's pages. 2511 */ 2512 void drain_all_pages(struct zone *zone) 2513 { 2514 __drain_all_pages(zone, false); 2515 } 2516 2517 #ifdef CONFIG_HIBERNATION 2518 2519 /* 2520 * Touch the watchdog for every WD_PAGE_COUNT pages. 
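 * (With 4KiB base pages, 128K pages means the watchdog is touched roughly once per 512MiB scanned.)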
2521 */ 2522 #define WD_PAGE_COUNT (128*1024) 2523 2524 void mark_free_pages(struct zone *zone) 2525 { 2526 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; 2527 unsigned long flags; 2528 unsigned int order, t; 2529 struct page *page; 2530 2531 if (zone_is_empty(zone)) 2532 return; 2533 2534 spin_lock_irqsave(&zone->lock, flags); 2535 2536 max_zone_pfn = zone_end_pfn(zone); 2537 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 2538 if (pfn_valid(pfn)) { 2539 page = pfn_to_page(pfn); 2540 2541 if (!--page_count) { 2542 touch_nmi_watchdog(); 2543 page_count = WD_PAGE_COUNT; 2544 } 2545 2546 if (page_zone(page) != zone) 2547 continue; 2548 2549 if (!swsusp_page_is_forbidden(page)) 2550 swsusp_unset_page_free(page); 2551 } 2552 2553 for_each_migratetype_order(order, t) { 2554 list_for_each_entry(page, 2555 &zone->free_area[order].free_list[t], buddy_list) { 2556 unsigned long i; 2557 2558 pfn = page_to_pfn(page); 2559 for (i = 0; i < (1UL << order); i++) { 2560 if (!--page_count) { 2561 touch_nmi_watchdog(); 2562 page_count = WD_PAGE_COUNT; 2563 } 2564 swsusp_set_page_free(pfn_to_page(pfn + i)); 2565 } 2566 } 2567 } 2568 spin_unlock_irqrestore(&zone->lock, flags); 2569 } 2570 #endif /* CONFIG_HIBERNATION */ 2571 2572 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 2573 unsigned int order) 2574 { 2575 int migratetype; 2576 2577 if (!free_pages_prepare(page, order, FPI_NONE)) 2578 return false; 2579 2580 migratetype = get_pfnblock_migratetype(page, pfn); 2581 set_pcppage_migratetype(page, migratetype); 2582 return true; 2583 } 2584 2585 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch, 2586 bool free_high) 2587 { 2588 int min_nr_free, max_nr_free; 2589 2590 /* Free everything if batch freeing high-order pages. */ 2591 if (unlikely(free_high)) 2592 return pcp->count; 2593 2594 /* Check for PCP disabled or boot pageset */ 2595 if (unlikely(high < batch)) 2596 return 1; 2597 2598 /* Leave at least pcp->batch pages on the list */ 2599 min_nr_free = batch; 2600 max_nr_free = high - batch; 2601 2602 /* 2603 * Double the number of pages freed each time there is subsequent 2604 * freeing of pages without any allocation. 2605 */ 2606 batch <<= pcp->free_factor; 2607 if (batch < max_nr_free) 2608 pcp->free_factor++; 2609 batch = clamp(batch, min_nr_free, max_nr_free); 2610 2611 return batch; 2612 } 2613 2614 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2615 bool free_high) 2616 { 2617 int high = READ_ONCE(pcp->high); 2618 2619 if (unlikely(!high || free_high)) 2620 return 0; 2621 2622 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 2623 return high; 2624 2625 /* 2626 * If reclaim is active, limit the number of pages that can be 2627 * stored on pcp lists. 2628 */ 2629 return min(READ_ONCE(pcp->batch) << 2, high); 2630 } 2631 2632 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 2633 struct page *page, int migratetype, 2634 unsigned int order) 2635 { 2636 int high; 2637 int pindex; 2638 bool free_high; 2639 2640 __count_vm_events(PGFREE, 1 << order); 2641 pindex = order_to_pindex(migratetype, order); 2642 list_add(&page->pcp_list, &pcp->lists[pindex]); 2643 pcp->count += 1 << order; 2644 2645 /* 2646 * As high-order pages other than THPs stored on PCP can contribute 2647 * to fragmentation, limit the number stored when PCP is heavily 2648 * freeing without allocation. The remainder after bulk freeing 2649 * stops will be drained from vmstat refresh context.
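 * As a worked example of the scaling in nr_pcp_free() above: with batch = 63 and high = 512, successive free-only bursts free 63, 126 and 252 pages (batch << free_factor, doubling each time) until the clamp at high - batch = 449 pages is reached.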
2650 */ 2651 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); 2652 2653 high = nr_pcp_high(pcp, zone, free_high); 2654 if (pcp->count >= high) { 2655 int batch = READ_ONCE(pcp->batch); 2656 2657 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex); 2658 } 2659 } 2660 2661 /* 2662 * Free a pcp page 2663 */ 2664 void free_unref_page(struct page *page, unsigned int order) 2665 { 2666 unsigned long __maybe_unused UP_flags; 2667 struct per_cpu_pages *pcp; 2668 struct zone *zone; 2669 unsigned long pfn = page_to_pfn(page); 2670 int migratetype; 2671 2672 if (!free_unref_page_prepare(page, pfn, order)) 2673 return; 2674 2675 /* 2676 * We only track unmovable, reclaimable and movable on pcp lists. 2677 * Place ISOLATE pages on the isolated list because they are being 2678 * offlined but treat HIGHATOMIC as movable pages so we can get those 2679 * areas back if necessary. Otherwise, we may have to free 2680 * excessively into the page allocator 2681 */ 2682 migratetype = get_pcppage_migratetype(page); 2683 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2684 if (unlikely(is_migrate_isolate(migratetype))) { 2685 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 2686 return; 2687 } 2688 migratetype = MIGRATE_MOVABLE; 2689 } 2690 2691 zone = page_zone(page); 2692 pcp_trylock_prepare(UP_flags); 2693 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2694 if (pcp) { 2695 free_unref_page_commit(zone, pcp, page, migratetype, order); 2696 pcp_spin_unlock(pcp); 2697 } else { 2698 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); 2699 } 2700 pcp_trylock_finish(UP_flags); 2701 } 2702 2703 /* 2704 * Free a list of 0-order pages 2705 */ 2706 void free_unref_page_list(struct list_head *list) 2707 { 2708 unsigned long __maybe_unused UP_flags; 2709 struct page *page, *next; 2710 struct per_cpu_pages *pcp = NULL; 2711 struct zone *locked_zone = NULL; 2712 int batch_count = 0; 2713 int migratetype; 2714 2715 /* Prepare pages for freeing */ 2716 list_for_each_entry_safe(page, next, list, lru) { 2717 unsigned long pfn = page_to_pfn(page); 2718 if (!free_unref_page_prepare(page, pfn, 0)) { 2719 list_del(&page->lru); 2720 continue; 2721 } 2722 2723 /* 2724 * Free isolated pages directly to the allocator, see 2725 * comment in free_unref_page. 2726 */ 2727 migratetype = get_pcppage_migratetype(page); 2728 if (unlikely(is_migrate_isolate(migratetype))) { 2729 list_del(&page->lru); 2730 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 2731 continue; 2732 } 2733 } 2734 2735 list_for_each_entry_safe(page, next, list, lru) { 2736 struct zone *zone = page_zone(page); 2737 2738 list_del(&page->lru); 2739 migratetype = get_pcppage_migratetype(page); 2740 2741 /* 2742 * Either different zone requiring a different pcp lock or 2743 * excessive lock hold times when freeing a large list of 2744 * pages. 2745 */ 2746 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) { 2747 if (pcp) { 2748 pcp_spin_unlock(pcp); 2749 pcp_trylock_finish(UP_flags); 2750 } 2751 2752 batch_count = 0; 2753 2754 /* 2755 * trylock is necessary as pages may be getting freed 2756 * from IRQ or SoftIRQ context after an IO completion. 
2757 */ 2758 pcp_trylock_prepare(UP_flags); 2759 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2760 if (unlikely(!pcp)) { 2761 pcp_trylock_finish(UP_flags); 2762 free_one_page(zone, page, page_to_pfn(page), 2763 0, migratetype, FPI_NONE); 2764 locked_zone = NULL; 2765 continue; 2766 } 2767 locked_zone = zone; 2768 } 2769 2770 /* 2771 * Non-isolated types over MIGRATE_PCPTYPES get added 2772 * to the MIGRATE_MOVABLE pcp list. 2773 */ 2774 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2775 migratetype = MIGRATE_MOVABLE; 2776 2777 trace_mm_page_free_batched(page); 2778 free_unref_page_commit(zone, pcp, page, migratetype, 0); 2779 batch_count++; 2780 } 2781 2782 if (pcp) { 2783 pcp_spin_unlock(pcp); 2784 pcp_trylock_finish(UP_flags); 2785 } 2786 } 2787 2788 /* 2789 * split_page takes a non-compound higher-order page, and splits it into 2790 * n (1<<order) sub-pages: page[0..n] 2791 * Each sub-page must be freed individually. 2792 * 2793 * Note: this is probably too low level an operation for use in drivers. 2794 * Please consult with lkml before using this in your driver. 2795 */ 2796 void split_page(struct page *page, unsigned int order) 2797 { 2798 int i; 2799 2800 VM_BUG_ON_PAGE(PageCompound(page), page); 2801 VM_BUG_ON_PAGE(!page_count(page), page); 2802 2803 for (i = 1; i < (1 << order); i++) 2804 set_page_refcounted(page + i); 2805 split_page_owner(page, 1 << order); 2806 split_page_memcg(page, 1 << order); 2807 } 2808 EXPORT_SYMBOL_GPL(split_page); 2809 2810 int __isolate_free_page(struct page *page, unsigned int order) 2811 { 2812 struct zone *zone = page_zone(page); 2813 int mt = get_pageblock_migratetype(page); 2814 2815 if (!is_migrate_isolate(mt)) { 2816 unsigned long watermark; 2817 /* 2818 * Obey watermarks as if the page was being allocated. We can 2819 * emulate a high-order watermark check with a raised order-0 2820 * watermark, because we already know our high-order page 2821 * exists. 2822 */ 2823 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2824 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2825 return 0; 2826 2827 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2828 } 2829 2830 del_page_from_free_list(page, zone, order); 2831 2832 /* 2833 * Set the pageblock if the isolated page is at least half of a 2834 * pageblock 2835 */ 2836 if (order >= pageblock_order - 1) { 2837 struct page *endpage = page + (1 << order) - 1; 2838 for (; page < endpage; page += pageblock_nr_pages) { 2839 int mt = get_pageblock_migratetype(page); 2840 /* 2841 * Only change normal pageblocks (i.e., they can merge 2842 * with others) 2843 */ 2844 if (migratetype_is_mergeable(mt)) 2845 set_pageblock_migratetype(page, 2846 MIGRATE_MOVABLE); 2847 } 2848 } 2849 2850 return 1UL << order; 2851 } 2852 2853 /** 2854 * __putback_isolated_page - Return a now-isolated page back where we got it 2855 * @page: Page that was isolated 2856 * @order: Order of the isolated page 2857 * @mt: The page's pageblock's migratetype 2858 * 2859 * This function is meant to return a page pulled from the free lists via 2860 * __isolate_free_page back to the free lists they were pulled from. 2861 */ 2862 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2863 { 2864 struct zone *zone = page_zone(page); 2865 2866 /* zone lock should be held when this function is called */ 2867 lockdep_assert_held(&zone->lock); 2868 2869 /* Return isolated page to tail of freelist. 
*/ 2870 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2871 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2872 } 2873 2874 /* 2875 * Update NUMA hit/miss statistics 2876 */ 2877 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2878 long nr_account) 2879 { 2880 #ifdef CONFIG_NUMA 2881 enum numa_stat_item local_stat = NUMA_LOCAL; 2882 2883 /* skip numa counters update if numa stats is disabled */ 2884 if (!static_branch_likely(&vm_numa_stat_key)) 2885 return; 2886 2887 if (zone_to_nid(z) != numa_node_id()) 2888 local_stat = NUMA_OTHER; 2889 2890 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2891 __count_numa_events(z, NUMA_HIT, nr_account); 2892 else { 2893 __count_numa_events(z, NUMA_MISS, nr_account); 2894 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2895 } 2896 __count_numa_events(z, local_stat, nr_account); 2897 #endif 2898 } 2899 2900 static __always_inline 2901 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2902 unsigned int order, unsigned int alloc_flags, 2903 int migratetype) 2904 { 2905 struct page *page; 2906 unsigned long flags; 2907 2908 do { 2909 page = NULL; 2910 spin_lock_irqsave(&zone->lock, flags); 2911 /* 2912 * order-0 request can reach here when the pcplist is skipped 2913 * due to non-CMA allocation context. HIGHATOMIC area is 2914 * reserved for high-order atomic allocation, so order-0 2915 * request should skip it. 2916 */ 2917 if (alloc_flags & ALLOC_HIGHATOMIC) 2918 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2919 if (!page) { 2920 page = __rmqueue(zone, order, migratetype, alloc_flags); 2921 2922 /* 2923 * If the allocation fails, allow OOM handling access 2924 * to HIGHATOMIC reserves as failing now is worse than 2925 * failing a high-order atomic allocation in the 2926 * future. 2927 */ 2928 if (!page && (alloc_flags & ALLOC_OOM)) 2929 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2930 2931 if (!page) { 2932 spin_unlock_irqrestore(&zone->lock, flags); 2933 return NULL; 2934 } 2935 } 2936 __mod_zone_freepage_state(zone, -(1 << order), 2937 get_pcppage_migratetype(page)); 2938 spin_unlock_irqrestore(&zone->lock, flags); 2939 } while (check_new_pages(page, order)); 2940 2941 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2942 zone_statistics(preferred_zone, zone, 1); 2943 2944 return page; 2945 } 2946 2947 /* Remove page from the per-cpu list, caller must protect the list */ 2948 static inline 2949 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 2950 int migratetype, 2951 unsigned int alloc_flags, 2952 struct per_cpu_pages *pcp, 2953 struct list_head *list) 2954 { 2955 struct page *page; 2956 2957 do { 2958 if (list_empty(list)) { 2959 int batch = READ_ONCE(pcp->batch); 2960 int alloced; 2961 2962 /* 2963 * Scale batch relative to order if batch implies 2964 * free pages can be stored on the PCP. Batch can 2965 * be 1 for small zones or for boot pagesets which 2966 * should never store free pages as the pages may 2967 * belong to arbitrary zones. 
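 * For instance, a base batch of 64 refills an order-3 list with max(64 >> 3, 2) = 8 compound pages per bulk call, i.e. still 64 base pages per trip under the zone lock.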
2968 */ 2969 if (batch > 1) 2970 batch = max(batch >> order, 2); 2971 alloced = rmqueue_bulk(zone, order, 2972 batch, list, 2973 migratetype, alloc_flags); 2974 2975 pcp->count += alloced << order; 2976 if (unlikely(list_empty(list))) 2977 return NULL; 2978 } 2979 2980 page = list_first_entry(list, struct page, pcp_list); 2981 list_del(&page->pcp_list); 2982 pcp->count -= 1 << order; 2983 } while (check_new_pages(page, order)); 2984 2985 return page; 2986 } 2987 2988 /* Lock and remove page from the per-cpu list */ 2989 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 2990 struct zone *zone, unsigned int order, 2991 int migratetype, unsigned int alloc_flags) 2992 { 2993 struct per_cpu_pages *pcp; 2994 struct list_head *list; 2995 struct page *page; 2996 unsigned long __maybe_unused UP_flags; 2997 2998 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 2999 pcp_trylock_prepare(UP_flags); 3000 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 3001 if (!pcp) { 3002 pcp_trylock_finish(UP_flags); 3003 return NULL; 3004 } 3005 3006 /* 3007 * On allocation, reduce the number of pages that are batch freed. 3008 * See nr_pcp_free() where free_factor is increased for subsequent 3009 * frees. 3010 */ 3011 pcp->free_factor >>= 1; 3012 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3013 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3014 pcp_spin_unlock(pcp); 3015 pcp_trylock_finish(UP_flags); 3016 if (page) { 3017 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3018 zone_statistics(preferred_zone, zone, 1); 3019 } 3020 return page; 3021 } 3022 3023 /* 3024 * Allocate a page from the given zone. 3025 * Use pcplists for THP or "cheap" high-order allocations. 3026 */ 3027 3028 /* 3029 * Do not instrument rmqueue() with KMSAN. This function may call 3030 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 3031 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3032 * may call rmqueue() again, which will result in a deadlock. 3033 */ 3034 __no_sanitize_memory 3035 static inline 3036 struct page *rmqueue(struct zone *preferred_zone, 3037 struct zone *zone, unsigned int order, 3038 gfp_t gfp_flags, unsigned int alloc_flags, 3039 int migratetype) 3040 { 3041 struct page *page; 3042 3043 /* 3044 * We most definitely don't want callers attempting to 3045 * allocate greater than order-1 page units with __GFP_NOFAIL. 3046 */ 3047 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 3048 3049 if (likely(pcp_allowed_order(order))) { 3050 /* 3051 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and 3052 * we need to skip it when CMA area isn't allowed. 
3053 */ 3054 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || 3055 migratetype != MIGRATE_MOVABLE) { 3056 page = rmqueue_pcplist(preferred_zone, zone, order, 3057 migratetype, alloc_flags); 3058 if (likely(page)) 3059 goto out; 3060 } 3061 } 3062 3063 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3064 migratetype); 3065 3066 out: 3067 /* Separate test+clear to avoid unnecessary atomics */ 3068 if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3069 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3070 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3071 } 3072 3073 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3074 return page; 3075 } 3076 3077 #ifdef CONFIG_FAIL_PAGE_ALLOC 3078 3079 static struct { 3080 struct fault_attr attr; 3081 3082 bool ignore_gfp_highmem; 3083 bool ignore_gfp_reclaim; 3084 u32 min_order; 3085 } fail_page_alloc = { 3086 .attr = FAULT_ATTR_INITIALIZER, 3087 .ignore_gfp_reclaim = true, 3088 .ignore_gfp_highmem = true, 3089 .min_order = 1, 3090 }; 3091 3092 static int __init setup_fail_page_alloc(char *str) 3093 { 3094 return setup_fault_attr(&fail_page_alloc.attr, str); 3095 } 3096 __setup("fail_page_alloc=", setup_fail_page_alloc); 3097 3098 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3099 { 3100 int flags = 0; 3101 3102 if (order < fail_page_alloc.min_order) 3103 return false; 3104 if (gfp_mask & __GFP_NOFAIL) 3105 return false; 3106 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 3107 return false; 3108 if (fail_page_alloc.ignore_gfp_reclaim && 3109 (gfp_mask & __GFP_DIRECT_RECLAIM)) 3110 return false; 3111 3112 /* See comment in __should_failslab() */ 3113 if (gfp_mask & __GFP_NOWARN) 3114 flags |= FAULT_NOWARN; 3115 3116 return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags); 3117 } 3118 3119 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3120 3121 static int __init fail_page_alloc_debugfs(void) 3122 { 3123 umode_t mode = S_IFREG | 0600; 3124 struct dentry *dir; 3125 3126 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 3127 &fail_page_alloc.attr); 3128 3129 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3130 &fail_page_alloc.ignore_gfp_reclaim); 3131 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 3132 &fail_page_alloc.ignore_gfp_highmem); 3133 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); 3134 3135 return 0; 3136 } 3137 3138 late_initcall(fail_page_alloc_debugfs); 3139 3140 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3141 3142 #else /* CONFIG_FAIL_PAGE_ALLOC */ 3143 3144 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3145 { 3146 return false; 3147 } 3148 3149 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 3150 3151 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3152 { 3153 return __should_fail_alloc_page(gfp_mask, order); 3154 } 3155 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 3156 3157 static inline long __zone_watermark_unusable_free(struct zone *z, 3158 unsigned int order, unsigned int alloc_flags) 3159 { 3160 long unusable_free = (1 << order) - 1; 3161 3162 /* 3163 * If the caller does not have rights to reserves below the min 3164 * watermark then subtract the high-atomic reserves. This will 3165 * over-estimate the size of the atomic reserve but it avoids a search. 
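 * E.g. for an order-4 request the starting point is (1 << 4) - 1 = 15 unusable pages (the worst-case remainder below the requested order); the high-atomic reserve and, when ALLOC_CMA is not set, the free CMA pages are then added on top.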
3166 */ 3167 if (likely(!(alloc_flags & ALLOC_RESERVES))) 3168 unusable_free += z->nr_reserved_highatomic; 3169 3170 #ifdef CONFIG_CMA 3171 /* If allocation can't use CMA areas don't use free CMA pages */ 3172 if (!(alloc_flags & ALLOC_CMA)) 3173 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3174 #endif 3175 #ifdef CONFIG_UNACCEPTED_MEMORY 3176 unusable_free += zone_page_state(z, NR_UNACCEPTED); 3177 #endif 3178 3179 return unusable_free; 3180 } 3181 3182 /* 3183 * Return true if free base pages are above 'mark'. For high-order checks it 3184 * will return true if the order-0 watermark is reached and there is at least 3185 * one free page of a suitable size. Checking now avoids taking the zone lock 3186 * to check in the allocation paths if no pages are free. 3187 */ 3188 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3189 int highest_zoneidx, unsigned int alloc_flags, 3190 long free_pages) 3191 { 3192 long min = mark; 3193 int o; 3194 3195 /* free_pages may go negative - that's OK */ 3196 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3197 3198 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 3199 /* 3200 * __GFP_HIGH allows access to 50% of the min reserve as well 3201 * as OOM. 3202 */ 3203 if (alloc_flags & ALLOC_MIN_RESERVE) { 3204 min -= min / 2; 3205 3206 /* 3207 * Non-blocking allocations (e.g. GFP_ATOMIC) can 3208 * access more reserves than just __GFP_HIGH. Other 3209 * non-blocking allocation requests such as GFP_NOWAIT 3210 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 3211 * access to the min reserve. 3212 */ 3213 if (alloc_flags & ALLOC_NON_BLOCK) 3214 min -= min / 4; 3215 } 3216 3217 /* 3218 * OOM victims can try even harder than the normal reserve 3219 * users on the grounds that it's definitely going to be in 3220 * the exit path shortly and free memory. Any allocation it 3221 * makes during the free path will be small and short-lived. 3222 */ 3223 if (alloc_flags & ALLOC_OOM) 3224 min -= min / 2; 3225 } 3226 3227 /* 3228 * Check watermarks for an order-0 allocation request. If these 3229 * are not met, then a high-order request also cannot go ahead 3230 * even if a suitable page happened to be free.
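 * To make the reserve scaling above concrete: starting from min = 1024 pages, ALLOC_MIN_RESERVE lowers the cut-off to 512, ALLOC_NON_BLOCK on top of that to 384, and ALLOC_OOM halves whatever value remains before the lowmem reserve is added below.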
3231 */ 3232 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 3233 return false; 3234 3235 /* If this is an order-0 request then the watermark is fine */ 3236 if (!order) 3237 return true; 3238 3239 /* For a high-order request, check at least one suitable page is free */ 3240 for (o = order; o <= MAX_ORDER; o++) { 3241 struct free_area *area = &z->free_area[o]; 3242 int mt; 3243 3244 if (!area->nr_free) 3245 continue; 3246 3247 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 3248 if (!free_area_empty(area, mt)) 3249 return true; 3250 } 3251 3252 #ifdef CONFIG_CMA 3253 if ((alloc_flags & ALLOC_CMA) && 3254 !free_area_empty(area, MIGRATE_CMA)) { 3255 return true; 3256 } 3257 #endif 3258 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 3259 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 3260 return true; 3261 } 3262 } 3263 return false; 3264 } 3265 3266 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3267 int highest_zoneidx, unsigned int alloc_flags) 3268 { 3269 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3270 zone_page_state(z, NR_FREE_PAGES)); 3271 } 3272 3273 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 3274 unsigned long mark, int highest_zoneidx, 3275 unsigned int alloc_flags, gfp_t gfp_mask) 3276 { 3277 long free_pages; 3278 3279 free_pages = zone_page_state(z, NR_FREE_PAGES); 3280 3281 /* 3282 * Fast check for order-0 only. If this fails then the reserves 3283 * need to be calculated. 3284 */ 3285 if (!order) { 3286 long usable_free; 3287 long reserved; 3288 3289 usable_free = free_pages; 3290 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 3291 3292 /* reserved may over estimate high-atomic reserves. */ 3293 usable_free -= min(usable_free, reserved); 3294 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 3295 return true; 3296 } 3297 3298 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 3299 free_pages)) 3300 return true; 3301 3302 /* 3303 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 3304 * when checking the min watermark. The min watermark is the 3305 * point where boosting is ignored so that kswapd is woken up 3306 * when below the low watermark. 
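 * For example, an order-0 GFP_ATOMIC request (which carries __GFP_HIGH and thus ALLOC_MIN_RESERVE) that failed only because of watermark boosting is re-checked below against the raw WMARK_MIN, so boosting alone does not fail such allocations.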
3307 */ 3308 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 3309 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 3310 mark = z->_watermark[WMARK_MIN]; 3311 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 3312 alloc_flags, free_pages); 3313 } 3314 3315 return false; 3316 } 3317 3318 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 3319 unsigned long mark, int highest_zoneidx) 3320 { 3321 long free_pages = zone_page_state(z, NR_FREE_PAGES); 3322 3323 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 3324 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 3325 3326 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3327 free_pages); 3328 } 3329 3330 #ifdef CONFIG_NUMA 3331 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3332 3333 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3334 { 3335 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3336 node_reclaim_distance; 3337 } 3338 #else /* CONFIG_NUMA */ 3339 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3340 { 3341 return true; 3342 } 3343 #endif /* CONFIG_NUMA */ 3344 3345 /* 3346 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3347 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3348 * premature use of a lower zone may cause lowmem pressure problems that 3349 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3350 * probably too small. It only makes sense to spread allocations to avoid 3351 * fragmentation between the Normal and DMA32 zones. 3352 */ 3353 static inline unsigned int 3354 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3355 { 3356 unsigned int alloc_flags; 3357 3358 /* 3359 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3360 * to save a branch. 3361 */ 3362 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3363 3364 #ifdef CONFIG_ZONE_DMA32 3365 if (!zone) 3366 return alloc_flags; 3367 3368 if (zone_idx(zone) != ZONE_NORMAL) 3369 return alloc_flags; 3370 3371 /* 3372 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3373 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3374 * on UMA that if Normal is populated then so is DMA32. 3375 */ 3376 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3377 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3378 return alloc_flags; 3379 3380 alloc_flags |= ALLOC_NOFRAGMENT; 3381 #endif /* CONFIG_ZONE_DMA32 */ 3382 return alloc_flags; 3383 } 3384 3385 /* Must be called after current_gfp_context() which can change gfp_mask */ 3386 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3387 unsigned int alloc_flags) 3388 { 3389 #ifdef CONFIG_CMA 3390 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3391 alloc_flags |= ALLOC_CMA; 3392 #endif 3393 return alloc_flags; 3394 } 3395 3396 /* 3397 * get_page_from_freelist goes through the zonelist trying to allocate 3398 * a page. 3399 */ 3400 static struct page * 3401 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3402 const struct alloc_context *ac) 3403 { 3404 struct zoneref *z; 3405 struct zone *zone; 3406 struct pglist_data *last_pgdat = NULL; 3407 bool last_pgdat_dirty_ok = false; 3408 bool no_fallback; 3409 3410 retry: 3411 /* 3412 * Scan zonelist, looking for a zone with enough free. 3413 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 
3414 */ 3415 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3416 z = ac->preferred_zoneref; 3417 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3418 ac->nodemask) { 3419 struct page *page; 3420 unsigned long mark; 3421 3422 if (cpusets_enabled() && 3423 (alloc_flags & ALLOC_CPUSET) && 3424 !__cpuset_zone_allowed(zone, gfp_mask)) 3425 continue; 3426 /* 3427 * When allocating a page cache page for writing, we 3428 * want to get it from a node that is within its dirty 3429 * limit, such that no single node holds more than its 3430 * proportional share of globally allowed dirty pages. 3431 * The dirty limits take into account the node's 3432 * lowmem reserves and high watermark so that kswapd 3433 * should be able to balance it without having to 3434 * write pages from its LRU list. 3435 * 3436 * XXX: For now, allow allocations to potentially 3437 * exceed the per-node dirty limit in the slowpath 3438 * (spread_dirty_pages unset) before going into reclaim, 3439 * which is important when on a NUMA setup the allowed 3440 * nodes are together not big enough to reach the 3441 * global limit. The proper fix for these situations 3442 * will require awareness of nodes in the 3443 * dirty-throttling and the flusher threads. 3444 */ 3445 if (ac->spread_dirty_pages) { 3446 if (last_pgdat != zone->zone_pgdat) { 3447 last_pgdat = zone->zone_pgdat; 3448 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3449 } 3450 3451 if (!last_pgdat_dirty_ok) 3452 continue; 3453 } 3454 3455 if (no_fallback && nr_online_nodes > 1 && 3456 zone != ac->preferred_zoneref->zone) { 3457 int local_nid; 3458 3459 /* 3460 * If moving to a remote node, retry but allow 3461 * fragmenting fallbacks. Locality is more important 3462 * than fragmentation avoidance. 3463 */ 3464 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 3465 if (zone_to_nid(zone) != local_nid) { 3466 alloc_flags &= ~ALLOC_NOFRAGMENT; 3467 goto retry; 3468 } 3469 } 3470 3471 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3472 if (!zone_watermark_fast(zone, order, mark, 3473 ac->highest_zoneidx, alloc_flags, 3474 gfp_mask)) { 3475 int ret; 3476 3477 if (has_unaccepted_memory()) { 3478 if (try_to_accept_memory(zone, order)) 3479 goto try_this_zone; 3480 } 3481 3482 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3483 /* 3484 * Watermark failed for this zone, but see if we can 3485 * grow this zone if it contains deferred pages. 
3486 */ 3487 if (deferred_pages_enabled()) { 3488 if (_deferred_grow_zone(zone, order)) 3489 goto try_this_zone; 3490 } 3491 #endif 3492 /* Checked here to keep the fast path fast */ 3493 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3494 if (alloc_flags & ALLOC_NO_WATERMARKS) 3495 goto try_this_zone; 3496 3497 if (!node_reclaim_enabled() || 3498 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 3499 continue; 3500 3501 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3502 switch (ret) { 3503 case NODE_RECLAIM_NOSCAN: 3504 /* did not scan */ 3505 continue; 3506 case NODE_RECLAIM_FULL: 3507 /* scanned but unreclaimable */ 3508 continue; 3509 default: 3510 /* did we reclaim enough */ 3511 if (zone_watermark_ok(zone, order, mark, 3512 ac->highest_zoneidx, alloc_flags)) 3513 goto try_this_zone; 3514 3515 continue; 3516 } 3517 } 3518 3519 try_this_zone: 3520 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 3521 gfp_mask, alloc_flags, ac->migratetype); 3522 if (page) { 3523 prep_new_page(page, order, gfp_mask, alloc_flags); 3524 3525 /* 3526 * If this is a high-order atomic allocation then check 3527 * if the pageblock should be reserved for the future 3528 */ 3529 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3530 reserve_highatomic_pageblock(page, zone, order); 3531 3532 return page; 3533 } else { 3534 if (has_unaccepted_memory()) { 3535 if (try_to_accept_memory(zone, order)) 3536 goto try_this_zone; 3537 } 3538 3539 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3540 /* Try again if zone has deferred pages */ 3541 if (deferred_pages_enabled()) { 3542 if (_deferred_grow_zone(zone, order)) 3543 goto try_this_zone; 3544 } 3545 #endif 3546 } 3547 } 3548 3549 /* 3550 * It's possible on a UMA machine to get through all zones that are 3551 * fragmented. If avoiding fragmentation, reset and try again. 3552 */ 3553 if (no_fallback) { 3554 alloc_flags &= ~ALLOC_NOFRAGMENT; 3555 goto retry; 3556 } 3557 3558 return NULL; 3559 } 3560 3561 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3562 { 3563 unsigned int filter = SHOW_MEM_FILTER_NODES; 3564 3565 /* 3566 * This documents exceptions given to allocations in certain 3567 * contexts that are allowed to allocate outside current's set 3568 * of allowed nodes. 3569 */ 3570 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3571 if (tsk_is_oom_victim(current) || 3572 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3573 filter &= ~SHOW_MEM_FILTER_NODES; 3574 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3575 filter &= ~SHOW_MEM_FILTER_NODES; 3576 3577 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3578 } 3579 3580 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3581 { 3582 struct va_format vaf; 3583 va_list args; 3584 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3585 3586 if ((gfp_mask & __GFP_NOWARN) || 3587 !__ratelimit(&nopage_rs) || 3588 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3589 return; 3590 3591 va_start(args, fmt); 3592 vaf.fmt = fmt; 3593 vaf.va = &args; 3594 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3595 current->comm, &vaf, gfp_mask, &gfp_mask, 3596 nodemask_pr_args(nodemask)); 3597 va_end(args); 3598 3599 cpuset_print_current_mems_allowed(); 3600 pr_cont("\n"); 3601 dump_stack(); 3602 warn_alloc_show_mem(gfp_mask, nodemask); 3603 } 3604 3605 static inline struct page * 3606 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3607 unsigned int alloc_flags, 3608 const struct alloc_context *ac) 3609 { 3610 struct page *page; 3611 3612 page = get_page_from_freelist(gfp_mask, order, 3613 alloc_flags|ALLOC_CPUSET, ac); 3614 /* 3615 * fallback to ignore cpuset restriction if our nodes 3616 * are depleted 3617 */ 3618 if (!page) 3619 page = get_page_from_freelist(gfp_mask, order, 3620 alloc_flags, ac); 3621 3622 return page; 3623 } 3624 3625 static inline struct page * 3626 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3627 const struct alloc_context *ac, unsigned long *did_some_progress) 3628 { 3629 struct oom_control oc = { 3630 .zonelist = ac->zonelist, 3631 .nodemask = ac->nodemask, 3632 .memcg = NULL, 3633 .gfp_mask = gfp_mask, 3634 .order = order, 3635 }; 3636 struct page *page; 3637 3638 *did_some_progress = 0; 3639 3640 /* 3641 * Acquire the oom lock. If that fails, somebody else is 3642 * making progress for us. 3643 */ 3644 if (!mutex_trylock(&oom_lock)) { 3645 *did_some_progress = 1; 3646 schedule_timeout_uninterruptible(1); 3647 return NULL; 3648 } 3649 3650 /* 3651 * Go through the zonelist yet one more time, keep very high watermark 3652 * here, this is only to catch a parallel oom killing, we must fail if 3653 * we're still under heavy pressure. But make sure that this reclaim 3654 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3655 * allocation which will never fail due to oom_lock already held. 3656 */ 3657 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3658 ~__GFP_DIRECT_RECLAIM, order, 3659 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3660 if (page) 3661 goto out; 3662 3663 /* Coredumps can quickly deplete all memory reserves */ 3664 if (current->flags & PF_DUMPCORE) 3665 goto out; 3666 /* The OOM killer will not help higher order allocs */ 3667 if (order > PAGE_ALLOC_COSTLY_ORDER) 3668 goto out; 3669 /* 3670 * We have already exhausted all our reclaim opportunities without any 3671 * success so it is time to admit defeat. We will skip the OOM killer 3672 * because it is very likely that the caller has a more reasonable 3673 * fallback than shooting a random task. 3674 * 3675 * The OOM killer may not free memory on a specific node. 3676 */ 3677 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3678 goto out; 3679 /* The OOM killer does not needlessly kill tasks for lowmem */ 3680 if (ac->highest_zoneidx < ZONE_NORMAL) 3681 goto out; 3682 if (pm_suspended_storage()) 3683 goto out; 3684 /* 3685 * XXX: GFP_NOFS allocations should rather fail than rely on 3686 * other request to make a forward progress. 3687 * We are in an unfortunate situation where out_of_memory cannot 3688 * do much for this context but let's try it to at least get 3689 * access to memory reserved if the current task is killed (see 3690 * out_of_memory). 
Once filesystems are ready to handle allocation
3691	 * failures more gracefully we should just bail out here.
3692	 */
3693
3694	/* Exhausted what can be done so it's blame time */
3695	if (out_of_memory(&oc) ||
3696	    WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3697		*did_some_progress = 1;
3698
3699		/*
3700		 * Help non-failing allocations by giving them access to memory
3701		 * reserves
3702		 */
3703		if (gfp_mask & __GFP_NOFAIL)
3704			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3705					ALLOC_NO_WATERMARKS, ac);
3706	}
3707 out:
3708	mutex_unlock(&oom_lock);
3709	return page;
3710 }
3711
3712 /*
3713  * Maximum number of compaction retries with progress before the OOM
3714  * killer is considered the only way to move forward.
3715  */
3716 #define MAX_COMPACT_RETRIES 16
3717
3718 #ifdef CONFIG_COMPACTION
3719 /* Try memory compaction for high-order allocations before reclaim */
3720 static struct page *
3721 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3722		unsigned int alloc_flags, const struct alloc_context *ac,
3723		enum compact_priority prio, enum compact_result *compact_result)
3724 {
3725	struct page *page = NULL;
3726	unsigned long pflags;
3727	unsigned int noreclaim_flag;
3728
3729	if (!order)
3730		return NULL;
3731
3732	psi_memstall_enter(&pflags);
3733	delayacct_compact_start();
3734	noreclaim_flag = memalloc_noreclaim_save();
3735
3736	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3737								prio, &page);
3738
3739	memalloc_noreclaim_restore(noreclaim_flag);
3740	psi_memstall_leave(&pflags);
3741	delayacct_compact_end();
3742
3743	if (*compact_result == COMPACT_SKIPPED)
3744		return NULL;
3745	/*
3746	 * Compaction wasn't deferred or skipped in at least one zone, so
3747	 * count a compaction stall
3748	 */
3749	count_vm_event(COMPACTSTALL);
3750
3751	/* Prep a captured page if available */
3752	if (page)
3753		prep_new_page(page, order, gfp_mask, alloc_flags);
3754
3755	/* Try to get a page from the freelist if available */
3756	if (!page)
3757		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3758
3759	if (page) {
3760		struct zone *zone = page_zone(page);
3761
3762		zone->compact_blockskip_flush = false;
3763		compaction_defer_reset(zone, order, true);
3764		count_vm_event(COMPACTSUCCESS);
3765		return page;
3766	}
3767
3768	/*
3769	 * It's bad if a compaction run occurs and fails. The most likely
3770	 * reason is that pages exist, but not enough to satisfy watermarks.
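	 *
	 * These events are exported via /proc/vmstat as compact_stall,
	 * compact_fail and compact_success, which is a convenient way to
	 * watch how often allocations hit this path.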
3771 */ 3772 count_vm_event(COMPACTFAIL); 3773 3774 cond_resched(); 3775 3776 return NULL; 3777 } 3778 3779 static inline bool 3780 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3781 enum compact_result compact_result, 3782 enum compact_priority *compact_priority, 3783 int *compaction_retries) 3784 { 3785 int max_retries = MAX_COMPACT_RETRIES; 3786 int min_priority; 3787 bool ret = false; 3788 int retries = *compaction_retries; 3789 enum compact_priority priority = *compact_priority; 3790 3791 if (!order) 3792 return false; 3793 3794 if (fatal_signal_pending(current)) 3795 return false; 3796 3797 if (compaction_made_progress(compact_result)) 3798 (*compaction_retries)++; 3799 3800 /* 3801 * compaction considers all the zone as desperately out of memory 3802 * so it doesn't really make much sense to retry except when the 3803 * failure could be caused by insufficient priority 3804 */ 3805 if (compaction_failed(compact_result)) 3806 goto check_priority; 3807 3808 /* 3809 * compaction was skipped because there are not enough order-0 pages 3810 * to work with, so we retry only if it looks like reclaim can help. 3811 */ 3812 if (compaction_needs_reclaim(compact_result)) { 3813 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3814 goto out; 3815 } 3816 3817 /* 3818 * make sure the compaction wasn't deferred or didn't bail out early 3819 * due to locks contention before we declare that we should give up. 3820 * But the next retry should use a higher priority if allowed, so 3821 * we don't just keep bailing out endlessly. 3822 */ 3823 if (compaction_withdrawn(compact_result)) { 3824 goto check_priority; 3825 } 3826 3827 /* 3828 * !costly requests are much more important than __GFP_RETRY_MAYFAIL 3829 * costly ones because they are de facto nofail and invoke OOM 3830 * killer to move on while costly can fail and users are ready 3831 * to cope with that. 1/4 retries is rather arbitrary but we 3832 * would need much more detailed feedback from compaction to 3833 * make a better decision. 3834 */ 3835 if (order > PAGE_ALLOC_COSTLY_ORDER) 3836 max_retries /= 4; 3837 if (*compaction_retries <= max_retries) { 3838 ret = true; 3839 goto out; 3840 } 3841 3842 /* 3843 * Make sure there are attempts at the highest priority if we exhausted 3844 * all retries or failed at the lower priorities. 3845 */ 3846 check_priority: 3847 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
3848 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3849 3850 if (*compact_priority > min_priority) { 3851 (*compact_priority)--; 3852 *compaction_retries = 0; 3853 ret = true; 3854 } 3855 out: 3856 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3857 return ret; 3858 } 3859 #else 3860 static inline struct page * 3861 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3862 unsigned int alloc_flags, const struct alloc_context *ac, 3863 enum compact_priority prio, enum compact_result *compact_result) 3864 { 3865 *compact_result = COMPACT_SKIPPED; 3866 return NULL; 3867 } 3868 3869 static inline bool 3870 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3871 enum compact_result compact_result, 3872 enum compact_priority *compact_priority, 3873 int *compaction_retries) 3874 { 3875 struct zone *zone; 3876 struct zoneref *z; 3877 3878 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3879 return false; 3880 3881 /* 3882 * There are setups with compaction disabled which would prefer to loop 3883 * inside the allocator rather than hit the oom killer prematurely. 3884 * Let's give them a good hope and keep retrying while the order-0 3885 * watermarks are OK. 3886 */ 3887 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3888 ac->highest_zoneidx, ac->nodemask) { 3889 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3890 ac->highest_zoneidx, alloc_flags)) 3891 return true; 3892 } 3893 return false; 3894 } 3895 #endif /* CONFIG_COMPACTION */ 3896 3897 #ifdef CONFIG_LOCKDEP 3898 static struct lockdep_map __fs_reclaim_map = 3899 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 3900 3901 static bool __need_reclaim(gfp_t gfp_mask) 3902 { 3903 /* no reclaim without waiting on it */ 3904 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 3905 return false; 3906 3907 /* this guy won't enter reclaim */ 3908 if (current->flags & PF_MEMALLOC) 3909 return false; 3910 3911 if (gfp_mask & __GFP_NOLOCKDEP) 3912 return false; 3913 3914 return true; 3915 } 3916 3917 void __fs_reclaim_acquire(unsigned long ip) 3918 { 3919 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 3920 } 3921 3922 void __fs_reclaim_release(unsigned long ip) 3923 { 3924 lock_release(&__fs_reclaim_map, ip); 3925 } 3926 3927 void fs_reclaim_acquire(gfp_t gfp_mask) 3928 { 3929 gfp_mask = current_gfp_context(gfp_mask); 3930 3931 if (__need_reclaim(gfp_mask)) { 3932 if (gfp_mask & __GFP_FS) 3933 __fs_reclaim_acquire(_RET_IP_); 3934 3935 #ifdef CONFIG_MMU_NOTIFIER 3936 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 3937 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 3938 #endif 3939 3940 } 3941 } 3942 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 3943 3944 void fs_reclaim_release(gfp_t gfp_mask) 3945 { 3946 gfp_mask = current_gfp_context(gfp_mask); 3947 3948 if (__need_reclaim(gfp_mask)) { 3949 if (gfp_mask & __GFP_FS) 3950 __fs_reclaim_release(_RET_IP_); 3951 } 3952 } 3953 EXPORT_SYMBOL_GPL(fs_reclaim_release); 3954 #endif 3955 3956 /* 3957 * Zonelists may change due to hotplug during allocation. Detect when zonelists 3958 * have been rebuilt so allocation retries. Reader side does not lock and 3959 * retries the allocation if zonelist changes. Writer side is protected by the 3960 * embedded spin_lock. 
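 *
 * The reader side is the cookie pattern used by __alloc_pages_slowpath()
 * (sketch):
 *
 *	cookie = zonelist_iter_begin();
 *	... allocation attempts ...
 *	if (check_retry_zonelist(cookie))
 *		goto restart;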
3961 */ 3962 static DEFINE_SEQLOCK(zonelist_update_seq); 3963 3964 static unsigned int zonelist_iter_begin(void) 3965 { 3966 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3967 return read_seqbegin(&zonelist_update_seq); 3968 3969 return 0; 3970 } 3971 3972 static unsigned int check_retry_zonelist(unsigned int seq) 3973 { 3974 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3975 return read_seqretry(&zonelist_update_seq, seq); 3976 3977 return seq; 3978 } 3979 3980 /* Perform direct synchronous page reclaim */ 3981 static unsigned long 3982 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3983 const struct alloc_context *ac) 3984 { 3985 unsigned int noreclaim_flag; 3986 unsigned long progress; 3987 3988 cond_resched(); 3989 3990 /* We now go into synchronous reclaim */ 3991 cpuset_memory_pressure_bump(); 3992 fs_reclaim_acquire(gfp_mask); 3993 noreclaim_flag = memalloc_noreclaim_save(); 3994 3995 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3996 ac->nodemask); 3997 3998 memalloc_noreclaim_restore(noreclaim_flag); 3999 fs_reclaim_release(gfp_mask); 4000 4001 cond_resched(); 4002 4003 return progress; 4004 } 4005 4006 /* The really slow allocator path where we enter direct reclaim */ 4007 static inline struct page * 4008 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4009 unsigned int alloc_flags, const struct alloc_context *ac, 4010 unsigned long *did_some_progress) 4011 { 4012 struct page *page = NULL; 4013 unsigned long pflags; 4014 bool drained = false; 4015 4016 psi_memstall_enter(&pflags); 4017 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4018 if (unlikely(!(*did_some_progress))) 4019 goto out; 4020 4021 retry: 4022 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4023 4024 /* 4025 * If an allocation failed after direct reclaim, it could be because 4026 * pages are pinned on the per-cpu lists or in high alloc reserves. 4027 * Shrink them and try again 4028 */ 4029 if (!page && !drained) { 4030 unreserve_highatomic_pageblock(ac, false); 4031 drain_all_pages(NULL); 4032 drained = true; 4033 goto retry; 4034 } 4035 out: 4036 psi_memstall_leave(&pflags); 4037 4038 return page; 4039 } 4040 4041 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4042 const struct alloc_context *ac) 4043 { 4044 struct zoneref *z; 4045 struct zone *zone; 4046 pg_data_t *last_pgdat = NULL; 4047 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4048 4049 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4050 ac->nodemask) { 4051 if (!managed_zone(zone)) 4052 continue; 4053 if (last_pgdat != zone->zone_pgdat) { 4054 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 4055 last_pgdat = zone->zone_pgdat; 4056 } 4057 } 4058 } 4059 4060 static inline unsigned int 4061 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 4062 { 4063 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4064 4065 /* 4066 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 4067 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4068 * to save two branches. 4069 */ 4070 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 4071 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4072 4073 /* 4074 * The caller may dip into page reserves a bit more if the caller 4075 * cannot run direct reclaim, or if the caller has realtime scheduling 4076 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4077 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 
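	 *
	 * Worked example: GFP_ATOMIC is __GFP_HIGH | __GFP_KSWAPD_RECLAIM,
	 * so the bitwise copy below yields ALLOC_MIN_RESERVE | ALLOC_KSWAPD;
	 * the !__GFP_DIRECT_RECLAIM branch then adds ALLOC_NON_BLOCK (and
	 * ALLOC_HIGHATOMIC for order > 0), since GFP_ATOMIC does not carry
	 * __GFP_NOMEMALLOC.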
4078 */ 4079 alloc_flags |= (__force int) 4080 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4081 4082 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 4083 /* 4084 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4085 * if it can't schedule. 4086 */ 4087 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 4088 alloc_flags |= ALLOC_NON_BLOCK; 4089 4090 if (order > 0) 4091 alloc_flags |= ALLOC_HIGHATOMIC; 4092 } 4093 4094 /* 4095 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 4096 * GFP_ATOMIC) rather than fail, see the comment for 4097 * cpuset_node_allowed(). 4098 */ 4099 if (alloc_flags & ALLOC_MIN_RESERVE) 4100 alloc_flags &= ~ALLOC_CPUSET; 4101 } else if (unlikely(rt_task(current)) && in_task()) 4102 alloc_flags |= ALLOC_MIN_RESERVE; 4103 4104 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4105 4106 return alloc_flags; 4107 } 4108 4109 static bool oom_reserves_allowed(struct task_struct *tsk) 4110 { 4111 if (!tsk_is_oom_victim(tsk)) 4112 return false; 4113 4114 /* 4115 * !MMU doesn't have oom reaper so give access to memory reserves 4116 * only to the thread with TIF_MEMDIE set 4117 */ 4118 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4119 return false; 4120 4121 return true; 4122 } 4123 4124 /* 4125 * Distinguish requests which really need access to full memory 4126 * reserves from oom victims which can live with a portion of it 4127 */ 4128 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4129 { 4130 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4131 return 0; 4132 if (gfp_mask & __GFP_MEMALLOC) 4133 return ALLOC_NO_WATERMARKS; 4134 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4135 return ALLOC_NO_WATERMARKS; 4136 if (!in_interrupt()) { 4137 if (current->flags & PF_MEMALLOC) 4138 return ALLOC_NO_WATERMARKS; 4139 else if (oom_reserves_allowed(current)) 4140 return ALLOC_OOM; 4141 } 4142 4143 return 0; 4144 } 4145 4146 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4147 { 4148 return !!__gfp_pfmemalloc_flags(gfp_mask); 4149 } 4150 4151 /* 4152 * Checks whether it makes sense to retry the reclaim to make a forward progress 4153 * for the given allocation request. 4154 * 4155 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4156 * without success, or when we couldn't even meet the watermark if we 4157 * reclaimed all remaining pages on the LRU lists. 4158 * 4159 * Returns true if a retry is viable or false to enter the oom path. 4160 */ 4161 static inline bool 4162 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4163 struct alloc_context *ac, int alloc_flags, 4164 bool did_some_progress, int *no_progress_loops) 4165 { 4166 struct zone *zone; 4167 struct zoneref *z; 4168 bool ret = false; 4169 4170 /* 4171 * Costly allocations might have made a progress but this doesn't mean 4172 * their order will become available due to high fragmentation so 4173 * always increment the no progress counter for them 4174 */ 4175 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4176 *no_progress_loops = 0; 4177 else 4178 (*no_progress_loops)++; 4179 4180 /* 4181 * Make sure we converge to OOM if we cannot make any progress 4182 * several times in the row. 4183 */ 4184 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 4185 /* Before OOM, exhaust highatomic_reserve */ 4186 return unreserve_highatomic_pageblock(ac, true); 4187 } 4188 4189 /* 4190 * Keep reclaiming pages while there is a chance this will lead 4191 * somewhere. 
If none of the target zones can satisfy our allocation 4192 * request even if all reclaimable pages are considered then we are 4193 * screwed and have to go OOM. 4194 */ 4195 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4196 ac->highest_zoneidx, ac->nodemask) { 4197 unsigned long available; 4198 unsigned long reclaimable; 4199 unsigned long min_wmark = min_wmark_pages(zone); 4200 bool wmark; 4201 4202 available = reclaimable = zone_reclaimable_pages(zone); 4203 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4204 4205 /* 4206 * Would the allocation succeed if we reclaimed all 4207 * reclaimable pages? 4208 */ 4209 wmark = __zone_watermark_ok(zone, order, min_wmark, 4210 ac->highest_zoneidx, alloc_flags, available); 4211 trace_reclaim_retry_zone(z, order, reclaimable, 4212 available, min_wmark, *no_progress_loops, wmark); 4213 if (wmark) { 4214 ret = true; 4215 break; 4216 } 4217 } 4218 4219 /* 4220 * Memory allocation/reclaim might be called from a WQ context and the 4221 * current implementation of the WQ concurrency control doesn't 4222 * recognize that a particular WQ is congested if the worker thread is 4223 * looping without ever sleeping. Therefore we have to do a short sleep 4224 * here rather than calling cond_resched(). 4225 */ 4226 if (current->flags & PF_WQ_WORKER) 4227 schedule_timeout_uninterruptible(1); 4228 else 4229 cond_resched(); 4230 return ret; 4231 } 4232 4233 static inline bool 4234 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4235 { 4236 /* 4237 * It's possible that cpuset's mems_allowed and the nodemask from 4238 * mempolicy don't intersect. This should be normally dealt with by 4239 * policy_nodemask(), but it's possible to race with cpuset update in 4240 * such a way the check therein was true, and then it became false 4241 * before we got our cpuset_mems_cookie here. 4242 * This assumes that for all allocations, ac->nodemask can come only 4243 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4244 * when it does not intersect with the cpuset restrictions) or the 4245 * caller can deal with a violated nodemask. 4246 */ 4247 if (cpusets_enabled() && ac->nodemask && 4248 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 4249 ac->nodemask = NULL; 4250 return true; 4251 } 4252 4253 /* 4254 * When updating a task's mems_allowed or mempolicy nodemask, it is 4255 * possible to race with parallel threads in such a way that our 4256 * allocation can fail while the mask is being updated. If we are about 4257 * to fail, check if the cpuset changed during allocation and if so, 4258 * retry. 
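	 *
	 * The cookie is the usual sequence-counter pattern (sketch):
	 *
	 *	cookie = read_mems_allowed_begin();
	 *	... allocation attempts ...
	 *	if (failed && read_mems_allowed_retry(cookie))
	 *		goto restart;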
4259	 */
4260	if (read_mems_allowed_retry(cpuset_mems_cookie))
4261		return true;
4262
4263	return false;
4264 }
4265
4266 static inline struct page *
4267 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
4268						struct alloc_context *ac)
4269 {
4270	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
4271	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
4272	struct page *page = NULL;
4273	unsigned int alloc_flags;
4274	unsigned long did_some_progress;
4275	enum compact_priority compact_priority;
4276	enum compact_result compact_result;
4277	int compaction_retries;
4278	int no_progress_loops;
4279	unsigned int cpuset_mems_cookie;
4280	unsigned int zonelist_iter_cookie;
4281	int reserve_flags;
4282
4283 restart:
4284	compaction_retries = 0;
4285	no_progress_loops = 0;
4286	compact_priority = DEF_COMPACT_PRIORITY;
4287	cpuset_mems_cookie = read_mems_allowed_begin();
4288	zonelist_iter_cookie = zonelist_iter_begin();
4289
4290	/*
4291	 * The fast path uses conservative alloc_flags to succeed only until
4292	 * kswapd needs to be woken up, and to avoid the cost of setting up
4293	 * alloc_flags precisely. So we do that now.
4294	 */
4295	alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
4296
4297	/*
4298	 * We need to recalculate the starting point for the zonelist iterator
4299	 * because we might have used a different nodemask in the fast path, or
4300	 * there was a cpuset modification and we are retrying - otherwise we
4301	 * could end up iterating over non-eligible zones endlessly.
4302	 */
4303	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4304					ac->highest_zoneidx, ac->nodemask);
4305	if (!ac->preferred_zoneref->zone)
4306		goto nopage;
4307
4308	/*
4309	 * Check for insane configurations where the cpuset doesn't contain
4310	 * any suitable zone to satisfy the request - e.g. non-movable
4311	 * GFP_HIGHUSER allocations from MOVABLE nodes only.
4312	 */
4313	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
4314		struct zoneref *z = first_zones_zonelist(ac->zonelist,
4315					ac->highest_zoneidx,
4316					&cpuset_current_mems_allowed);
4317		if (!z->zone)
4318			goto nopage;
4319	}
4320
4321	if (alloc_flags & ALLOC_KSWAPD)
4322		wake_all_kswapds(order, gfp_mask, ac);
4323
4324	/*
4325	 * The adjusted alloc_flags might result in immediate success, so try
4326	 * that first
4327	 */
4328	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4329	if (page)
4330		goto got_pg;
4331
4332	/*
4333	 * For costly allocations, try direct compaction first, as it's likely
4334	 * that we have enough base pages and don't need to reclaim. For non-
4335	 * movable high-order allocations, do that as well, as compaction will
4336	 * try to prevent permanent fragmentation by migrating from blocks of
4337	 * the same migratetype.
4338	 * Don't try this for allocations that are allowed to ignore
4339	 * watermarks, as the ALLOC_NO_WATERMARKS attempt hasn't happened yet.
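	 *
	 * A concrete costly case is a THP page fault: it allocates at
	 * HPAGE_PMD_ORDER (9 with 4KiB base pages), well above
	 * PAGE_ALLOC_COSTLY_ORDER (3), so compaction is tried before any
	 * direct reclaim.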
4340 */ 4341 if (can_direct_reclaim && 4342 (costly_order || 4343 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4344 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4345 page = __alloc_pages_direct_compact(gfp_mask, order, 4346 alloc_flags, ac, 4347 INIT_COMPACT_PRIORITY, 4348 &compact_result); 4349 if (page) 4350 goto got_pg; 4351 4352 /* 4353 * Checks for costly allocations with __GFP_NORETRY, which 4354 * includes some THP page fault allocations 4355 */ 4356 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4357 /* 4358 * If allocating entire pageblock(s) and compaction 4359 * failed because all zones are below low watermarks 4360 * or is prohibited because it recently failed at this 4361 * order, fail immediately unless the allocator has 4362 * requested compaction and reclaim retry. 4363 * 4364 * Reclaim is 4365 * - potentially very expensive because zones are far 4366 * below their low watermarks or this is part of very 4367 * bursty high order allocations, 4368 * - not guaranteed to help because isolate_freepages() 4369 * may not iterate over freed pages as part of its 4370 * linear scan, and 4371 * - unlikely to make entire pageblocks free on its 4372 * own. 4373 */ 4374 if (compact_result == COMPACT_SKIPPED || 4375 compact_result == COMPACT_DEFERRED) 4376 goto nopage; 4377 4378 /* 4379 * Looks like reclaim/compaction is worth trying, but 4380 * sync compaction could be very expensive, so keep 4381 * using async compaction. 4382 */ 4383 compact_priority = INIT_COMPACT_PRIORITY; 4384 } 4385 } 4386 4387 retry: 4388 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4389 if (alloc_flags & ALLOC_KSWAPD) 4390 wake_all_kswapds(order, gfp_mask, ac); 4391 4392 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4393 if (reserve_flags) 4394 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4395 (alloc_flags & ALLOC_KSWAPD); 4396 4397 /* 4398 * Reset the nodemask and zonelist iterators if memory policies can be 4399 * ignored. These allocations are high priority and system rather than 4400 * user oriented. 
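	 *
	 * reserve_flags here is what __gfp_pfmemalloc_flags() returned above:
	 * ALLOC_NO_WATERMARKS for __GFP_MEMALLOC/PF_MEMALLOC contexts, or
	 * ALLOC_OOM for an OOM victim.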
4401	 */
4402	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4403		ac->nodemask = NULL;
4404		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4405					ac->highest_zoneidx, ac->nodemask);
4406	}
4407
4408	/* Attempt with potentially adjusted zonelist and alloc_flags */
4409	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4410	if (page)
4411		goto got_pg;
4412
4413	/* Caller is not willing to reclaim, we can't balance anything */
4414	if (!can_direct_reclaim)
4415		goto nopage;
4416
4417	/* Avoid recursion of direct reclaim */
4418	if (current->flags & PF_MEMALLOC)
4419		goto nopage;
4420
4421	/* Try direct reclaim and then allocating */
4422	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4423							&did_some_progress);
4424	if (page)
4425		goto got_pg;
4426
4427	/* Try direct compaction and then allocating */
4428	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4429					compact_priority, &compact_result);
4430	if (page)
4431		goto got_pg;
4432
4433	/* Do not loop if specifically requested */
4434	if (gfp_mask & __GFP_NORETRY)
4435		goto nopage;
4436
4437	/*
4438	 * Do not retry costly high order allocations unless they are
4439	 * __GFP_RETRY_MAYFAIL
4440	 */
4441	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
4442		goto nopage;
4443
4444	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4445				 did_some_progress > 0, &no_progress_loops))
4446		goto retry;
4447
4448	/*
4449	 * It doesn't make any sense to retry compaction if order-0 reclaim is
4450	 * not able to make any progress because the current implementation of
4451	 * compaction depends on a sufficient amount of free memory
4452	 * (see __compaction_suitable)
4453	 */
4454	if (did_some_progress > 0 &&
4455			should_compact_retry(ac, order, alloc_flags,
4456				compact_result, &compact_priority,
4457				&compaction_retries))
4458		goto retry;
4459
4460
4461	/*
4462	 * Deal with possible cpuset update races or zonelist updates to avoid
4463	 * an unnecessary OOM kill.
4464	 */
4465	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4466	    check_retry_zonelist(zonelist_iter_cookie))
4467		goto restart;
4468
4469	/* Reclaim has failed us, start killing things */
4470	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4471	if (page)
4472		goto got_pg;
4473
4474	/* Avoid allocations with no watermarks from looping endlessly */
4475	if (tsk_is_oom_victim(current) &&
4476	    (alloc_flags & ALLOC_OOM ||
4477	     (gfp_mask & __GFP_NOMEMALLOC)))
4478		goto nopage;
4479
4480	/* Retry as long as the OOM killer is making progress */
4481	if (did_some_progress) {
4482		no_progress_loops = 0;
4483		goto retry;
4484	}
4485
4486 nopage:
4487	/*
4488	 * Deal with possible cpuset update races or zonelist updates to avoid
4489	 * an unnecessary OOM kill.
4490	 */
4491	if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4492	    check_retry_zonelist(zonelist_iter_cookie))
4493		goto restart;
4494
4495	/*
4496	 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
4497	 * we always retry
4498	 */
4499	if (gfp_mask & __GFP_NOFAIL) {
4500		/*
4501		 * All existing users of __GFP_NOFAIL are blockable, so warn
4502		 * about any new users that actually require GFP_NOWAIT
4503		 */
4504		if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4505			goto fail;
4506
4507		/*
4508		 * A PF_MEMALLOC request from this context is rather bizarre
4509		 * because we cannot reclaim anything and can only loop waiting
4510		 * for somebody to do the work for us
4511		 */
4512		WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4513
4514		/*
4515		 * Non-failing costly orders are a hard requirement which we
4516		 * are not well prepared for, so let's warn about these users
4517		 * so that we can identify them and convert them to something
4518		 * else.
4519		 */
4520		WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4521
4522		/*
4523		 * Help non-failing allocations by giving some access to memory
4524		 * reserves normally used for high priority non-blocking
4525		 * allocations but do not use ALLOC_NO_WATERMARKS because this
4526		 * could deplete whole memory reserves which would just make
4527		 * the situation worse.
4528		 */
4529		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4530		if (page)
4531			goto got_pg;
4532
4533		cond_resched();
4534		goto retry;
4535	}
4536 fail:
4537	warn_alloc(gfp_mask, ac->nodemask,
4538			"page allocation failure: order:%u", order);
4539 got_pg:
4540	return page;
4541 }
4542
4543 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4544		int preferred_nid, nodemask_t *nodemask,
4545		struct alloc_context *ac, gfp_t *alloc_gfp,
4546		unsigned int *alloc_flags)
4547 {
4548	ac->highest_zoneidx = gfp_zone(gfp_mask);
4549	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4550	ac->nodemask = nodemask;
4551	ac->migratetype = gfp_migratetype(gfp_mask);
4552
4553	if (cpusets_enabled()) {
4554		*alloc_gfp |= __GFP_HARDWALL;
4555		/*
4556		 * In interrupt context the cpuset of the current task is
4557		 * irrelevant, so any node is ok.
4558		 */
4559		if (in_task() && !ac->nodemask)
4560			ac->nodemask = &cpuset_current_mems_allowed;
4561		else
4562			*alloc_flags |= ALLOC_CPUSET;
4563	}
4564
4565	might_alloc(gfp_mask);
4566
4567	if (should_fail_alloc_page(gfp_mask, order))
4568		return false;
4569
4570	*alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4571
4572	/* Dirty zone balancing only done in the fast path */
4573	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4574
4575	/*
4576	 * The preferred zone is used for statistics but crucially it is
4577	 * also used as the starting point for the zonelist iterator. It
4578	 * may get reset for allocations that ignore memory policies.
4579	 */
4580	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4581					ac->highest_zoneidx, ac->nodemask);
4582
4583	return true;
4584 }
4585
4586 /*
4587  * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
4588  * @gfp: GFP flags for the allocation
4589  * @preferred_nid: The preferred NUMA node ID to allocate from
4590  * @nodemask: Set of nodes to allocate from, may be NULL
4591  * @nr_pages: The number of pages desired on the list or array
4592  * @page_list: Optional list to store the allocated pages
4593  * @page_array: Optional array to store the pages
4594  *
4595  * This is a batched version of the page allocator that attempts to
4596  * allocate nr_pages quickly. Pages are added to page_list if page_list
4597  * is not NULL, otherwise it is assumed that the page_array is valid.
4598  *
4599  * For lists, nr_pages is the number of pages that should be allocated.
4600  *
4601  * For arrays, only NULL elements are populated with pages and nr_pages
4602  * is the maximum number of pages that will be stored in the array.
4603  *
4604  * Returns the number of pages on the list or array.
4605  */
4606 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
4607			nodemask_t *nodemask, int nr_pages,
4608			struct list_head *page_list,
4609			struct page **page_array)
4610 {
4611	struct page *page;
4612	unsigned long __maybe_unused UP_flags;
4613	struct zone *zone;
4614	struct zoneref *z;
4615	struct per_cpu_pages *pcp;
4616	struct list_head *pcp_list;
4617	struct alloc_context ac;
4618	gfp_t alloc_gfp;
4619	unsigned int alloc_flags = ALLOC_WMARK_LOW;
4620	int nr_populated = 0, nr_account = 0;
4621
4622	/*
4623	 * Skip populated array elements to determine if any pages need
4624	 * to be allocated before disabling IRQs.
4625	 */
4626	while (page_array && nr_populated < nr_pages && page_array[nr_populated])
4627		nr_populated++;
4628
4629	/* No pages requested? */
4630	if (unlikely(nr_pages <= 0))
4631		goto out;
4632
4633	/* Already populated array? */
4634	if (unlikely(page_array && nr_pages - nr_populated == 0))
4635		goto out;
4636
4637	/* Bulk allocator does not support memcg accounting. */
4638	if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT))
4639		goto failed;
4640
4641	/* Use the single page allocator for one page. */
4642	if (nr_pages - nr_populated == 1)
4643		goto failed;
4644
4645 #ifdef CONFIG_PAGE_OWNER
4646	/*
4647	 * PAGE_OWNER may recurse into the allocator to allocate space to
4648	 * save the stack with pagesets.lock held. Releasing/reacquiring
4649	 * removes much of the performance benefit of bulk allocation, so
4650	 * force the caller to allocate one page at a time, which performs
4651	 * about as well as adding this complexity to the bulk allocator.
4652	 */
4653	if (static_branch_unlikely(&page_owner_inited))
4654		goto failed;
4655 #endif
4656
4657	/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
4658	gfp &= gfp_allowed_mask;
4659	alloc_gfp = gfp;
4660	if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
4661		goto out;
4662	gfp = alloc_gfp;
4663
4664	/* Find an allowed local zone that meets the low watermark.
	 */
4665	for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
4666		unsigned long mark;
4667
4668		if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
4669		    !__cpuset_zone_allowed(zone, gfp)) {
4670			continue;
4671		}
4672
4673		if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone &&
4674		    zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) {
4675			goto failed;
4676		}
4677
4678		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages;
4679		if (zone_watermark_fast(zone, 0, mark,
4680				zonelist_zone_idx(ac.preferred_zoneref),
4681				alloc_flags, gfp)) {
4682			break;
4683		}
4684	}
4685
4686	/*
4687	 * If there are no allowed local zones that meet the watermarks then
4688	 * try to allocate a single page and reclaim if necessary.
4689	 */
4690	if (unlikely(!zone))
4691		goto failed;
4692
4693	/* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */
4694	pcp_trylock_prepare(UP_flags);
4695	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
4696	if (!pcp)
4697		goto failed_irq;
4698
4699	/* Attempt the batch allocation */
4700	pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
4701	while (nr_populated < nr_pages) {
4702
4703		/* Skip existing pages */
4704		if (page_array && page_array[nr_populated]) {
4705			nr_populated++;
4706			continue;
4707		}
4708
4709		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
4710							pcp, pcp_list);
4711		if (unlikely(!page)) {
4712			/* Try to allocate at least one page */
4713			if (!nr_account) {
4714				pcp_spin_unlock(pcp);
4715				goto failed_irq;
4716			}
4717			break;
4718		}
4719		nr_account++;
4720
4721		prep_new_page(page, 0, gfp, 0);
4722		if (page_list)
4723			list_add(&page->lru, page_list);
4724		else
4725			page_array[nr_populated] = page;
4726		nr_populated++;
4727	}
4728
4729	pcp_spin_unlock(pcp);
4730	pcp_trylock_finish(UP_flags);
4731
4732	__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
4733	zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
4734
4735 out:
4736	return nr_populated;
4737
4738 failed_irq:
4739	pcp_trylock_finish(UP_flags);
4740
4741 failed:
4742	page = __alloc_pages(gfp, 0, preferred_nid, nodemask);
4743	if (page) {
4744		if (page_list)
4745			list_add(&page->lru, page_list);
4746		else
4747			page_array[nr_populated] = page;
4748		nr_populated++;
4749	}
4750
4751	goto out;
4752 }
4753 EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
4754
4755 /*
4756  * This is the 'heart' of the zoned buddy allocator.
4757  */
4758 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
4759							nodemask_t *nodemask)
4760 {
4761	struct page *page;
4762	unsigned int alloc_flags = ALLOC_WMARK_LOW;
4763	gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */
4764	struct alloc_context ac = { };
4765
4766	/*
4767	 * There are several places where we assume that the order value is
4768	 * sane, so bail out early if the request is out of bounds.
4769	 */
4770	if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp))
4771		return NULL;
4772
4773	gfp &= gfp_allowed_mask;
4774	/*
4775	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
4776	 * and GFP_NOIO, which have to be inherited by all allocation requests
4777	 * from a particular context which has been marked by
4778	 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures
4779	 * movable zones are not used during allocation.
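	 *
	 * For example, a filesystem can give GFP_NOFS semantics to every
	 * allocation nested in a critical section, so that a plain
	 * GFP_KERNEL allocation below it behaves as GFP_NOFS (sketch):
	 *
	 *	unsigned int nofs_flags = memalloc_nofs_save();
	 *	page = alloc_pages(GFP_KERNEL, 0);
	 *	memalloc_nofs_restore(nofs_flags);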
4780 */ 4781 gfp = current_gfp_context(gfp); 4782 alloc_gfp = gfp; 4783 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4784 &alloc_gfp, &alloc_flags)) 4785 return NULL; 4786 4787 /* 4788 * Forbid the first pass from falling back to types that fragment 4789 * memory until all local zones are considered. 4790 */ 4791 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 4792 4793 /* First allocation attempt */ 4794 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4795 if (likely(page)) 4796 goto out; 4797 4798 alloc_gfp = gfp; 4799 ac.spread_dirty_pages = false; 4800 4801 /* 4802 * Restore the original nodemask if it was potentially replaced with 4803 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 4804 */ 4805 ac.nodemask = nodemask; 4806 4807 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4808 4809 out: 4810 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4811 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4812 __free_pages(page, order); 4813 page = NULL; 4814 } 4815 4816 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4817 kmsan_alloc_page(page, order, alloc_gfp); 4818 4819 return page; 4820 } 4821 EXPORT_SYMBOL(__alloc_pages); 4822 4823 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 4824 nodemask_t *nodemask) 4825 { 4826 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 4827 preferred_nid, nodemask); 4828 4829 if (page && order > 1) 4830 prep_transhuge_page(page); 4831 return (struct folio *)page; 4832 } 4833 EXPORT_SYMBOL(__folio_alloc); 4834 4835 /* 4836 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4837 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4838 * you need to access high mem. 4839 */ 4840 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 4841 { 4842 struct page *page; 4843 4844 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 4845 if (!page) 4846 return 0; 4847 return (unsigned long) page_address(page); 4848 } 4849 EXPORT_SYMBOL(__get_free_pages); 4850 4851 unsigned long get_zeroed_page(gfp_t gfp_mask) 4852 { 4853 return __get_free_page(gfp_mask | __GFP_ZERO); 4854 } 4855 EXPORT_SYMBOL(get_zeroed_page); 4856 4857 /** 4858 * __free_pages - Free pages allocated with alloc_pages(). 4859 * @page: The page pointer returned from alloc_pages(). 4860 * @order: The order of the allocation. 4861 * 4862 * This function can free multi-page allocations that are not compound 4863 * pages. It does not check that the @order passed in matches that of 4864 * the allocation, so it is easy to leak memory. Freeing more memory 4865 * than was allocated will probably emit a warning. 4866 * 4867 * If the last reference to this page is speculative, it will be released 4868 * by put_page() which only frees the first page of a non-compound 4869 * allocation. To prevent the remaining pages from being leaked, we free 4870 * the subsequent pages here. If you want to use the page's reference 4871 * count to decide when to free the allocation, you should allocate a 4872 * compound page, and use put_page() instead of __free_pages(). 4873 * 4874 * Context: May be called in interrupt context or while holding a normal 4875 * spinlock, but not in NMI context or while holding a raw spinlock. 
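 *
 * A typical pairing looks like this (sketch, error paths trimmed):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	buf = page_address(page);
 *	... use the four contiguous pages at buf ...
 *	__free_pages(page, 2);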
4876 */ 4877 void __free_pages(struct page *page, unsigned int order) 4878 { 4879 /* get PageHead before we drop reference */ 4880 int head = PageHead(page); 4881 4882 if (put_page_testzero(page)) 4883 free_the_page(page, order); 4884 else if (!head) 4885 while (order-- > 0) 4886 free_the_page(page + (1 << order), order); 4887 } 4888 EXPORT_SYMBOL(__free_pages); 4889 4890 void free_pages(unsigned long addr, unsigned int order) 4891 { 4892 if (addr != 0) { 4893 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4894 __free_pages(virt_to_page((void *)addr), order); 4895 } 4896 } 4897 4898 EXPORT_SYMBOL(free_pages); 4899 4900 /* 4901 * Page Fragment: 4902 * An arbitrary-length arbitrary-offset area of memory which resides 4903 * within a 0 or higher order page. Multiple fragments within that page 4904 * are individually refcounted, in the page's reference counter. 4905 * 4906 * The page_frag functions below provide a simple allocation framework for 4907 * page fragments. This is used by the network stack and network device 4908 * drivers to provide a backing region of memory for use as either an 4909 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 4910 */ 4911 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4912 gfp_t gfp_mask) 4913 { 4914 struct page *page = NULL; 4915 gfp_t gfp = gfp_mask; 4916 4917 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4918 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 4919 __GFP_NOMEMALLOC; 4920 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4921 PAGE_FRAG_CACHE_MAX_ORDER); 4922 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4923 #endif 4924 if (unlikely(!page)) 4925 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4926 4927 nc->va = page ? page_address(page) : NULL; 4928 4929 return page; 4930 } 4931 4932 void __page_frag_cache_drain(struct page *page, unsigned int count) 4933 { 4934 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4935 4936 if (page_ref_sub_and_test(page, count)) 4937 free_the_page(page, compound_order(page)); 4938 } 4939 EXPORT_SYMBOL(__page_frag_cache_drain); 4940 4941 void *page_frag_alloc_align(struct page_frag_cache *nc, 4942 unsigned int fragsz, gfp_t gfp_mask, 4943 unsigned int align_mask) 4944 { 4945 unsigned int size = PAGE_SIZE; 4946 struct page *page; 4947 int offset; 4948 4949 if (unlikely(!nc->va)) { 4950 refill: 4951 page = __page_frag_cache_refill(nc, gfp_mask); 4952 if (!page) 4953 return NULL; 4954 4955 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4956 /* if size can vary use size else just use PAGE_SIZE */ 4957 size = nc->size; 4958 #endif 4959 /* Even if we own the page, we do not use atomic_set(). 4960 * This would break get_page_unless_zero() users. 
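	 *
	 * Instead the page is given PAGE_FRAG_CACHE_MAX_SIZE + 1 references
	 * in total and pagecnt_bias hands one back per fragment, so the
	 * fragment fast path needs no atomic refcount operations. Callers
	 * pair the cache with the usual API (sketch):
	 *
	 *	void *buf = page_frag_alloc(&cache, 256, GFP_ATOMIC);
	 *	...
	 *	page_frag_free(buf);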
4961	 */
4962	page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
4963
4964	/* reset page count bias and offset to start of new frag */
4965	nc->pfmemalloc = page_is_pfmemalloc(page);
4966	nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4967	nc->offset = size;
4968	}
4969
4970	offset = nc->offset - fragsz;
4971	if (unlikely(offset < 0)) {
4972		page = virt_to_page(nc->va);
4973
4974		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
4975			goto refill;
4976
4977		if (unlikely(nc->pfmemalloc)) {
4978			free_the_page(page, compound_order(page));
4979			goto refill;
4980		}
4981
4982 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
4983		/* if size can vary use size else just use PAGE_SIZE */
4984		size = nc->size;
4985 #endif
4986		/* OK, page count is 0, we can safely set it */
4987		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
4988
4989		/* reset page count bias and offset to start of new frag */
4990		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
4991		offset = size - fragsz;
4992		if (unlikely(offset < 0)) {
4993			/*
4994			 * The caller is trying to allocate a fragment
4995			 * with fragsz > PAGE_SIZE but the cache isn't big
4996			 * enough to satisfy the request; this may
4997			 * happen in low memory conditions.
4998			 * We don't release the cache page because
4999			 * it could make memory pressure worse
5000			 * so we simply return NULL here.
5001			 */
5002			return NULL;
5003		}
5004	}
5005
5006	nc->pagecnt_bias--;
5007	offset &= align_mask;
5008	nc->offset = offset;
5009
5010	return nc->va + offset;
5011 }
5012 EXPORT_SYMBOL(page_frag_alloc_align);
5013
5014 /*
5015  * Frees a page fragment allocated out of either a compound or order 0 page.
5016  */
5017 void page_frag_free(void *addr)
5018 {
5019	struct page *page = virt_to_head_page(addr);
5020
5021	if (unlikely(put_page_testzero(page)))
5022		free_the_page(page, compound_order(page));
5023 }
5024 EXPORT_SYMBOL(page_frag_free);
5025
5026 static void *make_alloc_exact(unsigned long addr, unsigned int order,
5027		size_t size)
5028 {
5029	if (addr) {
5030		unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
5031		struct page *page = virt_to_page((void *)addr);
5032		struct page *last = page + nr;
5033
5034		split_page_owner(page, 1 << order);
5035		split_page_memcg(page, 1 << order);
5036		while (page < --last)
5037			set_page_refcounted(last);
5038
5039		last = page + (1UL << order);
5040		for (page += nr; page < last; page++)
5041			__free_pages_ok(page, 0, FPI_TO_TAIL);
5042	}
5043	return (void *)addr;
5044 }
5045
5046 /**
5047  * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
5048  * @size: the number of bytes to allocate
5049  * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
5050  *
5051  * This function is similar to alloc_pages(), except that it allocates the
5052  * minimum number of pages to satisfy the request. alloc_pages() can only
5053  * allocate memory in power-of-two pages.
5054  *
5055  * This function is also limited by MAX_ORDER.
5056  *
5057  * Memory allocated by this function must be released by free_pages_exact().
5058  *
5059  * Return: pointer to the allocated area or %NULL in case of error.
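 *
 * Example (sketch): a request for 48KiB with 4KiB pages returns 12
 * contiguous pages, where alloc_pages() would have rounded up to order 4
 * (16 pages); the unused tail pages are freed back:
 *
 *	buf = alloc_pages_exact(48 * 1024, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 48 * 1024);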
5060 */ 5061 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 5062 { 5063 unsigned int order = get_order(size); 5064 unsigned long addr; 5065 5066 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5067 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5068 5069 addr = __get_free_pages(gfp_mask, order); 5070 return make_alloc_exact(addr, order, size); 5071 } 5072 EXPORT_SYMBOL(alloc_pages_exact); 5073 5074 /** 5075 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5076 * pages on a node. 5077 * @nid: the preferred node ID where memory should be allocated 5078 * @size: the number of bytes to allocate 5079 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5080 * 5081 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5082 * back. 5083 * 5084 * Return: pointer to the allocated area or %NULL in case of error. 5085 */ 5086 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 5087 { 5088 unsigned int order = get_order(size); 5089 struct page *p; 5090 5091 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5092 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5093 5094 p = alloc_pages_node(nid, gfp_mask, order); 5095 if (!p) 5096 return NULL; 5097 return make_alloc_exact((unsigned long)page_address(p), order, size); 5098 } 5099 5100 /** 5101 * free_pages_exact - release memory allocated via alloc_pages_exact() 5102 * @virt: the value returned by alloc_pages_exact. 5103 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5104 * 5105 * Release the memory allocated by a previous call to alloc_pages_exact. 5106 */ 5107 void free_pages_exact(void *virt, size_t size) 5108 { 5109 unsigned long addr = (unsigned long)virt; 5110 unsigned long end = addr + PAGE_ALIGN(size); 5111 5112 while (addr < end) { 5113 free_page(addr); 5114 addr += PAGE_SIZE; 5115 } 5116 } 5117 EXPORT_SYMBOL(free_pages_exact); 5118 5119 /** 5120 * nr_free_zone_pages - count number of pages beyond high watermark 5121 * @offset: The zone index of the highest zone 5122 * 5123 * nr_free_zone_pages() counts the number of pages which are beyond the 5124 * high watermark within all zones at or below a given zone index. For each 5125 * zone, the number of pages is calculated as: 5126 * 5127 * nr_free_zone_pages = managed_pages - high_pages 5128 * 5129 * Return: number of pages beyond high watermark. 5130 */ 5131 static unsigned long nr_free_zone_pages(int offset) 5132 { 5133 struct zoneref *z; 5134 struct zone *zone; 5135 5136 /* Just pick one node, since fallback list is circular */ 5137 unsigned long sum = 0; 5138 5139 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5140 5141 for_each_zone_zonelist(zone, z, zonelist, offset) { 5142 unsigned long size = zone_managed_pages(zone); 5143 unsigned long high = high_wmark_pages(zone); 5144 if (size > high) 5145 sum += size - high; 5146 } 5147 5148 return sum; 5149 } 5150 5151 /** 5152 * nr_free_buffer_pages - count number of pages beyond high watermark 5153 * 5154 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5155 * watermark within ZONE_DMA and ZONE_NORMAL. 5156 * 5157 * Return: number of pages beyond high watermark within ZONE_DMA and 5158 * ZONE_NORMAL. 
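 *
 * (GFP_USER carries no zone modifiers, so gfp_zone(GFP_USER) evaluates to
 * ZONE_NORMAL and the walk below covers ZONE_NORMAL and everything below
 * it.)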
5159 */ 5160 unsigned long nr_free_buffer_pages(void) 5161 { 5162 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5163 } 5164 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5165 5166 static inline void show_node(struct zone *zone) 5167 { 5168 if (IS_ENABLED(CONFIG_NUMA)) 5169 printk("Node %d ", zone_to_nid(zone)); 5170 } 5171 5172 long si_mem_available(void) 5173 { 5174 long available; 5175 unsigned long pagecache; 5176 unsigned long wmark_low = 0; 5177 unsigned long pages[NR_LRU_LISTS]; 5178 unsigned long reclaimable; 5179 struct zone *zone; 5180 int lru; 5181 5182 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 5183 pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 5184 5185 for_each_zone(zone) 5186 wmark_low += low_wmark_pages(zone); 5187 5188 /* 5189 * Estimate the amount of memory available for userspace allocations, 5190 * without causing swapping or OOM. 5191 */ 5192 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; 5193 5194 /* 5195 * Not all the page cache can be freed, otherwise the system will 5196 * start swapping or thrashing. Assume at least half of the page 5197 * cache, or the low watermark worth of cache, needs to stay. 5198 */ 5199 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 5200 pagecache -= min(pagecache / 2, wmark_low); 5201 available += pagecache; 5202 5203 /* 5204 * Part of the reclaimable slab and other kernel memory consists of 5205 * items that are in use, and cannot be freed. Cap this estimate at the 5206 * low watermark. 5207 */ 5208 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + 5209 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); 5210 available += reclaimable - min(reclaimable / 2, wmark_low); 5211 5212 if (available < 0) 5213 available = 0; 5214 return available; 5215 } 5216 EXPORT_SYMBOL_GPL(si_mem_available); 5217 5218 void si_meminfo(struct sysinfo *val) 5219 { 5220 val->totalram = totalram_pages(); 5221 val->sharedram = global_node_page_state(NR_SHMEM); 5222 val->freeram = global_zone_page_state(NR_FREE_PAGES); 5223 val->bufferram = nr_blockdev_pages(); 5224 val->totalhigh = totalhigh_pages(); 5225 val->freehigh = nr_free_highpages(); 5226 val->mem_unit = PAGE_SIZE; 5227 } 5228 5229 EXPORT_SYMBOL(si_meminfo); 5230 5231 #ifdef CONFIG_NUMA 5232 void si_meminfo_node(struct sysinfo *val, int nid) 5233 { 5234 int zone_type; /* needs to be signed */ 5235 unsigned long managed_pages = 0; 5236 unsigned long managed_highpages = 0; 5237 unsigned long free_highpages = 0; 5238 pg_data_t *pgdat = NODE_DATA(nid); 5239 5240 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 5241 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); 5242 val->totalram = managed_pages; 5243 val->sharedram = node_page_state(pgdat, NR_SHMEM); 5244 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 5245 #ifdef CONFIG_HIGHMEM 5246 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 5247 struct zone *zone = &pgdat->node_zones[zone_type]; 5248 5249 if (is_highmem(zone)) { 5250 managed_highpages += zone_managed_pages(zone); 5251 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 5252 } 5253 } 5254 val->totalhigh = managed_highpages; 5255 val->freehigh = free_highpages; 5256 #else 5257 val->totalhigh = managed_highpages; 5258 val->freehigh = free_highpages; 5259 #endif 5260 val->mem_unit = PAGE_SIZE; 5261 } 5262 #endif 5263 5264 /* 5265 * Determine whether the node should be displayed or not, depending on whether 5266 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 
5267 */ 5268 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 5269 { 5270 if (!(flags & SHOW_MEM_FILTER_NODES)) 5271 return false; 5272 5273 /* 5274 * no node mask - aka implicit memory numa policy. Do not bother with 5275 * the synchronization - read_mems_allowed_begin - because we do not 5276 * have to be precise here. 5277 */ 5278 if (!nodemask) 5279 nodemask = &cpuset_current_mems_allowed; 5280 5281 return !node_isset(nid, *nodemask); 5282 } 5283 5284 static void show_migration_types(unsigned char type) 5285 { 5286 static const char types[MIGRATE_TYPES] = { 5287 [MIGRATE_UNMOVABLE] = 'U', 5288 [MIGRATE_MOVABLE] = 'M', 5289 [MIGRATE_RECLAIMABLE] = 'E', 5290 [MIGRATE_HIGHATOMIC] = 'H', 5291 #ifdef CONFIG_CMA 5292 [MIGRATE_CMA] = 'C', 5293 #endif 5294 #ifdef CONFIG_MEMORY_ISOLATION 5295 [MIGRATE_ISOLATE] = 'I', 5296 #endif 5297 }; 5298 char tmp[MIGRATE_TYPES + 1]; 5299 char *p = tmp; 5300 int i; 5301 5302 for (i = 0; i < MIGRATE_TYPES; i++) { 5303 if (type & (1 << i)) 5304 *p++ = types[i]; 5305 } 5306 5307 *p = '\0'; 5308 printk(KERN_CONT "(%s) ", tmp); 5309 } 5310 5311 static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) 5312 { 5313 int zone_idx; 5314 for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) 5315 if (zone_managed_pages(pgdat->node_zones + zone_idx)) 5316 return true; 5317 return false; 5318 } 5319 5320 /* 5321 * Show free area list (used inside shift_scroll-lock stuff) 5322 * We also calculate the percentage fragmentation. We do this by counting the 5323 * memory on each free list with the exception of the first item on the list. 5324 * 5325 * Bits in @filter: 5326 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 5327 * cpuset. 5328 */ 5329 void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) 5330 { 5331 unsigned long free_pcp = 0; 5332 int cpu, nid; 5333 struct zone *zone; 5334 pg_data_t *pgdat; 5335 5336 for_each_populated_zone(zone) { 5337 if (zone_idx(zone) > max_zone_idx) 5338 continue; 5339 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5340 continue; 5341 5342 for_each_online_cpu(cpu) 5343 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 5344 } 5345 5346 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 5347 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 5348 " unevictable:%lu dirty:%lu writeback:%lu\n" 5349 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 5350 " mapped:%lu shmem:%lu pagetables:%lu\n" 5351 " sec_pagetables:%lu bounce:%lu\n" 5352 " kernel_misc_reclaimable:%lu\n" 5353 " free:%lu free_pcp:%lu free_cma:%lu\n", 5354 global_node_page_state(NR_ACTIVE_ANON), 5355 global_node_page_state(NR_INACTIVE_ANON), 5356 global_node_page_state(NR_ISOLATED_ANON), 5357 global_node_page_state(NR_ACTIVE_FILE), 5358 global_node_page_state(NR_INACTIVE_FILE), 5359 global_node_page_state(NR_ISOLATED_FILE), 5360 global_node_page_state(NR_UNEVICTABLE), 5361 global_node_page_state(NR_FILE_DIRTY), 5362 global_node_page_state(NR_WRITEBACK), 5363 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), 5364 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), 5365 global_node_page_state(NR_FILE_MAPPED), 5366 global_node_page_state(NR_SHMEM), 5367 global_node_page_state(NR_PAGETABLE), 5368 global_node_page_state(NR_SECONDARY_PAGETABLE), 5369 global_zone_page_state(NR_BOUNCE), 5370 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), 5371 global_zone_page_state(NR_FREE_PAGES), 5372 free_pcp, 5373 
global_zone_page_state(NR_FREE_CMA_PAGES)); 5374 5375 for_each_online_pgdat(pgdat) { 5376 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 5377 continue; 5378 if (!node_has_managed_zones(pgdat, max_zone_idx)) 5379 continue; 5380 5381 printk("Node %d" 5382 " active_anon:%lukB" 5383 " inactive_anon:%lukB" 5384 " active_file:%lukB" 5385 " inactive_file:%lukB" 5386 " unevictable:%lukB" 5387 " isolated(anon):%lukB" 5388 " isolated(file):%lukB" 5389 " mapped:%lukB" 5390 " dirty:%lukB" 5391 " writeback:%lukB" 5392 " shmem:%lukB" 5393 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5394 " shmem_thp: %lukB" 5395 " shmem_pmdmapped: %lukB" 5396 " anon_thp: %lukB" 5397 #endif 5398 " writeback_tmp:%lukB" 5399 " kernel_stack:%lukB" 5400 #ifdef CONFIG_SHADOW_CALL_STACK 5401 " shadow_call_stack:%lukB" 5402 #endif 5403 " pagetables:%lukB" 5404 " sec_pagetables:%lukB" 5405 " all_unreclaimable? %s" 5406 "\n", 5407 pgdat->node_id, 5408 K(node_page_state(pgdat, NR_ACTIVE_ANON)), 5409 K(node_page_state(pgdat, NR_INACTIVE_ANON)), 5410 K(node_page_state(pgdat, NR_ACTIVE_FILE)), 5411 K(node_page_state(pgdat, NR_INACTIVE_FILE)), 5412 K(node_page_state(pgdat, NR_UNEVICTABLE)), 5413 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 5414 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 5415 K(node_page_state(pgdat, NR_FILE_MAPPED)), 5416 K(node_page_state(pgdat, NR_FILE_DIRTY)), 5417 K(node_page_state(pgdat, NR_WRITEBACK)), 5418 K(node_page_state(pgdat, NR_SHMEM)), 5419 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5420 K(node_page_state(pgdat, NR_SHMEM_THPS)), 5421 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), 5422 K(node_page_state(pgdat, NR_ANON_THPS)), 5423 #endif 5424 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 5425 node_page_state(pgdat, NR_KERNEL_STACK_KB), 5426 #ifdef CONFIG_SHADOW_CALL_STACK 5427 node_page_state(pgdat, NR_KERNEL_SCS_KB), 5428 #endif 5429 K(node_page_state(pgdat, NR_PAGETABLE)), 5430 K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), 5431 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
5432 "yes" : "no"); 5433 } 5434 5435 for_each_populated_zone(zone) { 5436 int i; 5437 5438 if (zone_idx(zone) > max_zone_idx) 5439 continue; 5440 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5441 continue; 5442 5443 free_pcp = 0; 5444 for_each_online_cpu(cpu) 5445 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 5446 5447 show_node(zone); 5448 printk(KERN_CONT 5449 "%s" 5450 " free:%lukB" 5451 " boost:%lukB" 5452 " min:%lukB" 5453 " low:%lukB" 5454 " high:%lukB" 5455 " reserved_highatomic:%luKB" 5456 " active_anon:%lukB" 5457 " inactive_anon:%lukB" 5458 " active_file:%lukB" 5459 " inactive_file:%lukB" 5460 " unevictable:%lukB" 5461 " writepending:%lukB" 5462 " present:%lukB" 5463 " managed:%lukB" 5464 " mlocked:%lukB" 5465 " bounce:%lukB" 5466 " free_pcp:%lukB" 5467 " local_pcp:%ukB" 5468 " free_cma:%lukB" 5469 "\n", 5470 zone->name, 5471 K(zone_page_state(zone, NR_FREE_PAGES)), 5472 K(zone->watermark_boost), 5473 K(min_wmark_pages(zone)), 5474 K(low_wmark_pages(zone)), 5475 K(high_wmark_pages(zone)), 5476 K(zone->nr_reserved_highatomic), 5477 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 5478 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 5479 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 5480 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 5481 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 5482 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 5483 K(zone->present_pages), 5484 K(zone_managed_pages(zone)), 5485 K(zone_page_state(zone, NR_MLOCK)), 5486 K(zone_page_state(zone, NR_BOUNCE)), 5487 K(free_pcp), 5488 K(this_cpu_read(zone->per_cpu_pageset->count)), 5489 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 5490 printk("lowmem_reserve[]:"); 5491 for (i = 0; i < MAX_NR_ZONES; i++) 5492 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 5493 printk(KERN_CONT "\n"); 5494 } 5495 5496 for_each_populated_zone(zone) { 5497 unsigned int order; 5498 unsigned long nr[MAX_ORDER + 1], flags, total = 0; 5499 unsigned char types[MAX_ORDER + 1]; 5500 5501 if (zone_idx(zone) > max_zone_idx) 5502 continue; 5503 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 5504 continue; 5505 show_node(zone); 5506 printk(KERN_CONT "%s: ", zone->name); 5507 5508 spin_lock_irqsave(&zone->lock, flags); 5509 for (order = 0; order <= MAX_ORDER; order++) { 5510 struct free_area *area = &zone->free_area[order]; 5511 int type; 5512 5513 nr[order] = area->nr_free; 5514 total += nr[order] << order; 5515 5516 types[order] = 0; 5517 for (type = 0; type < MIGRATE_TYPES; type++) { 5518 if (!free_area_empty(area, type)) 5519 types[order] |= 1 << type; 5520 } 5521 } 5522 spin_unlock_irqrestore(&zone->lock, flags); 5523 for (order = 0; order <= MAX_ORDER; order++) { 5524 printk(KERN_CONT "%lu*%lukB ", 5525 nr[order], K(1UL) << order); 5526 if (nr[order]) 5527 show_migration_types(types[order]); 5528 } 5529 printk(KERN_CONT "= %lukB\n", K(total)); 5530 } 5531 5532 for_each_online_node(nid) { 5533 if (show_mem_node_skip(filter, nid, nodemask)) 5534 continue; 5535 hugetlb_show_meminfo_node(nid); 5536 } 5537 5538 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 5539 5540 show_swap_cache_info(); 5541 } 5542 5543 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 5544 { 5545 zoneref->zone = zone; 5546 zoneref->zone_idx = zone_idx(zone); 5547 } 5548 5549 /* 5550 * Builds allocation fallback zone lists. 5551 * 5552 * Add all populated zones of a node to the zonelist. 
 */
static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
{
	struct zone *zone;
	enum zone_type zone_type = MAX_NR_ZONES;
	int nr_zones = 0;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (populated_zone(zone)) {
			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}
	} while (zone_type);

	return nr_zones;
}

#ifdef CONFIG_NUMA

static int __parse_numa_zonelist_order(char *s)
{
	/*
	 * We used to support different zonelist modes, but they turned
	 * out to be just not useful. Keep the warning in place in case
	 * somebody still uses the command line parameter, so that we do
	 * not fail silently.
	 */
	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
		pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
		return -EINVAL;
	}
	return 0;
}

char numa_zonelist_order[] = "Node";

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return __parse_numa_zonelist_order(buffer);
	return proc_dostring(table, write, buffer, length, ppos);
}

static int node_load[MAX_NUMNODES];

/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list. The node should not have appeared
 * already in @node's fallback list, it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and we also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 *
 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
 */
int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = NUMA_NO_NODE;

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		if (!cpumask_empty(cpumask_of_node(n)))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= MAX_NUMNODES;
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}

/*
 * Build zonelists ordered by node and zones within node.
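 * For example, on a two-node machine this makes node 0's fallback list
 * node 0's zones (highest zone first) followed by node 1's zones.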
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
		unsigned nr_nodes)
{
	struct zoneref *zonerefs;
	int i;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;

	for (i = 0; i < nr_nodes; i++) {
		int nr_zones;

		pg_data_t *node = NODE_DATA(node_order[i]);

		nr_zones = build_zonerefs_node(node, zonerefs);
		zonerefs += nr_zones;
	}
	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	struct zoneref *zonerefs;
	int nr_zones;

	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;
	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}

/*
 * Build the zonelists for this node in node order (see
 * build_zonelists_in_node_order() above): remote nodes are appended in
 * order of increasing distance, and nodes at equal distance are rotated
 * round-robin via node_load so that no single node absorbs all of the
 * fallback pressure.
 */
static void build_zonelists(pg_data_t *pgdat)
{
	static int node_order[MAX_NUMNODES];
	int node, nr_nodes = 0;
	nodemask_t used_mask = NODE_MASK_NONE;
	int local_node, prev_node;

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	prev_node = local_node;

	memset(node_order, 0, sizeof(node_order));
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So add a penalty to the first node in the same
		 * distance group to make the selection round-robin.
		 */
		if (node_distance(local_node, node) !=
		    node_distance(local_node, prev_node))
			node_load[node] += 1;

		node_order[nr_nodes++] = node;
		prev_node = node;
	}

	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
	build_thisnode_zonelists(pgdat);
	pr_info("Fallback order for Node %d: ", local_node);
	for (node = 0; node < nr_nodes; node++)
		pr_cont("%d ", node_order[node]);
	pr_cont("\n");
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return the node id of the node used for "local" allocations, i.e. the
 * node id of the first zone in the argument node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate the zonelist.
 */
int local_memory_node(int node)
{
	struct zoneref *z;

	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
				 gfp_zone(GFP_KERNEL),
				 NULL);
	return zone_to_nid(z->zone);
}
#endif

static void setup_min_unmapped_ratio(void);
static void setup_min_slab_ratio(void);
#else	/* CONFIG_NUMA */

static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	struct zoneref *zonerefs;
	int nr_zones;

	local_node = pgdat->node_id;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (wrapping around modulo the number of nodes).
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}

	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}

#endif	/* CONFIG_NUMA */

/*
 * Boot pageset table. One per cpu, to be used for all zones and all
 * nodes. The parameters will be set in such a way that an item put on
 * a list will immediately be handed over to the buddy list. This is
 * safe since pageset manipulation is done with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do not check if the
 * processor is online before following the pageset pointer. Other
 * parts of the kernel may not check if the zone is available.
 */
static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats);
/* These effectively disable the pcplists in the boot pageset completely */
#define BOOT_PAGESET_HIGH	0
#define BOOT_PAGESET_BATCH	1
static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset);
static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats);

static void __build_all_zonelists(void *data)
{
	int nid;
	int __maybe_unused cpu;
	pg_data_t *self = data;
	unsigned long flags;

	/*
	 * Explicitly disable this CPU's interrupts before taking seqlock
	 * to prevent any IRQ handler from calling into the page allocator
	 * (e.g. GFP_ATOMIC) that could hit zonelist_iter_begin and livelock.
	 */
	local_irq_save(flags);
	/*
	 * Explicitly disable this CPU's synchronous printk() before taking
	 * seqlock to prevent any printk() from trying to hold port->lock, for
	 * tty_insert_flip_string_and_push_buffer() on another CPU might be
	 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held.
	 */
	printk_deferred_enter();
	write_seqlock(&zonelist_update_seq);

#ifdef CONFIG_NUMA
	memset(node_load, 0, sizeof(node_load));
#endif

	/*
	 * This node was hot-added and has no memory yet. Just building
	 * its zonelists is fine - no need to touch other nodes.
	 */
	if (self && !node_online(self->node_id)) {
		build_zonelists(self);
	} else {
		/*
		 * All possible nodes have pgdat preallocated
		 * in free_area_init
		 */
		for_each_node(nid) {
			pg_data_t *pgdat = NODE_DATA(nid);

			build_zonelists(pgdat);
		}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
		/*
		 * We now know the "local memory node" for each node--
		 * i.e., the node of the first zone in the generic zonelist.
		 * Set up numa_mem percpu variable for on-line cpus.  During
		 * boot, only the boot cpu should be on-line;  we'll init the
		 * secondary cpus' numa_mem as they come on-line.
		 * During node/memory hotplug, we'll fix up all on-line cpus.
		 */
		for_each_online_cpu(cpu)
			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
	}

	write_sequnlock(&zonelist_update_seq);
	printk_deferred_exit();
	local_irq_restore(flags);
}

static noinline void __init
build_all_zonelists_init(void)
{
	int cpu;

	__build_all_zonelists(NULL);

	/*
	 * Initialize the boot_pagesets that are going to be used
	 * for bootstrapping processors. The real pagesets for
	 * each zone will be allocated later when the per cpu
	 * allocator is available.
	 *
	 * boot_pagesets are also used for bootstrapping offline
	 * cpus if the system is already booted because the pagesets
	 * are needed to initialize allocators on a specific cpu too.
	 * E.g. the percpu allocator needs the page allocator, which
	 * needs the percpu allocator in order to allocate its pagesets
	 * (a chicken-and-egg dilemma).
	 */
	for_each_possible_cpu(cpu)
		per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu));

	mminit_verify_zonelist();
	cpuset_init_current_mems_allowed();
}

/*
 * Updates to the zonelists are serialized by zonelist_update_seq (see
 * __build_all_zonelists() above), unless system_state == SYSTEM_BOOTING.
 *
 * __ref due to call of __init annotated helper build_all_zonelists_init
 * [protected by SYSTEM_BOOTING].
 */
void __ref build_all_zonelists(pg_data_t *pgdat)
{
	unsigned long vm_total_pages;

	if (system_state == SYSTEM_BOOTING) {
		build_all_zonelists_init();
	} else {
		__build_all_zonelists(pgdat);
		/* cpuset refresh routine should be here */
	}
	/* Get the number of free pages beyond high watermark in all zones. */
	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory hot-add so a system can start with mobility
	 * disabled and enable it later.
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n",
		nr_online_nodes,
		page_group_by_mobility_disabled ? "off" : "on",
		vm_total_pages);
#ifdef CONFIG_NUMA
	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}

static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The number of pages to batch allocate is either ~0.1%
	 * of the zone or 1MB, whichever is smaller. The batch
	 * size strikes a balance between allocation latency
	 * and zone lock contention.
	 */
	batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE);
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
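	 *
	 * As a worked example (assuming 4KiB pages): a 4GiB zone manages
	 * roughly 1M pages, so min(1M >> 10, 256) = 256, then 256 / 4 = 64,
	 * and rounddown_pow_of_two(64 + 32) - 1 below yields a batch of 63.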
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks.  As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}

static int zone_highsize(struct zone *zone, int batch, int cpu_online)
{
#ifdef CONFIG_MMU
	int high;
	int nr_split_cpus;
	unsigned long total_pages;

	if (!percpu_pagelist_high_fraction) {
		/*
		 * By default, the high value of the pcp is based on the zone
		 * low watermark so that if they are full then background
		 * reclaim will not be started prematurely.
		 */
		total_pages = low_wmark_pages(zone);
	} else {
		/*
		 * If percpu_pagelist_high_fraction is configured, the high
		 * value is based on a fraction of the managed pages in the
		 * zone.
		 */
		total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction;
	}

	/*
	 * Split the high value across all online CPUs local to the zone.
	 * Note that early in boot CPUs may not be online yet, and that
	 * during CPU hotplug the cpumask is not yet updated when a CPU is
	 * being onlined. For memory nodes that have no CPUs, split
	 * pcp->high across all online CPUs to mitigate the risk that
	 * reclaim is triggered prematurely due to pages stored on pcp
	 * lists.
	 */
	nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
	if (!nr_split_cpus)
		nr_split_cpus = num_online_cpus();
	high = total_pages / nr_split_cpus;

	/*
	 * Ensure high is at least batch*4. The multiple is based on the
	 * historical relationship between high and batch.
	 */
	high = max(high, batch << 2);

	return high;
#else
	return 0;
#endif
}

/*
 * pcp->high and pcp->batch values are related and generally batch is lower
 * than high.  They are also related to pcp->count such that count is lower
 * than high, and as soon as it reaches high, the pcplist is flushed.
 *
 * However, guaranteeing these relations at all times would require e.g. write
 * barriers here but also careful usage of read barriers at the read side, and
 * thus be prone to error and bad for performance. So the update only prevents
 * store tearing.  Any new users of pcp->batch and pcp->high should ensure they
 * can cope with those fields changing asynchronously, and fully trust only the
 * pcp->count field on the local CPU with interrupts disabled.
 *
 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
 * outside of boot time (or some other assurance that no concurrent updaters
 * exist).
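 *
 * For example, a lockless reader may observe a freshly updated pcp->high
 * together with a stale pcp->batch (or vice versa); as noted above, both
 * combinations must be tolerated since only store tearing is prevented.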
6068 */ 6069 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 6070 unsigned long batch) 6071 { 6072 WRITE_ONCE(pcp->batch, batch); 6073 WRITE_ONCE(pcp->high, high); 6074 } 6075 6076 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 6077 { 6078 int pindex; 6079 6080 memset(pcp, 0, sizeof(*pcp)); 6081 memset(pzstats, 0, sizeof(*pzstats)); 6082 6083 spin_lock_init(&pcp->lock); 6084 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 6085 INIT_LIST_HEAD(&pcp->lists[pindex]); 6086 6087 /* 6088 * Set batch and high values safe for a boot pageset. A true percpu 6089 * pageset's initialization will update them subsequently. Here we don't 6090 * need to be as careful as pageset_update() as nobody can access the 6091 * pageset yet. 6092 */ 6093 pcp->high = BOOT_PAGESET_HIGH; 6094 pcp->batch = BOOT_PAGESET_BATCH; 6095 pcp->free_factor = 0; 6096 } 6097 6098 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 6099 unsigned long batch) 6100 { 6101 struct per_cpu_pages *pcp; 6102 int cpu; 6103 6104 for_each_possible_cpu(cpu) { 6105 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6106 pageset_update(pcp, high, batch); 6107 } 6108 } 6109 6110 /* 6111 * Calculate and set new high and batch values for all per-cpu pagesets of a 6112 * zone based on the zone's size. 6113 */ 6114 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 6115 { 6116 int new_high, new_batch; 6117 6118 new_batch = max(1, zone_batchsize(zone)); 6119 new_high = zone_highsize(zone, new_batch, cpu_online); 6120 6121 if (zone->pageset_high == new_high && 6122 zone->pageset_batch == new_batch) 6123 return; 6124 6125 zone->pageset_high = new_high; 6126 zone->pageset_batch = new_batch; 6127 6128 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 6129 } 6130 6131 void __meminit setup_zone_pageset(struct zone *zone) 6132 { 6133 int cpu; 6134 6135 /* Size may be 0 on !SMP && !NUMA */ 6136 if (sizeof(struct per_cpu_zonestat) > 0) 6137 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 6138 6139 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 6140 for_each_possible_cpu(cpu) { 6141 struct per_cpu_pages *pcp; 6142 struct per_cpu_zonestat *pzstats; 6143 6144 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 6145 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6146 per_cpu_pages_init(pcp, pzstats); 6147 } 6148 6149 zone_set_pageset_high_and_batch(zone, 0); 6150 } 6151 6152 /* 6153 * The zone indicated has a new number of managed_pages; batch sizes and percpu 6154 * page high values need to be recalculated. 6155 */ 6156 static void zone_pcp_update(struct zone *zone, int cpu_online) 6157 { 6158 mutex_lock(&pcp_batch_high_lock); 6159 zone_set_pageset_high_and_batch(zone, cpu_online); 6160 mutex_unlock(&pcp_batch_high_lock); 6161 } 6162 6163 /* 6164 * Allocate per cpu pagesets and initialize them. 6165 * Before this call only boot pagesets were available. 6166 */ 6167 void __init setup_per_cpu_pageset(void) 6168 { 6169 struct pglist_data *pgdat; 6170 struct zone *zone; 6171 int __maybe_unused cpu; 6172 6173 for_each_populated_zone(zone) 6174 setup_zone_pageset(zone); 6175 6176 #ifdef CONFIG_NUMA 6177 /* 6178 * Unpopulated zones continue using the boot pagesets. 6179 * The numa stats for these pagesets need to be reset. 6180 * Otherwise, they will end up skewing the stats of 6181 * the nodes these zones are associated with. 
6182 */ 6183 for_each_possible_cpu(cpu) { 6184 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 6185 memset(pzstats->vm_numa_event, 0, 6186 sizeof(pzstats->vm_numa_event)); 6187 } 6188 #endif 6189 6190 for_each_online_pgdat(pgdat) 6191 pgdat->per_cpu_nodestats = 6192 alloc_percpu(struct per_cpu_nodestat); 6193 } 6194 6195 __meminit void zone_pcp_init(struct zone *zone) 6196 { 6197 /* 6198 * per cpu subsystem is not up at this point. The following code 6199 * relies on the ability of the linker to provide the 6200 * offset of a (static) per cpu variable into the per cpu area. 6201 */ 6202 zone->per_cpu_pageset = &boot_pageset; 6203 zone->per_cpu_zonestats = &boot_zonestats; 6204 zone->pageset_high = BOOT_PAGESET_HIGH; 6205 zone->pageset_batch = BOOT_PAGESET_BATCH; 6206 6207 if (populated_zone(zone)) 6208 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 6209 zone->present_pages, zone_batchsize(zone)); 6210 } 6211 6212 void adjust_managed_page_count(struct page *page, long count) 6213 { 6214 atomic_long_add(count, &page_zone(page)->managed_pages); 6215 totalram_pages_add(count); 6216 #ifdef CONFIG_HIGHMEM 6217 if (PageHighMem(page)) 6218 totalhigh_pages_add(count); 6219 #endif 6220 } 6221 EXPORT_SYMBOL(adjust_managed_page_count); 6222 6223 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 6224 { 6225 void *pos; 6226 unsigned long pages = 0; 6227 6228 start = (void *)PAGE_ALIGN((unsigned long)start); 6229 end = (void *)((unsigned long)end & PAGE_MASK); 6230 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 6231 struct page *page = virt_to_page(pos); 6232 void *direct_map_addr; 6233 6234 /* 6235 * 'direct_map_addr' might be different from 'pos' 6236 * because some architectures' virt_to_page() 6237 * work with aliases. Getting the direct map 6238 * address ensures that we get a _writeable_ 6239 * alias for the memset(). 6240 */ 6241 direct_map_addr = page_address(page); 6242 /* 6243 * Perform a kasan-unchecked memset() since this memory 6244 * has not been initialized. 6245 */ 6246 direct_map_addr = kasan_reset_tag(direct_map_addr); 6247 if ((unsigned int)poison <= 0xFF) 6248 memset(direct_map_addr, poison, PAGE_SIZE); 6249 6250 free_reserved_page(page); 6251 } 6252 6253 if (pages && s) 6254 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 6255 6256 return pages; 6257 } 6258 6259 static int page_alloc_cpu_dead(unsigned int cpu) 6260 { 6261 struct zone *zone; 6262 6263 lru_add_drain_cpu(cpu); 6264 mlock_drain_remote(cpu); 6265 drain_pages(cpu); 6266 6267 /* 6268 * Spill the event counters of the dead processor 6269 * into the current processors event counters. 6270 * This artificially elevates the count of the current 6271 * processor. 6272 */ 6273 vm_events_fold_cpu(cpu); 6274 6275 /* 6276 * Zero the differential counters of the dead processor 6277 * so that the vm statistics are consistent. 6278 * 6279 * This is only okay since the processor is dead and cannot 6280 * race with what we are doing. 
6281 */ 6282 cpu_vm_stats_fold(cpu); 6283 6284 for_each_populated_zone(zone) 6285 zone_pcp_update(zone, 0); 6286 6287 return 0; 6288 } 6289 6290 static int page_alloc_cpu_online(unsigned int cpu) 6291 { 6292 struct zone *zone; 6293 6294 for_each_populated_zone(zone) 6295 zone_pcp_update(zone, 1); 6296 return 0; 6297 } 6298 6299 void __init page_alloc_init_cpuhp(void) 6300 { 6301 int ret; 6302 6303 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 6304 "mm/page_alloc:pcp", 6305 page_alloc_cpu_online, 6306 page_alloc_cpu_dead); 6307 WARN_ON(ret < 0); 6308 } 6309 6310 /* 6311 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6312 * or min_free_kbytes changes. 6313 */ 6314 static void calculate_totalreserve_pages(void) 6315 { 6316 struct pglist_data *pgdat; 6317 unsigned long reserve_pages = 0; 6318 enum zone_type i, j; 6319 6320 for_each_online_pgdat(pgdat) { 6321 6322 pgdat->totalreserve_pages = 0; 6323 6324 for (i = 0; i < MAX_NR_ZONES; i++) { 6325 struct zone *zone = pgdat->node_zones + i; 6326 long max = 0; 6327 unsigned long managed_pages = zone_managed_pages(zone); 6328 6329 /* Find valid and maximum lowmem_reserve in the zone */ 6330 for (j = i; j < MAX_NR_ZONES; j++) { 6331 if (zone->lowmem_reserve[j] > max) 6332 max = zone->lowmem_reserve[j]; 6333 } 6334 6335 /* we treat the high watermark as reserved pages. */ 6336 max += high_wmark_pages(zone); 6337 6338 if (max > managed_pages) 6339 max = managed_pages; 6340 6341 pgdat->totalreserve_pages += max; 6342 6343 reserve_pages += max; 6344 } 6345 } 6346 totalreserve_pages = reserve_pages; 6347 } 6348 6349 /* 6350 * setup_per_zone_lowmem_reserve - called whenever 6351 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6352 * has a correct pages reserved value, so an adequate number of 6353 * pages are left in the zone after a successful __alloc_pages(). 6354 */ 6355 static void setup_per_zone_lowmem_reserve(void) 6356 { 6357 struct pglist_data *pgdat; 6358 enum zone_type i, j; 6359 6360 for_each_online_pgdat(pgdat) { 6361 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 6362 struct zone *zone = &pgdat->node_zones[i]; 6363 int ratio = sysctl_lowmem_reserve_ratio[i]; 6364 bool clear = !ratio || !zone_managed_pages(zone); 6365 unsigned long managed_pages = 0; 6366 6367 for (j = i + 1; j < MAX_NR_ZONES; j++) { 6368 struct zone *upper_zone = &pgdat->node_zones[j]; 6369 6370 managed_pages += zone_managed_pages(upper_zone); 6371 6372 if (clear) 6373 zone->lowmem_reserve[j] = 0; 6374 else 6375 zone->lowmem_reserve[j] = managed_pages / ratio; 6376 } 6377 } 6378 } 6379 6380 /* update totalreserve_pages */ 6381 calculate_totalreserve_pages(); 6382 } 6383 6384 static void __setup_per_zone_wmarks(void) 6385 { 6386 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6387 unsigned long lowmem_pages = 0; 6388 struct zone *zone; 6389 unsigned long flags; 6390 6391 /* Calculate total number of !ZONE_HIGHMEM pages */ 6392 for_each_zone(zone) { 6393 if (!is_highmem(zone)) 6394 lowmem_pages += zone_managed_pages(zone); 6395 } 6396 6397 for_each_zone(zone) { 6398 u64 tmp; 6399 6400 spin_lock_irqsave(&zone->lock, flags); 6401 tmp = (u64)pages_min * zone_managed_pages(zone); 6402 do_div(tmp, lowmem_pages); 6403 if (is_highmem(zone)) { 6404 /* 6405 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6406 * need highmem pages, so cap pages_min to a small 6407 * value here. 
			 *
			 * The (WMARK_HIGH - WMARK_LOW) and (WMARK_LOW - WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone_managed_pages(zone) / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->_watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->_watermark[WMARK_MIN] = tmp;
		}

		/*
		 * Set the kswapd watermarks distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
		tmp = max_t(u64, tmp >> 2,
			    mult_frac(zone_managed_pages(zone),
				      watermark_scale_factor, 10000));

		zone->watermark_boost = 0;
		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	struct zone *zone;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);

	/*
	 * The watermark sizes have changed, so update the pcpu batch
	 * and high limits, or the limits may be inappropriate.
	 */
	for_each_zone(zone)
		zone_pcp_update(zone, 0);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (256MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size.
We use 6477 * 6478 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6479 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6480 * 6481 * which yields 6482 * 6483 * 16MB: 512k 6484 * 32MB: 724k 6485 * 64MB: 1024k 6486 * 128MB: 1448k 6487 * 256MB: 2048k 6488 * 512MB: 2896k 6489 * 1024MB: 4096k 6490 * 2048MB: 5792k 6491 * 4096MB: 8192k 6492 * 8192MB: 11584k 6493 * 16384MB: 16384k 6494 */ 6495 void calculate_min_free_kbytes(void) 6496 { 6497 unsigned long lowmem_kbytes; 6498 int new_min_free_kbytes; 6499 6500 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6501 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6502 6503 if (new_min_free_kbytes > user_min_free_kbytes) 6504 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 6505 else 6506 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6507 new_min_free_kbytes, user_min_free_kbytes); 6508 6509 } 6510 6511 int __meminit init_per_zone_wmark_min(void) 6512 { 6513 calculate_min_free_kbytes(); 6514 setup_per_zone_wmarks(); 6515 refresh_zone_stat_thresholds(); 6516 setup_per_zone_lowmem_reserve(); 6517 6518 #ifdef CONFIG_NUMA 6519 setup_min_unmapped_ratio(); 6520 setup_min_slab_ratio(); 6521 #endif 6522 6523 khugepaged_min_free_kbytes_update(); 6524 6525 return 0; 6526 } 6527 postcore_initcall(init_per_zone_wmark_min) 6528 6529 /* 6530 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6531 * that we can call two helper functions whenever min_free_kbytes 6532 * changes. 6533 */ 6534 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 6535 void *buffer, size_t *length, loff_t *ppos) 6536 { 6537 int rc; 6538 6539 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6540 if (rc) 6541 return rc; 6542 6543 if (write) { 6544 user_min_free_kbytes = min_free_kbytes; 6545 setup_per_zone_wmarks(); 6546 } 6547 return 0; 6548 } 6549 6550 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 6551 void *buffer, size_t *length, loff_t *ppos) 6552 { 6553 int rc; 6554 6555 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6556 if (rc) 6557 return rc; 6558 6559 if (write) 6560 setup_per_zone_wmarks(); 6561 6562 return 0; 6563 } 6564 6565 #ifdef CONFIG_NUMA 6566 static void setup_min_unmapped_ratio(void) 6567 { 6568 pg_data_t *pgdat; 6569 struct zone *zone; 6570 6571 for_each_online_pgdat(pgdat) 6572 pgdat->min_unmapped_pages = 0; 6573 6574 for_each_zone(zone) 6575 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 6576 sysctl_min_unmapped_ratio) / 100; 6577 } 6578 6579 6580 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 6581 void *buffer, size_t *length, loff_t *ppos) 6582 { 6583 int rc; 6584 6585 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6586 if (rc) 6587 return rc; 6588 6589 setup_min_unmapped_ratio(); 6590 6591 return 0; 6592 } 6593 6594 static void setup_min_slab_ratio(void) 6595 { 6596 pg_data_t *pgdat; 6597 struct zone *zone; 6598 6599 for_each_online_pgdat(pgdat) 6600 pgdat->min_slab_pages = 0; 6601 6602 for_each_zone(zone) 6603 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 6604 sysctl_min_slab_ratio) / 100; 6605 } 6606 6607 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 6608 void *buffer, size_t *length, loff_t *ppos) 6609 { 6610 int rc; 6611 6612 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6613 if (rc) 6614 return rc; 6615 6616 setup_min_slab_ratio(); 6617 
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has absolutely no relation to the minimum watermarks;
 * it is only meaningful as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int i;

	proc_dointvec_minmax(table, write, buffer, length, ppos);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (sysctl_lowmem_reserve_ratio[i] < 1)
			sysctl_lowmem_reserve_ratio[i] = 0;
	}

	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_high_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_high_fraction &&
	    percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
		percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
		goto out;

	for_each_populated_zone(zone)
		zone_set_pageset_high_and_batch(zone, 0);
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}

#ifdef CONFIG_CONTIG_ALLOC
#if defined(CONFIG_DYNAMIC_DEBUG) || \
	(defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
/* Usage: See admin-guide/dynamic-debug-howto.rst */
static void alloc_contig_dump_pages(struct list_head *page_list)
{
	DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure");

	if (DYNAMIC_DEBUG_BRANCH(descriptor)) {
		struct page *page;

		dump_stack();
		list_for_each_entry(page, page_list, lru)
			dump_page(page, "migration failure");
	}
}
#else
static inline void alloc_contig_dump_pages(struct list_head *page_list)
{
}
#endif

/* [start, end) must belong to a single zone. */
int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c.
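	 * It alternates between isolating a batch of pages in [start, end)
	 * and migrating that batch out of the range; a batch that cannot be
	 * emptied is retried at most five times before giving up with -EBUSY.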
	 */
	unsigned int nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;
	struct migration_target_control mtc = {
		.nid = zone_to_nid(cc->zone),
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	lru_cache_disable();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			ret = isolate_migratepages_range(cc, pfn, end);
			if (ret && ret != -EAGAIN)
				break;
			pfn = cc->migrate_pfn;
			tries = 0;
		} else if (++tries == 5) {
			ret = -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
			NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL);

		/*
		 * On -ENOMEM, migrate_pages() bails out right away. It is
		 * pointless to retry on this error, so do the same here.
		 */
		if (ret == -ENOMEM)
			break;
	}

	lru_cache_enable();
	if (ret < 0) {
		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
			alloc_contig_dump_pages(&cc->migratepages);
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock aligned. The PFN range must
 * belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range.  Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is mark all pageblocks in the range as
	 * MIGRATE_ISOLATE. Because pageblock and max-order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, start_isolate_page_range() has special handling for this.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (i.e. pages that
	 * we are interested in). This will put all the pages in
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from the page
	 * allocator, removing them from the buddy system. This way
	 * the page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that the buddy allocator
	 * can use them.
	 */

	ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
	if (ret)
		goto done;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes the
	 * problem. So, just fall through. test_pages_isolated() has a
	 * tracepoint which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated.  So, if we fall through, be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to the caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within pageblock_nr_pages-aligned
	 * blocks that are marked as MIGRATE_ISOLATE.  What's more, all
	 * pages in [start, end) are free in the page allocator.  What we
	 * are going to do is allocate all pages from [start, end) (that
	 * is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the end
	 * of the interesting range may not be aligned with pages that the
	 * page allocator holds, i.e. they can be part of higher-order
	 * pages.  Because of this, we reserve the bigger range and once
	 * this is done, free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated and thus won't get removed from the buddy system.
	 */

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order > MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = buddy_order(pfn_to_page(outer_start));

		/*
		 * The outer_start page could be a small-order buddy page
		 * that doesn't include the start page. Adjust outer_start
		 * in this case so the failed page is reported properly by
		 * the tracepoint in test_pages_isolated().
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists.
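	 * (isolate_freepages_range() below takes every free page in
	 * [outer_start, end) off the buddy freelists; the extra head and
	 * tail pages that were grabbed only for alignment are handed back
	 * via free_contig_range() right after.)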
	 */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(start, end, migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range);

static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (PageHuge(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}

/**
 * alloc_contig_pages() -- tries to find and allocate a contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask to limit search and used during compaction
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range(). It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range(). This routine is intended
 * for allocation requests which cannot be fulfilled with the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary. If nr_pages is a
 * power of two, then the allocated range is also guaranteed to be aligned to
 * nr_pages (e.g. a 1GB request would be aligned to 1GB).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
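				 * If that happens, the -EBUSY is simply
				 * absorbed and the scan below moves on to
				 * the next candidate range (pfn += nr_pages).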
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

void free_contig_range(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%lu pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);

/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all cpus. A concurrent page freeing on another CPU that's about
 * to put the page on pcplist will either finish before the drain and the page
 * will be drained, or observe the new high limit and skip the pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}

void zone_pcp_reset(struct zone *zone)
{
	int cpu;
	struct per_cpu_zonestat *pzstats;

	if (zone->per_cpu_pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
			drain_zonestat(zone, pzstats);
		}
		free_percpu(zone->per_cpu_pageset);
		zone->per_cpu_pageset = &boot_pageset;
		if (zone->per_cpu_zonestats != &boot_zonestats) {
			free_percpu(zone->per_cpu_zonestats);
			zone->per_cpu_zonestats = &boot_zonestats;
		}
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone, must not contain holes,
 * must span full sections, and must be isolated before calling this function.
 */
void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;
	unsigned long flags;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * its page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

/*
 * This function returns a stable result only if called under zone lock.
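 * Without the lock the check is only a heuristic: buddy_order_unsafe()
 * below deliberately tolerates a racing order update, so callers must not
 * rely on the result for correctness.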
 */
bool is_free_buddy_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned int order;

	for (order = 0; order <= MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) &&
		    buddy_order_unsafe(page_head) >= order)
			break;
	}

	return order <= MAX_ORDER;
}
EXPORT_SYMBOL(is_free_buddy_page);

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Break down a higher-order page into sub-pages, and keep our target
 * out of the buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy, *next_page;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			next_page = page + size;
			current_buddy = page;
		} else {
			next_page = page;
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high, migratetype))
			continue;

		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high, migratetype);
			set_buddy_order(current_buddy, high);
			page = next_page;
		}
	}
}

/*
 * Take a page that will be marked as poisoned off the buddy allocator.
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order <= MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
					       page_order, migratetype);
			SetPageHWPoisonTakenOff(page);
			if (!is_migrate_isolate(migratetype))
				__mod_zone_freepage_state(zone, -1, migratetype);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}

/*
 * Cancel takeoff done by take_page_off_buddy().
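 * The page is handed back to the buddy allocator as an order-0 page; the
 * return value reports whether the HWPoison flag was still set and has
 * now been cleared.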
7207 */ 7208 bool put_page_back_buddy(struct page *page) 7209 { 7210 struct zone *zone = page_zone(page); 7211 unsigned long pfn = page_to_pfn(page); 7212 unsigned long flags; 7213 int migratetype = get_pfnblock_migratetype(page, pfn); 7214 bool ret = false; 7215 7216 spin_lock_irqsave(&zone->lock, flags); 7217 if (put_page_testzero(page)) { 7218 ClearPageHWPoisonTakenOff(page); 7219 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 7220 if (TestClearPageHWPoison(page)) { 7221 ret = true; 7222 } 7223 } 7224 spin_unlock_irqrestore(&zone->lock, flags); 7225 7226 return ret; 7227 } 7228 #endif 7229 7230 #ifdef CONFIG_ZONE_DMA 7231 bool has_managed_dma(void) 7232 { 7233 struct pglist_data *pgdat; 7234 7235 for_each_online_pgdat(pgdat) { 7236 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 7237 7238 if (managed_zone(zone)) 7239 return true; 7240 } 7241 return false; 7242 } 7243 #endif /* CONFIG_ZONE_DMA */ 7244 7245 #ifdef CONFIG_UNACCEPTED_MEMORY 7246 7247 /* Counts number of zones with unaccepted pages. */ 7248 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); 7249 7250 static bool lazy_accept = true; 7251 7252 static int __init accept_memory_parse(char *p) 7253 { 7254 if (!strcmp(p, "lazy")) { 7255 lazy_accept = true; 7256 return 0; 7257 } else if (!strcmp(p, "eager")) { 7258 lazy_accept = false; 7259 return 0; 7260 } else { 7261 return -EINVAL; 7262 } 7263 } 7264 early_param("accept_memory", accept_memory_parse); 7265 7266 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7267 { 7268 phys_addr_t start = page_to_phys(page); 7269 phys_addr_t end = start + (PAGE_SIZE << order); 7270 7271 return range_contains_unaccepted_memory(start, end); 7272 } 7273 7274 static void accept_page(struct page *page, unsigned int order) 7275 { 7276 phys_addr_t start = page_to_phys(page); 7277 7278 accept_memory(start, start + (PAGE_SIZE << order)); 7279 } 7280 7281 static bool try_to_accept_memory_one(struct zone *zone) 7282 { 7283 unsigned long flags; 7284 struct page *page; 7285 bool last; 7286 7287 if (list_empty(&zone->unaccepted_pages)) 7288 return false; 7289 7290 spin_lock_irqsave(&zone->lock, flags); 7291 page = list_first_entry_or_null(&zone->unaccepted_pages, 7292 struct page, lru); 7293 if (!page) { 7294 spin_unlock_irqrestore(&zone->lock, flags); 7295 return false; 7296 } 7297 7298 list_del(&page->lru); 7299 last = list_empty(&zone->unaccepted_pages); 7300 7301 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7302 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 7303 spin_unlock_irqrestore(&zone->lock, flags); 7304 7305 accept_page(page, MAX_ORDER); 7306 7307 __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL); 7308 7309 if (last) 7310 static_branch_dec(&zones_with_unaccepted_pages); 7311 7312 return true; 7313 } 7314 7315 static bool try_to_accept_memory(struct zone *zone, unsigned int order) 7316 { 7317 long to_accept; 7318 int ret = false; 7319 7320 /* How much to accept to get to high watermark? 
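	 * I.e. the shortfall between the high watermark and the usable free
	 * pages; MAX_ORDER-sized chunks are then accepted until the
	 * shortfall is covered (at least one chunk is always attempted).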
*/ 7321 to_accept = high_wmark_pages(zone) - 7322 (zone_page_state(zone, NR_FREE_PAGES) - 7323 __zone_watermark_unusable_free(zone, order, 0)); 7324 7325 /* Accept at least one page */ 7326 do { 7327 if (!try_to_accept_memory_one(zone)) 7328 break; 7329 ret = true; 7330 to_accept -= MAX_ORDER_NR_PAGES; 7331 } while (to_accept > 0); 7332 7333 return ret; 7334 } 7335 7336 static inline bool has_unaccepted_memory(void) 7337 { 7338 return static_branch_unlikely(&zones_with_unaccepted_pages); 7339 } 7340 7341 static bool __free_unaccepted(struct page *page) 7342 { 7343 struct zone *zone = page_zone(page); 7344 unsigned long flags; 7345 bool first = false; 7346 7347 if (!lazy_accept) 7348 return false; 7349 7350 spin_lock_irqsave(&zone->lock, flags); 7351 first = list_empty(&zone->unaccepted_pages); 7352 list_add_tail(&page->lru, &zone->unaccepted_pages); 7353 __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 7354 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 7355 spin_unlock_irqrestore(&zone->lock, flags); 7356 7357 if (first) 7358 static_branch_inc(&zones_with_unaccepted_pages); 7359 7360 return true; 7361 } 7362 7363 #else 7364 7365 static bool page_contains_unaccepted(struct page *page, unsigned int order) 7366 { 7367 return false; 7368 } 7369 7370 static void accept_page(struct page *page, unsigned int order) 7371 { 7372 } 7373 7374 static bool try_to_accept_memory(struct zone *zone, unsigned int order) 7375 { 7376 return false; 7377 } 7378 7379 static inline bool has_unaccepted_memory(void) 7380 { 7381 return false; 7382 } 7383 7384 static bool __free_unaccepted(struct page *page) 7385 { 7386 BUILD_BUG(); 7387 return false; 7388 } 7389 7390 #endif /* CONFIG_UNACCEPTED_MEMORY */ 7391