1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/page_alloc.c 4 * 5 * Manages the free list, the system allocates free pages here. 6 * Note that kmalloc() lives in slab.c 7 * 8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 9 * Swap reorganised 29.12.95, Stephen Tweedie 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 16 */ 17 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/highmem.h> 21 #include <linux/interrupt.h> 22 #include <linux/jiffies.h> 23 #include <linux/compiler.h> 24 #include <linux/kernel.h> 25 #include <linux/kasan.h> 26 #include <linux/kmsan.h> 27 #include <linux/module.h> 28 #include <linux/suspend.h> 29 #include <linux/ratelimit.h> 30 #include <linux/oom.h> 31 #include <linux/topology.h> 32 #include <linux/sysctl.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/memory_hotplug.h> 36 #include <linux/nodemask.h> 37 #include <linux/vmstat.h> 38 #include <linux/fault-inject.h> 39 #include <linux/compaction.h> 40 #include <trace/events/kmem.h> 41 #include <trace/events/oom.h> 42 #include <linux/prefetch.h> 43 #include <linux/mm_inline.h> 44 #include <linux/mmu_notifier.h> 45 #include <linux/migrate.h> 46 #include <linux/sched/mm.h> 47 #include <linux/page_owner.h> 48 #include <linux/page_table_check.h> 49 #include <linux/memcontrol.h> 50 #include <linux/ftrace.h> 51 #include <linux/lockdep.h> 52 #include <linux/psi.h> 53 #include <linux/khugepaged.h> 54 #include <linux/delayacct.h> 55 #include <asm/div64.h> 56 #include "internal.h" 57 #include "shuffle.h" 58 #include "page_reporting.h" 59 60 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 61 typedef int __bitwise fpi_t; 62 63 /* No special request */ 64 #define FPI_NONE ((__force fpi_t)0) 65 66 /* 67 * Skip free page reporting notification for the (possibly merged) page. 68 * This does not hinder free page reporting from grabbing the page, 69 * reporting it and marking it "reported" - it only skips notifying 70 * the free page reporting infrastructure about a newly freed page. For 71 * example, used when temporarily pulling a page from a freelist and 72 * putting it back unmodified. 73 */ 74 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 75 76 /* 77 * Place the (possibly merged) page to the tail of the freelist. Will ignore 78 * page shuffling (relevant code - e.g., memory onlining - is expected to 79 * shuffle the whole zone). 80 * 81 * Note: No code should rely on this flag for correctness - it's purely 82 * to allow for optimizations when handing back either fresh pages 83 * (memory onlining) or untouched pages (page isolation, free page 84 * reporting). 85 */ 86 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 87 88 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 89 static DEFINE_MUTEX(pcp_batch_high_lock); 90 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 91 92 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 93 /* 94 * On SMP, spin_trylock is sufficient protection. 95 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 96 */ 97 #define pcp_trylock_prepare(flags) do { } while (0) 98 #define pcp_trylock_finish(flag) do { } while (0) 99 #else 100 101 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */ 102 #define pcp_trylock_prepare(flags) local_irq_save(flags) 103 #define pcp_trylock_finish(flags) local_irq_restore(flags) 104 #endif 105 106 /* 107 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid 108 * a migration causing the wrong PCP to be locked and remote memory being 109 * potentially allocated, pin the task to the CPU for the lookup+lock. 110 * preempt_disable is used on !RT because it is faster than migrate_disable. 111 * migrate_disable is used on RT because otherwise RT spinlock usage is 112 * interfered with and a high priority task cannot preempt the allocator. 113 */ 114 #ifndef CONFIG_PREEMPT_RT 115 #define pcpu_task_pin() preempt_disable() 116 #define pcpu_task_unpin() preempt_enable() 117 #else 118 #define pcpu_task_pin() migrate_disable() 119 #define pcpu_task_unpin() migrate_enable() 120 #endif 121 122 /* 123 * Generic helper to lookup and a per-cpu variable with an embedded spinlock. 124 * Return value should be used with equivalent unlock helper. 125 */ 126 #define pcpu_spin_lock(type, member, ptr) \ 127 ({ \ 128 type *_ret; \ 129 pcpu_task_pin(); \ 130 _ret = this_cpu_ptr(ptr); \ 131 spin_lock(&_ret->member); \ 132 _ret; \ 133 }) 134 135 #define pcpu_spin_trylock(type, member, ptr) \ 136 ({ \ 137 type *_ret; \ 138 pcpu_task_pin(); \ 139 _ret = this_cpu_ptr(ptr); \ 140 if (!spin_trylock(&_ret->member)) { \ 141 pcpu_task_unpin(); \ 142 _ret = NULL; \ 143 } \ 144 _ret; \ 145 }) 146 147 #define pcpu_spin_unlock(member, ptr) \ 148 ({ \ 149 spin_unlock(&ptr->member); \ 150 pcpu_task_unpin(); \ 151 }) 152 153 /* struct per_cpu_pages specific helpers. */ 154 #define pcp_spin_lock(ptr) \ 155 pcpu_spin_lock(struct per_cpu_pages, lock, ptr) 156 157 #define pcp_spin_trylock(ptr) \ 158 pcpu_spin_trylock(struct per_cpu_pages, lock, ptr) 159 160 #define pcp_spin_unlock(ptr) \ 161 pcpu_spin_unlock(lock, ptr) 162 163 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 164 DEFINE_PER_CPU(int, numa_node); 165 EXPORT_PER_CPU_SYMBOL(numa_node); 166 #endif 167 168 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 169 170 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 171 /* 172 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 173 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 174 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 175 * defined in <linux/topology.h>. 176 */ 177 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 178 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 179 #endif 180 181 static DEFINE_MUTEX(pcpu_drain_mutex); 182 183 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 184 volatile unsigned long latent_entropy __latent_entropy; 185 EXPORT_SYMBOL(latent_entropy); 186 #endif 187 188 /* 189 * Array of node states. 190 */ 191 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 192 [N_POSSIBLE] = NODE_MASK_ALL, 193 [N_ONLINE] = { { [0] = 1UL } }, 194 #ifndef CONFIG_NUMA 195 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 196 #ifdef CONFIG_HIGHMEM 197 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 198 #endif 199 [N_MEMORY] = { { [0] = 1UL } }, 200 [N_CPU] = { { [0] = 1UL } }, 201 #endif /* NUMA */ 202 }; 203 EXPORT_SYMBOL(node_states); 204 205 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 206 207 /* 208 * A cached value of the page's pageblock's migratetype, used when the page is 209 * put on a pcplist. Used to avoid the pageblock migratetype lookup when 210 * freeing from pcplists in most cases, at the cost of possibly becoming stale. 211 * Also the migratetype set in the page does not necessarily match the pcplist 212 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any 213 * other index - this ensures that it will be put on the correct CMA freelist. 214 */ 215 static inline int get_pcppage_migratetype(struct page *page) 216 { 217 return page->index; 218 } 219 220 static inline void set_pcppage_migratetype(struct page *page, int migratetype) 221 { 222 page->index = migratetype; 223 } 224 225 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 226 unsigned int pageblock_order __read_mostly; 227 #endif 228 229 static void __free_pages_ok(struct page *page, unsigned int order, 230 fpi_t fpi_flags); 231 232 /* 233 * results with 256, 32 in the lowmem_reserve sysctl: 234 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 235 * 1G machine -> (16M dma, 784M normal, 224M high) 236 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 237 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 238 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 239 * 240 * TBD: should special case ZONE_DMA32 machines here - in those we normally 241 * don't need any ZONE_NORMAL reservation 242 */ 243 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 244 #ifdef CONFIG_ZONE_DMA 245 [ZONE_DMA] = 256, 246 #endif 247 #ifdef CONFIG_ZONE_DMA32 248 [ZONE_DMA32] = 256, 249 #endif 250 [ZONE_NORMAL] = 32, 251 #ifdef CONFIG_HIGHMEM 252 [ZONE_HIGHMEM] = 0, 253 #endif 254 [ZONE_MOVABLE] = 0, 255 }; 256 257 char * const zone_names[MAX_NR_ZONES] = { 258 #ifdef CONFIG_ZONE_DMA 259 "DMA", 260 #endif 261 #ifdef CONFIG_ZONE_DMA32 262 "DMA32", 263 #endif 264 "Normal", 265 #ifdef CONFIG_HIGHMEM 266 "HighMem", 267 #endif 268 "Movable", 269 #ifdef CONFIG_ZONE_DEVICE 270 "Device", 271 #endif 272 }; 273 274 const char * const migratetype_names[MIGRATE_TYPES] = { 275 "Unmovable", 276 "Movable", 277 "Reclaimable", 278 "HighAtomic", 279 #ifdef CONFIG_CMA 280 "CMA", 281 #endif 282 #ifdef CONFIG_MEMORY_ISOLATION 283 "Isolate", 284 #endif 285 }; 286 287 static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { 288 [NULL_COMPOUND_DTOR] = NULL, 289 [COMPOUND_PAGE_DTOR] = free_compound_page, 290 #ifdef CONFIG_HUGETLB_PAGE 291 [HUGETLB_PAGE_DTOR] = free_huge_page, 292 #endif 293 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 294 [TRANSHUGE_PAGE_DTOR] = free_transhuge_page, 295 #endif 296 }; 297 298 int min_free_kbytes = 1024; 299 int user_min_free_kbytes = -1; 300 static int watermark_boost_factor __read_mostly = 15000; 301 static int watermark_scale_factor = 10; 302 303 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 304 int movable_zone; 305 EXPORT_SYMBOL(movable_zone); 306 307 #if MAX_NUMNODES > 1 308 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 309 unsigned int nr_online_nodes __read_mostly = 1; 310 EXPORT_SYMBOL(nr_node_ids); 311 EXPORT_SYMBOL(nr_online_nodes); 312 #endif 313 314 static bool page_contains_unaccepted(struct page *page, unsigned int order); 315 static void accept_page(struct page *page, unsigned int order); 316 static bool try_to_accept_memory(struct zone *zone, unsigned int order); 317 static inline bool has_unaccepted_memory(void); 318 static bool __free_unaccepted(struct page *page); 319 320 int page_group_by_mobility_disabled __read_mostly; 321 322 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 323 /* 324 * During boot we initialize deferred pages on-demand, as needed, but once 325 * page_alloc_init_late() has finished, the deferred pages are all initialized, 326 * and we can permanently disable that path. 327 */ 328 DEFINE_STATIC_KEY_TRUE(deferred_pages); 329 330 static inline bool deferred_pages_enabled(void) 331 { 332 return static_branch_unlikely(&deferred_pages); 333 } 334 335 /* 336 * deferred_grow_zone() is __init, but it is called from 337 * get_page_from_freelist() during early boot until deferred_pages permanently 338 * disables this call. This is why we have refdata wrapper to avoid warning, 339 * and to ensure that the function body gets unloaded. 340 */ 341 static bool __ref 342 _deferred_grow_zone(struct zone *zone, unsigned int order) 343 { 344 return deferred_grow_zone(zone, order); 345 } 346 #else 347 static inline bool deferred_pages_enabled(void) 348 { 349 return false; 350 } 351 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 352 353 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 354 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 355 unsigned long pfn) 356 { 357 #ifdef CONFIG_SPARSEMEM 358 return section_to_usemap(__pfn_to_section(pfn)); 359 #else 360 return page_zone(page)->pageblock_flags; 361 #endif /* CONFIG_SPARSEMEM */ 362 } 363 364 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 365 { 366 #ifdef CONFIG_SPARSEMEM 367 pfn &= (PAGES_PER_SECTION-1); 368 #else 369 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 370 #endif /* CONFIG_SPARSEMEM */ 371 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 372 } 373 374 /** 375 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages 376 * @page: The page within the block of interest 377 * @pfn: The target page frame number 378 * @mask: mask of bits that the caller is interested in 379 * 380 * Return: pageblock_bits flags 381 */ 382 unsigned long get_pfnblock_flags_mask(const struct page *page, 383 unsigned long pfn, unsigned long mask) 384 { 385 unsigned long *bitmap; 386 unsigned long bitidx, word_bitidx; 387 unsigned long word; 388 389 bitmap = get_pageblock_bitmap(page, pfn); 390 bitidx = pfn_to_bitidx(page, pfn); 391 word_bitidx = bitidx / BITS_PER_LONG; 392 bitidx &= (BITS_PER_LONG-1); 393 /* 394 * This races, without locks, with set_pfnblock_flags_mask(). Ensure 395 * a consistent read of the memory array, so that results, even though 396 * racy, are not corrupted. 397 */ 398 word = READ_ONCE(bitmap[word_bitidx]); 399 return (word >> bitidx) & mask; 400 } 401 402 static __always_inline int get_pfnblock_migratetype(const struct page *page, 403 unsigned long pfn) 404 { 405 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 406 } 407 408 /** 409 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 410 * @page: The page within the block of interest 411 * @flags: The flags to set 412 * @pfn: The target page frame number 413 * @mask: mask of bits that the caller is interested in 414 */ 415 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 416 unsigned long pfn, 417 unsigned long mask) 418 { 419 unsigned long *bitmap; 420 unsigned long bitidx, word_bitidx; 421 unsigned long word; 422 423 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 424 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 425 426 bitmap = get_pageblock_bitmap(page, pfn); 427 bitidx = pfn_to_bitidx(page, pfn); 428 word_bitidx = bitidx / BITS_PER_LONG; 429 bitidx &= (BITS_PER_LONG-1); 430 431 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 432 433 mask <<= bitidx; 434 flags <<= bitidx; 435 436 word = READ_ONCE(bitmap[word_bitidx]); 437 do { 438 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); 439 } 440 441 void set_pageblock_migratetype(struct page *page, int migratetype) 442 { 443 if (unlikely(page_group_by_mobility_disabled && 444 migratetype < MIGRATE_PCPTYPES)) 445 migratetype = MIGRATE_UNMOVABLE; 446 447 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 448 page_to_pfn(page), MIGRATETYPE_MASK); 449 } 450 451 #ifdef CONFIG_DEBUG_VM 452 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 453 { 454 int ret; 455 unsigned seq; 456 unsigned long pfn = page_to_pfn(page); 457 unsigned long sp, start_pfn; 458 459 do { 460 seq = zone_span_seqbegin(zone); 461 start_pfn = zone->zone_start_pfn; 462 sp = zone->spanned_pages; 463 ret = !zone_spans_pfn(zone, pfn); 464 } while (zone_span_seqretry(zone, seq)); 465 466 if (ret) 467 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 468 pfn, zone_to_nid(zone), zone->name, 469 start_pfn, start_pfn + sp); 470 471 return ret; 472 } 473 474 /* 475 * Temporary debugging check for pages not lying within a given zone. 476 */ 477 static int __maybe_unused bad_range(struct zone *zone, struct page *page) 478 { 479 if (page_outside_zone_boundaries(zone, page)) 480 return 1; 481 if (zone != page_zone(page)) 482 return 1; 483 484 return 0; 485 } 486 #else 487 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) 488 { 489 return 0; 490 } 491 #endif 492 493 static void bad_page(struct page *page, const char *reason) 494 { 495 static unsigned long resume; 496 static unsigned long nr_shown; 497 static unsigned long nr_unshown; 498 499 /* 500 * Allow a burst of 60 reports, then keep quiet for that minute; 501 * or allow a steady drip of one report per second. 502 */ 503 if (nr_shown == 60) { 504 if (time_before(jiffies, resume)) { 505 nr_unshown++; 506 goto out; 507 } 508 if (nr_unshown) { 509 pr_alert( 510 "BUG: Bad page state: %lu messages suppressed\n", 511 nr_unshown); 512 nr_unshown = 0; 513 } 514 nr_shown = 0; 515 } 516 if (nr_shown++ == 0) 517 resume = jiffies + 60 * HZ; 518 519 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 520 current->comm, page_to_pfn(page)); 521 dump_page(page, reason); 522 523 print_modules(); 524 dump_stack(); 525 out: 526 /* Leave bad fields for debug, except PageBuddy could make trouble */ 527 page_mapcount_reset(page); /* remove PageBuddy */ 528 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 529 } 530 531 static inline unsigned int order_to_pindex(int migratetype, int order) 532 { 533 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 534 if (order > PAGE_ALLOC_COSTLY_ORDER) { 535 VM_BUG_ON(order != pageblock_order); 536 return NR_LOWORDER_PCP_LISTS; 537 } 538 #else 539 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 540 #endif 541 542 return (MIGRATE_PCPTYPES * order) + migratetype; 543 } 544 545 static inline int pindex_to_order(unsigned int pindex) 546 { 547 int order = pindex / MIGRATE_PCPTYPES; 548 549 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 550 if (pindex == NR_LOWORDER_PCP_LISTS) 551 order = pageblock_order; 552 #else 553 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 554 #endif 555 556 return order; 557 } 558 559 static inline bool pcp_allowed_order(unsigned int order) 560 { 561 if (order <= PAGE_ALLOC_COSTLY_ORDER) 562 return true; 563 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 564 if (order == pageblock_order) 565 return true; 566 #endif 567 return false; 568 } 569 570 static inline void free_the_page(struct page *page, unsigned int order) 571 { 572 if (pcp_allowed_order(order)) /* Via pcp? */ 573 free_unref_page(page, order); 574 else 575 __free_pages_ok(page, order, FPI_NONE); 576 } 577 578 /* 579 * Higher-order pages are called "compound pages". They are structured thusly: 580 * 581 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 582 * 583 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 584 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 585 * 586 * The first tail page's ->compound_dtor holds the offset in array of compound 587 * page destructors. See compound_page_dtors. 588 * 589 * The first tail page's ->compound_order holds the order of allocation. 590 * This usage means that zero-order pages may not be compound. 591 */ 592 593 void free_compound_page(struct page *page) 594 { 595 mem_cgroup_uncharge(page_folio(page)); 596 free_the_page(page, compound_order(page)); 597 } 598 599 void prep_compound_page(struct page *page, unsigned int order) 600 { 601 int i; 602 int nr_pages = 1 << order; 603 604 __SetPageHead(page); 605 for (i = 1; i < nr_pages; i++) 606 prep_compound_tail(page, i); 607 608 prep_compound_head(page, order); 609 } 610 611 void destroy_large_folio(struct folio *folio) 612 { 613 enum compound_dtor_id dtor = folio->_folio_dtor; 614 615 VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio); 616 compound_page_dtors[dtor](&folio->page); 617 } 618 619 static inline void set_buddy_order(struct page *page, unsigned int order) 620 { 621 set_page_private(page, order); 622 __SetPageBuddy(page); 623 } 624 625 #ifdef CONFIG_COMPACTION 626 static inline struct capture_control *task_capc(struct zone *zone) 627 { 628 struct capture_control *capc = current->capture_control; 629 630 return unlikely(capc) && 631 !(current->flags & PF_KTHREAD) && 632 !capc->page && 633 capc->cc->zone == zone ? capc : NULL; 634 } 635 636 static inline bool 637 compaction_capture(struct capture_control *capc, struct page *page, 638 int order, int migratetype) 639 { 640 if (!capc || order != capc->cc->order) 641 return false; 642 643 /* Do not accidentally pollute CMA or isolated regions*/ 644 if (is_migrate_cma(migratetype) || 645 is_migrate_isolate(migratetype)) 646 return false; 647 648 /* 649 * Do not let lower order allocations pollute a movable pageblock. 650 * This might let an unmovable request use a reclaimable pageblock 651 * and vice-versa but no more than normal fallback logic which can 652 * have trouble finding a high-order free page. 653 */ 654 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) 655 return false; 656 657 capc->page = page; 658 return true; 659 } 660 661 #else 662 static inline struct capture_control *task_capc(struct zone *zone) 663 { 664 return NULL; 665 } 666 667 static inline bool 668 compaction_capture(struct capture_control *capc, struct page *page, 669 int order, int migratetype) 670 { 671 return false; 672 } 673 #endif /* CONFIG_COMPACTION */ 674 675 /* Used for pages not on another list */ 676 static inline void add_to_free_list(struct page *page, struct zone *zone, 677 unsigned int order, int migratetype) 678 { 679 struct free_area *area = &zone->free_area[order]; 680 681 list_add(&page->buddy_list, &area->free_list[migratetype]); 682 area->nr_free++; 683 } 684 685 /* Used for pages not on another list */ 686 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, 687 unsigned int order, int migratetype) 688 { 689 struct free_area *area = &zone->free_area[order]; 690 691 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 692 area->nr_free++; 693 } 694 695 /* 696 * Used for pages which are on another list. Move the pages to the tail 697 * of the list - so the moved pages won't immediately be considered for 698 * allocation again (e.g., optimization for memory onlining). 699 */ 700 static inline void move_to_free_list(struct page *page, struct zone *zone, 701 unsigned int order, int migratetype) 702 { 703 struct free_area *area = &zone->free_area[order]; 704 705 list_move_tail(&page->buddy_list, &area->free_list[migratetype]); 706 } 707 708 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 709 unsigned int order) 710 { 711 /* clear reported state and update reported page count */ 712 if (page_reported(page)) 713 __ClearPageReported(page); 714 715 list_del(&page->buddy_list); 716 __ClearPageBuddy(page); 717 set_page_private(page, 0); 718 zone->free_area[order].nr_free--; 719 } 720 721 static inline struct page *get_page_from_free_area(struct free_area *area, 722 int migratetype) 723 { 724 return list_first_entry_or_null(&area->free_list[migratetype], 725 struct page, buddy_list); 726 } 727 728 /* 729 * If this is not the largest possible page, check if the buddy 730 * of the next-highest order is free. If it is, it's possible 731 * that pages are being freed that will coalesce soon. In case, 732 * that is happening, add the free page to the tail of the list 733 * so it's less likely to be used soon and more likely to be merged 734 * as a higher order page 735 */ 736 static inline bool 737 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 738 struct page *page, unsigned int order) 739 { 740 unsigned long higher_page_pfn; 741 struct page *higher_page; 742 743 if (order >= MAX_ORDER - 1) 744 return false; 745 746 higher_page_pfn = buddy_pfn & pfn; 747 higher_page = page + (higher_page_pfn - pfn); 748 749 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 750 NULL) != NULL; 751 } 752 753 /* 754 * Freeing function for a buddy system allocator. 755 * 756 * The concept of a buddy system is to maintain direct-mapped table 757 * (containing bit values) for memory blocks of various "orders". 758 * The bottom level table contains the map for the smallest allocatable 759 * units of memory (here, pages), and each level above it describes 760 * pairs of units from the levels below, hence, "buddies". 761 * At a high level, all that happens here is marking the table entry 762 * at the bottom level available, and propagating the changes upward 763 * as necessary, plus some accounting needed to play nicely with other 764 * parts of the VM system. 765 * At each level, we keep a list of pages, which are heads of continuous 766 * free pages of length of (1 << order) and marked with PageBuddy. 767 * Page's order is recorded in page_private(page) field. 768 * So when we are allocating or freeing one, we can derive the state of the 769 * other. That is, if we allocate a small block, and both were 770 * free, the remainder of the region must be split into blocks. 771 * If a block is freed, and its buddy is also free, then this 772 * triggers coalescing into a block of larger size. 773 * 774 * -- nyc 775 */ 776 777 static inline void __free_one_page(struct page *page, 778 unsigned long pfn, 779 struct zone *zone, unsigned int order, 780 int migratetype, fpi_t fpi_flags) 781 { 782 struct capture_control *capc = task_capc(zone); 783 unsigned long buddy_pfn = 0; 784 unsigned long combined_pfn; 785 struct page *buddy; 786 bool to_tail; 787 788 VM_BUG_ON(!zone_is_initialized(zone)); 789 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 790 791 VM_BUG_ON(migratetype == -1); 792 if (likely(!is_migrate_isolate(migratetype))) 793 __mod_zone_freepage_state(zone, 1 << order, migratetype); 794 795 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 796 VM_BUG_ON_PAGE(bad_range(zone, page), page); 797 798 while (order < MAX_ORDER) { 799 if (compaction_capture(capc, page, order, migratetype)) { 800 __mod_zone_freepage_state(zone, -(1 << order), 801 migratetype); 802 return; 803 } 804 805 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 806 if (!buddy) 807 goto done_merging; 808 809 if (unlikely(order >= pageblock_order)) { 810 /* 811 * We want to prevent merge between freepages on pageblock 812 * without fallbacks and normal pageblock. Without this, 813 * pageblock isolation could cause incorrect freepage or CMA 814 * accounting or HIGHATOMIC accounting. 815 */ 816 int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 817 818 if (migratetype != buddy_mt 819 && (!migratetype_is_mergeable(migratetype) || 820 !migratetype_is_mergeable(buddy_mt))) 821 goto done_merging; 822 } 823 824 /* 825 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 826 * merge with it and move up one order. 827 */ 828 if (page_is_guard(buddy)) 829 clear_page_guard(zone, buddy, order, migratetype); 830 else 831 del_page_from_free_list(buddy, zone, order); 832 combined_pfn = buddy_pfn & pfn; 833 page = page + (combined_pfn - pfn); 834 pfn = combined_pfn; 835 order++; 836 } 837 838 done_merging: 839 set_buddy_order(page, order); 840 841 if (fpi_flags & FPI_TO_TAIL) 842 to_tail = true; 843 else if (is_shuffle_order(order)) 844 to_tail = shuffle_pick_tail(); 845 else 846 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 847 848 if (to_tail) 849 add_to_free_list_tail(page, zone, order, migratetype); 850 else 851 add_to_free_list(page, zone, order, migratetype); 852 853 /* Notify page reporting subsystem of freed page */ 854 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 855 page_reporting_notify_free(order); 856 } 857 858 /** 859 * split_free_page() -- split a free page at split_pfn_offset 860 * @free_page: the original free page 861 * @order: the order of the page 862 * @split_pfn_offset: split offset within the page 863 * 864 * Return -ENOENT if the free page is changed, otherwise 0 865 * 866 * It is used when the free page crosses two pageblocks with different migratetypes 867 * at split_pfn_offset within the page. The split free page will be put into 868 * separate migratetype lists afterwards. Otherwise, the function achieves 869 * nothing. 870 */ 871 int split_free_page(struct page *free_page, 872 unsigned int order, unsigned long split_pfn_offset) 873 { 874 struct zone *zone = page_zone(free_page); 875 unsigned long free_page_pfn = page_to_pfn(free_page); 876 unsigned long pfn; 877 unsigned long flags; 878 int free_page_order; 879 int mt; 880 int ret = 0; 881 882 if (split_pfn_offset == 0) 883 return ret; 884 885 spin_lock_irqsave(&zone->lock, flags); 886 887 if (!PageBuddy(free_page) || buddy_order(free_page) != order) { 888 ret = -ENOENT; 889 goto out; 890 } 891 892 mt = get_pfnblock_migratetype(free_page, free_page_pfn); 893 if (likely(!is_migrate_isolate(mt))) 894 __mod_zone_freepage_state(zone, -(1UL << order), mt); 895 896 del_page_from_free_list(free_page, zone, order); 897 for (pfn = free_page_pfn; 898 pfn < free_page_pfn + (1UL << order);) { 899 int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn); 900 901 free_page_order = min_t(unsigned int, 902 pfn ? __ffs(pfn) : order, 903 __fls(split_pfn_offset)); 904 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order, 905 mt, FPI_NONE); 906 pfn += 1UL << free_page_order; 907 split_pfn_offset -= (1UL << free_page_order); 908 /* we have done the first part, now switch to second part */ 909 if (split_pfn_offset == 0) 910 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); 911 } 912 out: 913 spin_unlock_irqrestore(&zone->lock, flags); 914 return ret; 915 } 916 /* 917 * A bad page could be due to a number of fields. Instead of multiple branches, 918 * try and check multiple fields with one check. The caller must do a detailed 919 * check if necessary. 920 */ 921 static inline bool page_expected_state(struct page *page, 922 unsigned long check_flags) 923 { 924 if (unlikely(atomic_read(&page->_mapcount) != -1)) 925 return false; 926 927 if (unlikely((unsigned long)page->mapping | 928 page_ref_count(page) | 929 #ifdef CONFIG_MEMCG 930 page->memcg_data | 931 #endif 932 (page->flags & check_flags))) 933 return false; 934 935 return true; 936 } 937 938 static const char *page_bad_reason(struct page *page, unsigned long flags) 939 { 940 const char *bad_reason = NULL; 941 942 if (unlikely(atomic_read(&page->_mapcount) != -1)) 943 bad_reason = "nonzero mapcount"; 944 if (unlikely(page->mapping != NULL)) 945 bad_reason = "non-NULL mapping"; 946 if (unlikely(page_ref_count(page) != 0)) 947 bad_reason = "nonzero _refcount"; 948 if (unlikely(page->flags & flags)) { 949 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 950 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 951 else 952 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 953 } 954 #ifdef CONFIG_MEMCG 955 if (unlikely(page->memcg_data)) 956 bad_reason = "page still charged to cgroup"; 957 #endif 958 return bad_reason; 959 } 960 961 static void free_page_is_bad_report(struct page *page) 962 { 963 bad_page(page, 964 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 965 } 966 967 static inline bool free_page_is_bad(struct page *page) 968 { 969 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 970 return false; 971 972 /* Something has gone sideways, find it */ 973 free_page_is_bad_report(page); 974 return true; 975 } 976 977 static inline bool is_check_pages_enabled(void) 978 { 979 return static_branch_unlikely(&check_pages_enabled); 980 } 981 982 static int free_tail_page_prepare(struct page *head_page, struct page *page) 983 { 984 struct folio *folio = (struct folio *)head_page; 985 int ret = 1; 986 987 /* 988 * We rely page->lru.next never has bit 0 set, unless the page 989 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 990 */ 991 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 992 993 if (!is_check_pages_enabled()) { 994 ret = 0; 995 goto out; 996 } 997 switch (page - head_page) { 998 case 1: 999 /* the first tail page: these may be in place of ->mapping */ 1000 if (unlikely(folio_entire_mapcount(folio))) { 1001 bad_page(page, "nonzero entire_mapcount"); 1002 goto out; 1003 } 1004 if (unlikely(atomic_read(&folio->_nr_pages_mapped))) { 1005 bad_page(page, "nonzero nr_pages_mapped"); 1006 goto out; 1007 } 1008 if (unlikely(atomic_read(&folio->_pincount))) { 1009 bad_page(page, "nonzero pincount"); 1010 goto out; 1011 } 1012 break; 1013 case 2: 1014 /* 1015 * the second tail page: ->mapping is 1016 * deferred_list.next -- ignore value. 1017 */ 1018 break; 1019 default: 1020 if (page->mapping != TAIL_MAPPING) { 1021 bad_page(page, "corrupted mapping in tail page"); 1022 goto out; 1023 } 1024 break; 1025 } 1026 if (unlikely(!PageTail(page))) { 1027 bad_page(page, "PageTail not set"); 1028 goto out; 1029 } 1030 if (unlikely(compound_head(page) != head_page)) { 1031 bad_page(page, "compound_head not consistent"); 1032 goto out; 1033 } 1034 ret = 0; 1035 out: 1036 page->mapping = NULL; 1037 clear_compound_head(page); 1038 return ret; 1039 } 1040 1041 /* 1042 * Skip KASAN memory poisoning when either: 1043 * 1044 * 1. For generic KASAN: deferred memory initialization has not yet completed. 1045 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1046 * using page tags instead (see below). 1047 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1048 * that error detection is disabled for accesses via the page address. 1049 * 1050 * Pages will have match-all tags in the following circumstances: 1051 * 1052 * 1. Pages are being initialized for the first time, including during deferred 1053 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1054 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1055 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1056 * 3. The allocation was excluded from being checked due to sampling, 1057 * see the call to kasan_unpoison_pages. 1058 * 1059 * Poisoning pages during deferred memory init will greatly lengthen the 1060 * process and cause problem in large memory systems as the deferred pages 1061 * initialization is done with interrupt disabled. 1062 * 1063 * Assuming that there will be no reference to those newly initialized 1064 * pages before they are ever allocated, this should have no effect on 1065 * KASAN memory tracking as the poison will be properly inserted at page 1066 * allocation time. The only corner case is when pages are allocated by 1067 * on-demand allocation and then freed again before the deferred pages 1068 * initialization is done, but this is not likely to happen. 1069 */ 1070 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) 1071 { 1072 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1073 return deferred_pages_enabled(); 1074 1075 return page_kasan_tag(page) == 0xff; 1076 } 1077 1078 static void kernel_init_pages(struct page *page, int numpages) 1079 { 1080 int i; 1081 1082 /* s390's use of memset() could override KASAN redzones. */ 1083 kasan_disable_current(); 1084 for (i = 0; i < numpages; i++) 1085 clear_highpage_kasan_tagged(page + i); 1086 kasan_enable_current(); 1087 } 1088 1089 static __always_inline bool free_pages_prepare(struct page *page, 1090 unsigned int order, fpi_t fpi_flags) 1091 { 1092 int bad = 0; 1093 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); 1094 bool init = want_init_on_free(); 1095 1096 VM_BUG_ON_PAGE(PageTail(page), page); 1097 1098 trace_mm_page_free(page, order); 1099 kmsan_free_page(page, order); 1100 1101 if (unlikely(PageHWPoison(page)) && !order) { 1102 /* 1103 * Do not let hwpoison pages hit pcplists/buddy 1104 * Untie memcg state and reset page's owner 1105 */ 1106 if (memcg_kmem_online() && PageMemcgKmem(page)) 1107 __memcg_kmem_uncharge_page(page, order); 1108 reset_page_owner(page, order); 1109 page_table_check_free(page, order); 1110 return false; 1111 } 1112 1113 /* 1114 * Check tail pages before head page information is cleared to 1115 * avoid checking PageCompound for order-0 pages. 1116 */ 1117 if (unlikely(order)) { 1118 bool compound = PageCompound(page); 1119 int i; 1120 1121 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1122 1123 if (compound) 1124 ClearPageHasHWPoisoned(page); 1125 for (i = 1; i < (1 << order); i++) { 1126 if (compound) 1127 bad += free_tail_page_prepare(page, page + i); 1128 if (is_check_pages_enabled()) { 1129 if (free_page_is_bad(page + i)) { 1130 bad++; 1131 continue; 1132 } 1133 } 1134 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1135 } 1136 } 1137 if (PageMappingFlags(page)) 1138 page->mapping = NULL; 1139 if (memcg_kmem_online() && PageMemcgKmem(page)) 1140 __memcg_kmem_uncharge_page(page, order); 1141 if (is_check_pages_enabled()) { 1142 if (free_page_is_bad(page)) 1143 bad++; 1144 if (bad) 1145 return false; 1146 } 1147 1148 page_cpupid_reset_last(page); 1149 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1150 reset_page_owner(page, order); 1151 page_table_check_free(page, order); 1152 1153 if (!PageHighMem(page)) { 1154 debug_check_no_locks_freed(page_address(page), 1155 PAGE_SIZE << order); 1156 debug_check_no_obj_freed(page_address(page), 1157 PAGE_SIZE << order); 1158 } 1159 1160 kernel_poison_pages(page, 1 << order); 1161 1162 /* 1163 * As memory initialization might be integrated into KASAN, 1164 * KASAN poisoning and memory initialization code must be 1165 * kept together to avoid discrepancies in behavior. 1166 * 1167 * With hardware tag-based KASAN, memory tags must be set before the 1168 * page becomes unavailable via debug_pagealloc or arch_free_page. 1169 */ 1170 if (!skip_kasan_poison) { 1171 kasan_poison_pages(page, order, init); 1172 1173 /* Memory is already initialized if KASAN did it internally. */ 1174 if (kasan_has_integrated_init()) 1175 init = false; 1176 } 1177 if (init) 1178 kernel_init_pages(page, 1 << order); 1179 1180 /* 1181 * arch_free_page() can make the page's contents inaccessible. s390 1182 * does this. So nothing which can access the page's contents should 1183 * happen after this. 1184 */ 1185 arch_free_page(page, order); 1186 1187 debug_pagealloc_unmap_pages(page, 1 << order); 1188 1189 return true; 1190 } 1191 1192 /* 1193 * Frees a number of pages from the PCP lists 1194 * Assumes all pages on list are in same zone. 1195 * count is the number of pages to free. 1196 */ 1197 static void free_pcppages_bulk(struct zone *zone, int count, 1198 struct per_cpu_pages *pcp, 1199 int pindex) 1200 { 1201 unsigned long flags; 1202 unsigned int order; 1203 bool isolated_pageblocks; 1204 struct page *page; 1205 1206 /* 1207 * Ensure proper count is passed which otherwise would stuck in the 1208 * below while (list_empty(list)) loop. 1209 */ 1210 count = min(pcp->count, count); 1211 1212 /* Ensure requested pindex is drained first. */ 1213 pindex = pindex - 1; 1214 1215 spin_lock_irqsave(&zone->lock, flags); 1216 isolated_pageblocks = has_isolate_pageblock(zone); 1217 1218 while (count > 0) { 1219 struct list_head *list; 1220 int nr_pages; 1221 1222 /* Remove pages from lists in a round-robin fashion. */ 1223 do { 1224 if (++pindex > NR_PCP_LISTS - 1) 1225 pindex = 0; 1226 list = &pcp->lists[pindex]; 1227 } while (list_empty(list)); 1228 1229 order = pindex_to_order(pindex); 1230 nr_pages = 1 << order; 1231 do { 1232 int mt; 1233 1234 page = list_last_entry(list, struct page, pcp_list); 1235 mt = get_pcppage_migratetype(page); 1236 1237 /* must delete to avoid corrupting pcp list */ 1238 list_del(&page->pcp_list); 1239 count -= nr_pages; 1240 pcp->count -= nr_pages; 1241 1242 /* MIGRATE_ISOLATE page should not go to pcplists */ 1243 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); 1244 /* Pageblock could have been isolated meanwhile */ 1245 if (unlikely(isolated_pageblocks)) 1246 mt = get_pageblock_migratetype(page); 1247 1248 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); 1249 trace_mm_page_pcpu_drain(page, order, mt); 1250 } while (count > 0 && !list_empty(list)); 1251 } 1252 1253 spin_unlock_irqrestore(&zone->lock, flags); 1254 } 1255 1256 static void free_one_page(struct zone *zone, 1257 struct page *page, unsigned long pfn, 1258 unsigned int order, 1259 int migratetype, fpi_t fpi_flags) 1260 { 1261 unsigned long flags; 1262 1263 spin_lock_irqsave(&zone->lock, flags); 1264 if (unlikely(has_isolate_pageblock(zone) || 1265 is_migrate_isolate(migratetype))) { 1266 migratetype = get_pfnblock_migratetype(page, pfn); 1267 } 1268 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1269 spin_unlock_irqrestore(&zone->lock, flags); 1270 } 1271 1272 static void __free_pages_ok(struct page *page, unsigned int order, 1273 fpi_t fpi_flags) 1274 { 1275 unsigned long flags; 1276 int migratetype; 1277 unsigned long pfn = page_to_pfn(page); 1278 struct zone *zone = page_zone(page); 1279 1280 if (!free_pages_prepare(page, order, fpi_flags)) 1281 return; 1282 1283 /* 1284 * Calling get_pfnblock_migratetype() without spin_lock_irqsave() here 1285 * is used to avoid calling get_pfnblock_migratetype() under the lock. 1286 * This will reduce the lock holding time. 1287 */ 1288 migratetype = get_pfnblock_migratetype(page, pfn); 1289 1290 spin_lock_irqsave(&zone->lock, flags); 1291 if (unlikely(has_isolate_pageblock(zone) || 1292 is_migrate_isolate(migratetype))) { 1293 migratetype = get_pfnblock_migratetype(page, pfn); 1294 } 1295 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1296 spin_unlock_irqrestore(&zone->lock, flags); 1297 1298 __count_vm_events(PGFREE, 1 << order); 1299 } 1300 1301 void __free_pages_core(struct page *page, unsigned int order) 1302 { 1303 unsigned int nr_pages = 1 << order; 1304 struct page *p = page; 1305 unsigned int loop; 1306 1307 /* 1308 * When initializing the memmap, __init_single_page() sets the refcount 1309 * of all pages to 1 ("allocated"/"not free"). We have to set the 1310 * refcount of all involved pages to 0. 1311 */ 1312 prefetchw(p); 1313 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { 1314 prefetchw(p + 1); 1315 __ClearPageReserved(p); 1316 set_page_count(p, 0); 1317 } 1318 __ClearPageReserved(p); 1319 set_page_count(p, 0); 1320 1321 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1322 1323 if (page_contains_unaccepted(page, order)) { 1324 if (order == MAX_ORDER && __free_unaccepted(page)) 1325 return; 1326 1327 accept_page(page, order); 1328 } 1329 1330 /* 1331 * Bypass PCP and place fresh pages right to the tail, primarily 1332 * relevant for memory onlining. 1333 */ 1334 __free_pages_ok(page, order, FPI_TO_TAIL); 1335 } 1336 1337 /* 1338 * Check that the whole (or subset of) a pageblock given by the interval of 1339 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1340 * with the migration of free compaction scanner. 1341 * 1342 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1343 * 1344 * It's possible on some configurations to have a setup like node0 node1 node0 1345 * i.e. it's possible that all pages within a zones range of pages do not 1346 * belong to a single zone. We assume that a border between node0 and node1 1347 * can occur within a single pageblock, but not a node0 node1 node0 1348 * interleaving within a single pageblock. It is therefore sufficient to check 1349 * the first and last page of a pageblock and avoid checking each individual 1350 * page in a pageblock. 1351 * 1352 * Note: the function may return non-NULL struct page even for a page block 1353 * which contains a memory hole (i.e. there is no physical memory for a subset 1354 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which 1355 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1356 * even though the start pfn is online and valid. This should be safe most of 1357 * the time because struct pages are still initialized via init_unavailable_range() 1358 * and pfn walkers shouldn't touch any physical memory range for which they do 1359 * not recognize any specific metadata in struct pages. 1360 */ 1361 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1362 unsigned long end_pfn, struct zone *zone) 1363 { 1364 struct page *start_page; 1365 struct page *end_page; 1366 1367 /* end_pfn is one past the range we are checking */ 1368 end_pfn--; 1369 1370 if (!pfn_valid(end_pfn)) 1371 return NULL; 1372 1373 start_page = pfn_to_online_page(start_pfn); 1374 if (!start_page) 1375 return NULL; 1376 1377 if (page_zone(start_page) != zone) 1378 return NULL; 1379 1380 end_page = pfn_to_page(end_pfn); 1381 1382 /* This gives a shorter code than deriving page_zone(end_page) */ 1383 if (page_zone_id(start_page) != page_zone_id(end_page)) 1384 return NULL; 1385 1386 return start_page; 1387 } 1388 1389 /* 1390 * The order of subdivision here is critical for the IO subsystem. 1391 * Please do not alter this order without good reasons and regression 1392 * testing. Specifically, as large blocks of memory are subdivided, 1393 * the order in which smaller blocks are delivered depends on the order 1394 * they're subdivided in this function. This is the primary factor 1395 * influencing the order in which pages are delivered to the IO 1396 * subsystem according to empirical testing, and this is also justified 1397 * by considering the behavior of a buddy system containing a single 1398 * large block of memory acted on by a series of small allocations. 1399 * This behavior is a critical factor in sglist merging's success. 1400 * 1401 * -- nyc 1402 */ 1403 static inline void expand(struct zone *zone, struct page *page, 1404 int low, int high, int migratetype) 1405 { 1406 unsigned long size = 1 << high; 1407 1408 while (high > low) { 1409 high--; 1410 size >>= 1; 1411 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1412 1413 /* 1414 * Mark as guard pages (or page), that will allow to 1415 * merge back to allocator when buddy will be freed. 1416 * Corresponding page table entries will not be touched, 1417 * pages will stay not present in virtual address space 1418 */ 1419 if (set_page_guard(zone, &page[size], high, migratetype)) 1420 continue; 1421 1422 add_to_free_list(&page[size], zone, high, migratetype); 1423 set_buddy_order(&page[size], high); 1424 } 1425 } 1426 1427 static void check_new_page_bad(struct page *page) 1428 { 1429 if (unlikely(page->flags & __PG_HWPOISON)) { 1430 /* Don't complain about hwpoisoned pages */ 1431 page_mapcount_reset(page); /* remove PageBuddy */ 1432 return; 1433 } 1434 1435 bad_page(page, 1436 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1437 } 1438 1439 /* 1440 * This page is about to be returned from the page allocator 1441 */ 1442 static int check_new_page(struct page *page) 1443 { 1444 if (likely(page_expected_state(page, 1445 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1446 return 0; 1447 1448 check_new_page_bad(page); 1449 return 1; 1450 } 1451 1452 static inline bool check_new_pages(struct page *page, unsigned int order) 1453 { 1454 if (is_check_pages_enabled()) { 1455 for (int i = 0; i < (1 << order); i++) { 1456 struct page *p = page + i; 1457 1458 if (check_new_page(p)) 1459 return true; 1460 } 1461 } 1462 1463 return false; 1464 } 1465 1466 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1467 { 1468 /* Don't skip if a software KASAN mode is enabled. */ 1469 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1470 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1471 return false; 1472 1473 /* Skip, if hardware tag-based KASAN is not enabled. */ 1474 if (!kasan_hw_tags_enabled()) 1475 return true; 1476 1477 /* 1478 * With hardware tag-based KASAN enabled, skip if this has been 1479 * requested via __GFP_SKIP_KASAN. 1480 */ 1481 return flags & __GFP_SKIP_KASAN; 1482 } 1483 1484 static inline bool should_skip_init(gfp_t flags) 1485 { 1486 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1487 if (!kasan_hw_tags_enabled()) 1488 return false; 1489 1490 /* For hardware tag-based KASAN, skip if requested. */ 1491 return (flags & __GFP_SKIP_ZERO); 1492 } 1493 1494 inline void post_alloc_hook(struct page *page, unsigned int order, 1495 gfp_t gfp_flags) 1496 { 1497 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1498 !should_skip_init(gfp_flags); 1499 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1500 int i; 1501 1502 set_page_private(page, 0); 1503 set_page_refcounted(page); 1504 1505 arch_alloc_page(page, order); 1506 debug_pagealloc_map_pages(page, 1 << order); 1507 1508 /* 1509 * Page unpoisoning must happen before memory initialization. 1510 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1511 * allocations and the page unpoisoning code will complain. 1512 */ 1513 kernel_unpoison_pages(page, 1 << order); 1514 1515 /* 1516 * As memory initialization might be integrated into KASAN, 1517 * KASAN unpoisoning and memory initializion code must be 1518 * kept together to avoid discrepancies in behavior. 1519 */ 1520 1521 /* 1522 * If memory tags should be zeroed 1523 * (which happens only when memory should be initialized as well). 1524 */ 1525 if (zero_tags) { 1526 /* Initialize both memory and memory tags. */ 1527 for (i = 0; i != 1 << order; ++i) 1528 tag_clear_highpage(page + i); 1529 1530 /* Take note that memory was initialized by the loop above. */ 1531 init = false; 1532 } 1533 if (!should_skip_kasan_unpoison(gfp_flags) && 1534 kasan_unpoison_pages(page, order, init)) { 1535 /* Take note that memory was initialized by KASAN. */ 1536 if (kasan_has_integrated_init()) 1537 init = false; 1538 } else { 1539 /* 1540 * If memory tags have not been set by KASAN, reset the page 1541 * tags to ensure page_address() dereferencing does not fault. 1542 */ 1543 for (i = 0; i != 1 << order; ++i) 1544 page_kasan_tag_reset(page + i); 1545 } 1546 /* If memory is still not initialized, initialize it now. */ 1547 if (init) 1548 kernel_init_pages(page, 1 << order); 1549 1550 set_page_owner(page, order, gfp_flags); 1551 page_table_check_alloc(page, order); 1552 } 1553 1554 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1555 unsigned int alloc_flags) 1556 { 1557 post_alloc_hook(page, order, gfp_flags); 1558 1559 if (order && (gfp_flags & __GFP_COMP)) 1560 prep_compound_page(page, order); 1561 1562 /* 1563 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1564 * allocate the page. The expectation is that the caller is taking 1565 * steps that will free more memory. The caller should avoid the page 1566 * being used for !PFMEMALLOC purposes. 1567 */ 1568 if (alloc_flags & ALLOC_NO_WATERMARKS) 1569 set_page_pfmemalloc(page); 1570 else 1571 clear_page_pfmemalloc(page); 1572 } 1573 1574 /* 1575 * Go through the free lists for the given migratetype and remove 1576 * the smallest available page from the freelists 1577 */ 1578 static __always_inline 1579 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1580 int migratetype) 1581 { 1582 unsigned int current_order; 1583 struct free_area *area; 1584 struct page *page; 1585 1586 /* Find a page of the appropriate size in the preferred list */ 1587 for (current_order = order; current_order <= MAX_ORDER; ++current_order) { 1588 area = &(zone->free_area[current_order]); 1589 page = get_page_from_free_area(area, migratetype); 1590 if (!page) 1591 continue; 1592 del_page_from_free_list(page, zone, current_order); 1593 expand(zone, page, order, current_order, migratetype); 1594 set_pcppage_migratetype(page, migratetype); 1595 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1596 pcp_allowed_order(order) && 1597 migratetype < MIGRATE_PCPTYPES); 1598 return page; 1599 } 1600 1601 return NULL; 1602 } 1603 1604 1605 /* 1606 * This array describes the order lists are fallen back to when 1607 * the free lists for the desirable migrate type are depleted 1608 * 1609 * The other migratetypes do not have fallbacks. 1610 */ 1611 static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = { 1612 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1613 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1614 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1615 }; 1616 1617 #ifdef CONFIG_CMA 1618 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1619 unsigned int order) 1620 { 1621 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1622 } 1623 #else 1624 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1625 unsigned int order) { return NULL; } 1626 #endif 1627 1628 /* 1629 * Move the free pages in a range to the freelist tail of the requested type. 1630 * Note that start_page and end_pages are not aligned on a pageblock 1631 * boundary. If alignment is required, use move_freepages_block() 1632 */ 1633 static int move_freepages(struct zone *zone, 1634 unsigned long start_pfn, unsigned long end_pfn, 1635 int migratetype, int *num_movable) 1636 { 1637 struct page *page; 1638 unsigned long pfn; 1639 unsigned int order; 1640 int pages_moved = 0; 1641 1642 for (pfn = start_pfn; pfn <= end_pfn;) { 1643 page = pfn_to_page(pfn); 1644 if (!PageBuddy(page)) { 1645 /* 1646 * We assume that pages that could be isolated for 1647 * migration are movable. But we don't actually try 1648 * isolating, as that would be expensive. 1649 */ 1650 if (num_movable && 1651 (PageLRU(page) || __PageMovable(page))) 1652 (*num_movable)++; 1653 pfn++; 1654 continue; 1655 } 1656 1657 /* Make sure we are not inadvertently changing nodes */ 1658 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1659 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1660 1661 order = buddy_order(page); 1662 move_to_free_list(page, zone, order, migratetype); 1663 pfn += 1 << order; 1664 pages_moved += 1 << order; 1665 } 1666 1667 return pages_moved; 1668 } 1669 1670 int move_freepages_block(struct zone *zone, struct page *page, 1671 int migratetype, int *num_movable) 1672 { 1673 unsigned long start_pfn, end_pfn, pfn; 1674 1675 if (num_movable) 1676 *num_movable = 0; 1677 1678 pfn = page_to_pfn(page); 1679 start_pfn = pageblock_start_pfn(pfn); 1680 end_pfn = pageblock_end_pfn(pfn) - 1; 1681 1682 /* Do not cross zone boundaries */ 1683 if (!zone_spans_pfn(zone, start_pfn)) 1684 start_pfn = pfn; 1685 if (!zone_spans_pfn(zone, end_pfn)) 1686 return 0; 1687 1688 return move_freepages(zone, start_pfn, end_pfn, migratetype, 1689 num_movable); 1690 } 1691 1692 static void change_pageblock_range(struct page *pageblock_page, 1693 int start_order, int migratetype) 1694 { 1695 int nr_pageblocks = 1 << (start_order - pageblock_order); 1696 1697 while (nr_pageblocks--) { 1698 set_pageblock_migratetype(pageblock_page, migratetype); 1699 pageblock_page += pageblock_nr_pages; 1700 } 1701 } 1702 1703 /* 1704 * When we are falling back to another migratetype during allocation, try to 1705 * steal extra free pages from the same pageblocks to satisfy further 1706 * allocations, instead of polluting multiple pageblocks. 1707 * 1708 * If we are stealing a relatively large buddy page, it is likely there will 1709 * be more free pages in the pageblock, so try to steal them all. For 1710 * reclaimable and unmovable allocations, we steal regardless of page size, 1711 * as fragmentation caused by those allocations polluting movable pageblocks 1712 * is worse than movable allocations stealing from unmovable and reclaimable 1713 * pageblocks. 1714 */ 1715 static bool can_steal_fallback(unsigned int order, int start_mt) 1716 { 1717 /* 1718 * Leaving this order check is intended, although there is 1719 * relaxed order check in next check. The reason is that 1720 * we can actually steal whole pageblock if this condition met, 1721 * but, below check doesn't guarantee it and that is just heuristic 1722 * so could be changed anytime. 1723 */ 1724 if (order >= pageblock_order) 1725 return true; 1726 1727 if (order >= pageblock_order / 2 || 1728 start_mt == MIGRATE_RECLAIMABLE || 1729 start_mt == MIGRATE_UNMOVABLE || 1730 page_group_by_mobility_disabled) 1731 return true; 1732 1733 return false; 1734 } 1735 1736 static inline bool boost_watermark(struct zone *zone) 1737 { 1738 unsigned long max_boost; 1739 1740 if (!watermark_boost_factor) 1741 return false; 1742 /* 1743 * Don't bother in zones that are unlikely to produce results. 1744 * On small machines, including kdump capture kernels running 1745 * in a small area, boosting the watermark can cause an out of 1746 * memory situation immediately. 1747 */ 1748 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 1749 return false; 1750 1751 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 1752 watermark_boost_factor, 10000); 1753 1754 /* 1755 * high watermark may be uninitialised if fragmentation occurs 1756 * very early in boot so do not boost. We do not fall 1757 * through and boost by pageblock_nr_pages as failing 1758 * allocations that early means that reclaim is not going 1759 * to help and it may even be impossible to reclaim the 1760 * boosted watermark resulting in a hang. 1761 */ 1762 if (!max_boost) 1763 return false; 1764 1765 max_boost = max(pageblock_nr_pages, max_boost); 1766 1767 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 1768 max_boost); 1769 1770 return true; 1771 } 1772 1773 /* 1774 * This function implements actual steal behaviour. If order is large enough, 1775 * we can steal whole pageblock. If not, we first move freepages in this 1776 * pageblock to our migratetype and determine how many already-allocated pages 1777 * are there in the pageblock with a compatible migratetype. If at least half 1778 * of pages are free or compatible, we can change migratetype of the pageblock 1779 * itself, so pages freed in the future will be put on the correct free list. 1780 */ 1781 static void steal_suitable_fallback(struct zone *zone, struct page *page, 1782 unsigned int alloc_flags, int start_type, bool whole_block) 1783 { 1784 unsigned int current_order = buddy_order(page); 1785 int free_pages, movable_pages, alike_pages; 1786 int old_block_type; 1787 1788 old_block_type = get_pageblock_migratetype(page); 1789 1790 /* 1791 * This can happen due to races and we want to prevent broken 1792 * highatomic accounting. 1793 */ 1794 if (is_migrate_highatomic(old_block_type)) 1795 goto single_page; 1796 1797 /* Take ownership for orders >= pageblock_order */ 1798 if (current_order >= pageblock_order) { 1799 change_pageblock_range(page, current_order, start_type); 1800 goto single_page; 1801 } 1802 1803 /* 1804 * Boost watermarks to increase reclaim pressure to reduce the 1805 * likelihood of future fallbacks. Wake kswapd now as the node 1806 * may be balanced overall and kswapd will not wake naturally. 1807 */ 1808 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 1809 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 1810 1811 /* We are not allowed to try stealing from the whole block */ 1812 if (!whole_block) 1813 goto single_page; 1814 1815 free_pages = move_freepages_block(zone, page, start_type, 1816 &movable_pages); 1817 /* moving whole block can fail due to zone boundary conditions */ 1818 if (!free_pages) 1819 goto single_page; 1820 1821 /* 1822 * Determine how many pages are compatible with our allocation. 1823 * For movable allocation, it's the number of movable pages which 1824 * we just obtained. For other types it's a bit more tricky. 1825 */ 1826 if (start_type == MIGRATE_MOVABLE) { 1827 alike_pages = movable_pages; 1828 } else { 1829 /* 1830 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 1831 * to MOVABLE pageblock, consider all non-movable pages as 1832 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 1833 * vice versa, be conservative since we can't distinguish the 1834 * exact migratetype of non-movable pages. 1835 */ 1836 if (old_block_type == MIGRATE_MOVABLE) 1837 alike_pages = pageblock_nr_pages 1838 - (free_pages + movable_pages); 1839 else 1840 alike_pages = 0; 1841 } 1842 /* 1843 * If a sufficient number of pages in the block are either free or of 1844 * compatible migratability as our allocation, claim the whole block. 1845 */ 1846 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 1847 page_group_by_mobility_disabled) 1848 set_pageblock_migratetype(page, start_type); 1849 1850 return; 1851 1852 single_page: 1853 move_to_free_list(page, zone, current_order, start_type); 1854 } 1855 1856 /* 1857 * Check whether there is a suitable fallback freepage with requested order. 1858 * If only_stealable is true, this function returns fallback_mt only if 1859 * we can steal other freepages all together. This would help to reduce 1860 * fragmentation due to mixed migratetype pages in one pageblock. 1861 */ 1862 int find_suitable_fallback(struct free_area *area, unsigned int order, 1863 int migratetype, bool only_stealable, bool *can_steal) 1864 { 1865 int i; 1866 int fallback_mt; 1867 1868 if (area->nr_free == 0) 1869 return -1; 1870 1871 *can_steal = false; 1872 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 1873 fallback_mt = fallbacks[migratetype][i]; 1874 if (free_area_empty(area, fallback_mt)) 1875 continue; 1876 1877 if (can_steal_fallback(order, migratetype)) 1878 *can_steal = true; 1879 1880 if (!only_stealable) 1881 return fallback_mt; 1882 1883 if (*can_steal) 1884 return fallback_mt; 1885 } 1886 1887 return -1; 1888 } 1889 1890 /* 1891 * Reserve a pageblock for exclusive use of high-order atomic allocations if 1892 * there are no empty page blocks that contain a page with a suitable order 1893 */ 1894 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) 1895 { 1896 int mt; 1897 unsigned long max_managed, flags; 1898 1899 /* 1900 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 1901 * Check is race-prone but harmless. 1902 */ 1903 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 1904 if (zone->nr_reserved_highatomic >= max_managed) 1905 return; 1906 1907 spin_lock_irqsave(&zone->lock, flags); 1908 1909 /* Recheck the nr_reserved_highatomic limit under the lock */ 1910 if (zone->nr_reserved_highatomic >= max_managed) 1911 goto out_unlock; 1912 1913 /* Yoink! */ 1914 mt = get_pageblock_migratetype(page); 1915 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 1916 if (migratetype_is_mergeable(mt)) { 1917 zone->nr_reserved_highatomic += pageblock_nr_pages; 1918 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 1919 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 1920 } 1921 1922 out_unlock: 1923 spin_unlock_irqrestore(&zone->lock, flags); 1924 } 1925 1926 /* 1927 * Used when an allocation is about to fail under memory pressure. This 1928 * potentially hurts the reliability of high-order allocations when under 1929 * intense memory pressure but failed atomic allocations should be easier 1930 * to recover from than an OOM. 1931 * 1932 * If @force is true, try to unreserve a pageblock even though highatomic 1933 * pageblock is exhausted. 1934 */ 1935 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 1936 bool force) 1937 { 1938 struct zonelist *zonelist = ac->zonelist; 1939 unsigned long flags; 1940 struct zoneref *z; 1941 struct zone *zone; 1942 struct page *page; 1943 int order; 1944 bool ret; 1945 1946 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 1947 ac->nodemask) { 1948 /* 1949 * Preserve at least one pageblock unless memory pressure 1950 * is really high. 1951 */ 1952 if (!force && zone->nr_reserved_highatomic <= 1953 pageblock_nr_pages) 1954 continue; 1955 1956 spin_lock_irqsave(&zone->lock, flags); 1957 for (order = 0; order <= MAX_ORDER; order++) { 1958 struct free_area *area = &(zone->free_area[order]); 1959 1960 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 1961 if (!page) 1962 continue; 1963 1964 /* 1965 * In page freeing path, migratetype change is racy so 1966 * we can counter several free pages in a pageblock 1967 * in this loop although we changed the pageblock type 1968 * from highatomic to ac->migratetype. So we should 1969 * adjust the count once. 1970 */ 1971 if (is_migrate_highatomic_page(page)) { 1972 /* 1973 * It should never happen but changes to 1974 * locking could inadvertently allow a per-cpu 1975 * drain to add pages to MIGRATE_HIGHATOMIC 1976 * while unreserving so be safe and watch for 1977 * underflows. 1978 */ 1979 zone->nr_reserved_highatomic -= min( 1980 pageblock_nr_pages, 1981 zone->nr_reserved_highatomic); 1982 } 1983 1984 /* 1985 * Convert to ac->migratetype and avoid the normal 1986 * pageblock stealing heuristics. Minimally, the caller 1987 * is doing the work and needs the pages. More 1988 * importantly, if the block was always converted to 1989 * MIGRATE_UNMOVABLE or another type then the number 1990 * of pageblocks that cannot be completely freed 1991 * may increase. 1992 */ 1993 set_pageblock_migratetype(page, ac->migratetype); 1994 ret = move_freepages_block(zone, page, ac->migratetype, 1995 NULL); 1996 if (ret) { 1997 spin_unlock_irqrestore(&zone->lock, flags); 1998 return ret; 1999 } 2000 } 2001 spin_unlock_irqrestore(&zone->lock, flags); 2002 } 2003 2004 return false; 2005 } 2006 2007 /* 2008 * Try finding a free buddy page on the fallback list and put it on the free 2009 * list of requested migratetype, possibly along with other pages from the same 2010 * block, depending on fragmentation avoidance heuristics. Returns true if 2011 * fallback was found so that __rmqueue_smallest() can grab it. 2012 * 2013 * The use of signed ints for order and current_order is a deliberate 2014 * deviation from the rest of this file, to make the for loop 2015 * condition simpler. 2016 */ 2017 static __always_inline bool 2018 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2019 unsigned int alloc_flags) 2020 { 2021 struct free_area *area; 2022 int current_order; 2023 int min_order = order; 2024 struct page *page; 2025 int fallback_mt; 2026 bool can_steal; 2027 2028 /* 2029 * Do not steal pages from freelists belonging to other pageblocks 2030 * i.e. orders < pageblock_order. If there are no local zones free, 2031 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2032 */ 2033 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2034 min_order = pageblock_order; 2035 2036 /* 2037 * Find the largest available free page in the other list. This roughly 2038 * approximates finding the pageblock with the most free pages, which 2039 * would be too costly to do exactly. 2040 */ 2041 for (current_order = MAX_ORDER; current_order >= min_order; 2042 --current_order) { 2043 area = &(zone->free_area[current_order]); 2044 fallback_mt = find_suitable_fallback(area, current_order, 2045 start_migratetype, false, &can_steal); 2046 if (fallback_mt == -1) 2047 continue; 2048 2049 /* 2050 * We cannot steal all free pages from the pageblock and the 2051 * requested migratetype is movable. In that case it's better to 2052 * steal and split the smallest available page instead of the 2053 * largest available page, because even if the next movable 2054 * allocation falls back into a different pageblock than this 2055 * one, it won't cause permanent fragmentation. 2056 */ 2057 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2058 && current_order > order) 2059 goto find_smallest; 2060 2061 goto do_steal; 2062 } 2063 2064 return false; 2065 2066 find_smallest: 2067 for (current_order = order; current_order <= MAX_ORDER; 2068 current_order++) { 2069 area = &(zone->free_area[current_order]); 2070 fallback_mt = find_suitable_fallback(area, current_order, 2071 start_migratetype, false, &can_steal); 2072 if (fallback_mt != -1) 2073 break; 2074 } 2075 2076 /* 2077 * This should not happen - we already found a suitable fallback 2078 * when looking for the largest page. 2079 */ 2080 VM_BUG_ON(current_order > MAX_ORDER); 2081 2082 do_steal: 2083 page = get_page_from_free_area(area, fallback_mt); 2084 2085 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2086 can_steal); 2087 2088 trace_mm_page_alloc_extfrag(page, order, current_order, 2089 start_migratetype, fallback_mt); 2090 2091 return true; 2092 2093 } 2094 2095 /* 2096 * Do the hard work of removing an element from the buddy allocator. 2097 * Call me with the zone->lock already held. 2098 */ 2099 static __always_inline struct page * 2100 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2101 unsigned int alloc_flags) 2102 { 2103 struct page *page; 2104 2105 if (IS_ENABLED(CONFIG_CMA)) { 2106 /* 2107 * Balance movable allocations between regular and CMA areas by 2108 * allocating from CMA when over half of the zone's free memory 2109 * is in the CMA area. 2110 */ 2111 if (alloc_flags & ALLOC_CMA && 2112 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2113 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2114 page = __rmqueue_cma_fallback(zone, order); 2115 if (page) 2116 return page; 2117 } 2118 } 2119 retry: 2120 page = __rmqueue_smallest(zone, order, migratetype); 2121 if (unlikely(!page)) { 2122 if (alloc_flags & ALLOC_CMA) 2123 page = __rmqueue_cma_fallback(zone, order); 2124 2125 if (!page && __rmqueue_fallback(zone, order, migratetype, 2126 alloc_flags)) 2127 goto retry; 2128 } 2129 return page; 2130 } 2131 2132 /* 2133 * Obtain a specified number of elements from the buddy allocator, all under 2134 * a single hold of the lock, for efficiency. Add them to the supplied list. 2135 * Returns the number of new pages which were placed at *list. 2136 */ 2137 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2138 unsigned long count, struct list_head *list, 2139 int migratetype, unsigned int alloc_flags) 2140 { 2141 unsigned long flags; 2142 int i; 2143 2144 spin_lock_irqsave(&zone->lock, flags); 2145 for (i = 0; i < count; ++i) { 2146 struct page *page = __rmqueue(zone, order, migratetype, 2147 alloc_flags); 2148 if (unlikely(page == NULL)) 2149 break; 2150 2151 /* 2152 * Split buddy pages returned by expand() are received here in 2153 * physical page order. The page is added to the tail of 2154 * caller's list. From the callers perspective, the linked list 2155 * is ordered by page number under some conditions. This is 2156 * useful for IO devices that can forward direction from the 2157 * head, thus also in the physical page order. This is useful 2158 * for IO devices that can merge IO requests if the physical 2159 * pages are ordered properly. 2160 */ 2161 list_add_tail(&page->pcp_list, list); 2162 if (is_migrate_cma(get_pcppage_migratetype(page))) 2163 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2164 -(1 << order)); 2165 } 2166 2167 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2168 spin_unlock_irqrestore(&zone->lock, flags); 2169 2170 return i; 2171 } 2172 2173 #ifdef CONFIG_NUMA 2174 /* 2175 * Called from the vmstat counter updater to drain pagesets of this 2176 * currently executing processor on remote nodes after they have 2177 * expired. 2178 */ 2179 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2180 { 2181 int to_drain, batch; 2182 2183 batch = READ_ONCE(pcp->batch); 2184 to_drain = min(pcp->count, batch); 2185 if (to_drain > 0) { 2186 spin_lock(&pcp->lock); 2187 free_pcppages_bulk(zone, to_drain, pcp, 0); 2188 spin_unlock(&pcp->lock); 2189 } 2190 } 2191 #endif 2192 2193 /* 2194 * Drain pcplists of the indicated processor and zone. 2195 */ 2196 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2197 { 2198 struct per_cpu_pages *pcp; 2199 2200 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2201 if (pcp->count) { 2202 spin_lock(&pcp->lock); 2203 free_pcppages_bulk(zone, pcp->count, pcp, 0); 2204 spin_unlock(&pcp->lock); 2205 } 2206 } 2207 2208 /* 2209 * Drain pcplists of all zones on the indicated processor. 2210 */ 2211 static void drain_pages(unsigned int cpu) 2212 { 2213 struct zone *zone; 2214 2215 for_each_populated_zone(zone) { 2216 drain_pages_zone(cpu, zone); 2217 } 2218 } 2219 2220 /* 2221 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2222 */ 2223 void drain_local_pages(struct zone *zone) 2224 { 2225 int cpu = smp_processor_id(); 2226 2227 if (zone) 2228 drain_pages_zone(cpu, zone); 2229 else 2230 drain_pages(cpu); 2231 } 2232 2233 /* 2234 * The implementation of drain_all_pages(), exposing an extra parameter to 2235 * drain on all cpus. 2236 * 2237 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2238 * not empty. The check for non-emptiness can however race with a free to 2239 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2240 * that need the guarantee that every CPU has drained can disable the 2241 * optimizing racy check. 2242 */ 2243 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2244 { 2245 int cpu; 2246 2247 /* 2248 * Allocate in the BSS so we won't require allocation in 2249 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2250 */ 2251 static cpumask_t cpus_with_pcps; 2252 2253 /* 2254 * Do not drain if one is already in progress unless it's specific to 2255 * a zone. Such callers are primarily CMA and memory hotplug and need 2256 * the drain to be complete when the call returns. 2257 */ 2258 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2259 if (!zone) 2260 return; 2261 mutex_lock(&pcpu_drain_mutex); 2262 } 2263 2264 /* 2265 * We don't care about racing with CPU hotplug event 2266 * as offline notification will cause the notified 2267 * cpu to drain that CPU pcps and on_each_cpu_mask 2268 * disables preemption as part of its processing 2269 */ 2270 for_each_online_cpu(cpu) { 2271 struct per_cpu_pages *pcp; 2272 struct zone *z; 2273 bool has_pcps = false; 2274 2275 if (force_all_cpus) { 2276 /* 2277 * The pcp.count check is racy, some callers need a 2278 * guarantee that no cpu is missed. 2279 */ 2280 has_pcps = true; 2281 } else if (zone) { 2282 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2283 if (pcp->count) 2284 has_pcps = true; 2285 } else { 2286 for_each_populated_zone(z) { 2287 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2288 if (pcp->count) { 2289 has_pcps = true; 2290 break; 2291 } 2292 } 2293 } 2294 2295 if (has_pcps) 2296 cpumask_set_cpu(cpu, &cpus_with_pcps); 2297 else 2298 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2299 } 2300 2301 for_each_cpu(cpu, &cpus_with_pcps) { 2302 if (zone) 2303 drain_pages_zone(cpu, zone); 2304 else 2305 drain_pages(cpu); 2306 } 2307 2308 mutex_unlock(&pcpu_drain_mutex); 2309 } 2310 2311 /* 2312 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2313 * 2314 * When zone parameter is non-NULL, spill just the single zone's pages. 2315 */ 2316 void drain_all_pages(struct zone *zone) 2317 { 2318 __drain_all_pages(zone, false); 2319 } 2320 2321 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 2322 unsigned int order) 2323 { 2324 int migratetype; 2325 2326 if (!free_pages_prepare(page, order, FPI_NONE)) 2327 return false; 2328 2329 migratetype = get_pfnblock_migratetype(page, pfn); 2330 set_pcppage_migratetype(page, migratetype); 2331 return true; 2332 } 2333 2334 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high) 2335 { 2336 int min_nr_free, max_nr_free; 2337 int batch = READ_ONCE(pcp->batch); 2338 2339 /* Free everything if batch freeing high-order pages. */ 2340 if (unlikely(free_high)) 2341 return pcp->count; 2342 2343 /* Check for PCP disabled or boot pageset */ 2344 if (unlikely(high < batch)) 2345 return 1; 2346 2347 /* Leave at least pcp->batch pages on the list */ 2348 min_nr_free = batch; 2349 max_nr_free = high - batch; 2350 2351 /* 2352 * Double the number of pages freed each time there is subsequent 2353 * freeing of pages without any allocation. 2354 */ 2355 batch <<= pcp->free_factor; 2356 if (batch < max_nr_free) 2357 pcp->free_factor++; 2358 batch = clamp(batch, min_nr_free, max_nr_free); 2359 2360 return batch; 2361 } 2362 2363 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2364 bool free_high) 2365 { 2366 int high = READ_ONCE(pcp->high); 2367 2368 if (unlikely(!high || free_high)) 2369 return 0; 2370 2371 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 2372 return high; 2373 2374 /* 2375 * If reclaim is active, limit the number of pages that can be 2376 * stored on pcp lists 2377 */ 2378 return min(READ_ONCE(pcp->batch) << 2, high); 2379 } 2380 2381 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 2382 struct page *page, int migratetype, 2383 unsigned int order) 2384 { 2385 int high; 2386 int pindex; 2387 bool free_high; 2388 2389 __count_vm_events(PGFREE, 1 << order); 2390 pindex = order_to_pindex(migratetype, order); 2391 list_add(&page->pcp_list, &pcp->lists[pindex]); 2392 pcp->count += 1 << order; 2393 2394 /* 2395 * As high-order pages other than THP's stored on PCP can contribute 2396 * to fragmentation, limit the number stored when PCP is heavily 2397 * freeing without allocation. The remainder after bulk freeing 2398 * stops will be drained from vmstat refresh context. 2399 */ 2400 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); 2401 2402 high = nr_pcp_high(pcp, zone, free_high); 2403 if (pcp->count >= high) { 2404 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex); 2405 } 2406 } 2407 2408 /* 2409 * Free a pcp page 2410 */ 2411 void free_unref_page(struct page *page, unsigned int order) 2412 { 2413 unsigned long __maybe_unused UP_flags; 2414 struct per_cpu_pages *pcp; 2415 struct zone *zone; 2416 unsigned long pfn = page_to_pfn(page); 2417 int migratetype; 2418 2419 if (!free_unref_page_prepare(page, pfn, order)) 2420 return; 2421 2422 /* 2423 * We only track unmovable, reclaimable and movable on pcp lists. 2424 * Place ISOLATE pages on the isolated list because they are being 2425 * offlined but treat HIGHATOMIC as movable pages so we can get those 2426 * areas back if necessary. Otherwise, we may have to free 2427 * excessively into the page allocator 2428 */ 2429 migratetype = get_pcppage_migratetype(page); 2430 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2431 if (unlikely(is_migrate_isolate(migratetype))) { 2432 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 2433 return; 2434 } 2435 migratetype = MIGRATE_MOVABLE; 2436 } 2437 2438 zone = page_zone(page); 2439 pcp_trylock_prepare(UP_flags); 2440 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2441 if (pcp) { 2442 free_unref_page_commit(zone, pcp, page, migratetype, order); 2443 pcp_spin_unlock(pcp); 2444 } else { 2445 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); 2446 } 2447 pcp_trylock_finish(UP_flags); 2448 } 2449 2450 /* 2451 * Free a list of 0-order pages 2452 */ 2453 void free_unref_page_list(struct list_head *list) 2454 { 2455 unsigned long __maybe_unused UP_flags; 2456 struct page *page, *next; 2457 struct per_cpu_pages *pcp = NULL; 2458 struct zone *locked_zone = NULL; 2459 int batch_count = 0; 2460 int migratetype; 2461 2462 /* Prepare pages for freeing */ 2463 list_for_each_entry_safe(page, next, list, lru) { 2464 unsigned long pfn = page_to_pfn(page); 2465 if (!free_unref_page_prepare(page, pfn, 0)) { 2466 list_del(&page->lru); 2467 continue; 2468 } 2469 2470 /* 2471 * Free isolated pages directly to the allocator, see 2472 * comment in free_unref_page. 2473 */ 2474 migratetype = get_pcppage_migratetype(page); 2475 if (unlikely(is_migrate_isolate(migratetype))) { 2476 list_del(&page->lru); 2477 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 2478 continue; 2479 } 2480 } 2481 2482 list_for_each_entry_safe(page, next, list, lru) { 2483 struct zone *zone = page_zone(page); 2484 2485 list_del(&page->lru); 2486 migratetype = get_pcppage_migratetype(page); 2487 2488 /* 2489 * Either different zone requiring a different pcp lock or 2490 * excessive lock hold times when freeing a large list of 2491 * pages. 2492 */ 2493 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) { 2494 if (pcp) { 2495 pcp_spin_unlock(pcp); 2496 pcp_trylock_finish(UP_flags); 2497 } 2498 2499 batch_count = 0; 2500 2501 /* 2502 * trylock is necessary as pages may be getting freed 2503 * from IRQ or SoftIRQ context after an IO completion. 2504 */ 2505 pcp_trylock_prepare(UP_flags); 2506 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2507 if (unlikely(!pcp)) { 2508 pcp_trylock_finish(UP_flags); 2509 free_one_page(zone, page, page_to_pfn(page), 2510 0, migratetype, FPI_NONE); 2511 locked_zone = NULL; 2512 continue; 2513 } 2514 locked_zone = zone; 2515 } 2516 2517 /* 2518 * Non-isolated types over MIGRATE_PCPTYPES get added 2519 * to the MIGRATE_MOVABLE pcp list. 2520 */ 2521 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2522 migratetype = MIGRATE_MOVABLE; 2523 2524 trace_mm_page_free_batched(page); 2525 free_unref_page_commit(zone, pcp, page, migratetype, 0); 2526 batch_count++; 2527 } 2528 2529 if (pcp) { 2530 pcp_spin_unlock(pcp); 2531 pcp_trylock_finish(UP_flags); 2532 } 2533 } 2534 2535 /* 2536 * split_page takes a non-compound higher-order page, and splits it into 2537 * n (1<<order) sub-pages: page[0..n] 2538 * Each sub-page must be freed individually. 2539 * 2540 * Note: this is probably too low level an operation for use in drivers. 2541 * Please consult with lkml before using this in your driver. 2542 */ 2543 void split_page(struct page *page, unsigned int order) 2544 { 2545 int i; 2546 2547 VM_BUG_ON_PAGE(PageCompound(page), page); 2548 VM_BUG_ON_PAGE(!page_count(page), page); 2549 2550 for (i = 1; i < (1 << order); i++) 2551 set_page_refcounted(page + i); 2552 split_page_owner(page, 1 << order); 2553 split_page_memcg(page, 1 << order); 2554 } 2555 EXPORT_SYMBOL_GPL(split_page); 2556 2557 int __isolate_free_page(struct page *page, unsigned int order) 2558 { 2559 struct zone *zone = page_zone(page); 2560 int mt = get_pageblock_migratetype(page); 2561 2562 if (!is_migrate_isolate(mt)) { 2563 unsigned long watermark; 2564 /* 2565 * Obey watermarks as if the page was being allocated. We can 2566 * emulate a high-order watermark check with a raised order-0 2567 * watermark, because we already know our high-order page 2568 * exists. 2569 */ 2570 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2571 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2572 return 0; 2573 2574 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2575 } 2576 2577 del_page_from_free_list(page, zone, order); 2578 2579 /* 2580 * Set the pageblock if the isolated page is at least half of a 2581 * pageblock 2582 */ 2583 if (order >= pageblock_order - 1) { 2584 struct page *endpage = page + (1 << order) - 1; 2585 for (; page < endpage; page += pageblock_nr_pages) { 2586 int mt = get_pageblock_migratetype(page); 2587 /* 2588 * Only change normal pageblocks (i.e., they can merge 2589 * with others) 2590 */ 2591 if (migratetype_is_mergeable(mt)) 2592 set_pageblock_migratetype(page, 2593 MIGRATE_MOVABLE); 2594 } 2595 } 2596 2597 return 1UL << order; 2598 } 2599 2600 /** 2601 * __putback_isolated_page - Return a now-isolated page back where we got it 2602 * @page: Page that was isolated 2603 * @order: Order of the isolated page 2604 * @mt: The page's pageblock's migratetype 2605 * 2606 * This function is meant to return a page pulled from the free lists via 2607 * __isolate_free_page back to the free lists they were pulled from. 2608 */ 2609 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2610 { 2611 struct zone *zone = page_zone(page); 2612 2613 /* zone lock should be held when this function is called */ 2614 lockdep_assert_held(&zone->lock); 2615 2616 /* Return isolated page to tail of freelist. */ 2617 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2618 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2619 } 2620 2621 /* 2622 * Update NUMA hit/miss statistics 2623 */ 2624 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2625 long nr_account) 2626 { 2627 #ifdef CONFIG_NUMA 2628 enum numa_stat_item local_stat = NUMA_LOCAL; 2629 2630 /* skip numa counters update if numa stats is disabled */ 2631 if (!static_branch_likely(&vm_numa_stat_key)) 2632 return; 2633 2634 if (zone_to_nid(z) != numa_node_id()) 2635 local_stat = NUMA_OTHER; 2636 2637 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2638 __count_numa_events(z, NUMA_HIT, nr_account); 2639 else { 2640 __count_numa_events(z, NUMA_MISS, nr_account); 2641 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2642 } 2643 __count_numa_events(z, local_stat, nr_account); 2644 #endif 2645 } 2646 2647 static __always_inline 2648 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2649 unsigned int order, unsigned int alloc_flags, 2650 int migratetype) 2651 { 2652 struct page *page; 2653 unsigned long flags; 2654 2655 do { 2656 page = NULL; 2657 spin_lock_irqsave(&zone->lock, flags); 2658 /* 2659 * order-0 request can reach here when the pcplist is skipped 2660 * due to non-CMA allocation context. HIGHATOMIC area is 2661 * reserved for high-order atomic allocation, so order-0 2662 * request should skip it. 2663 */ 2664 if (alloc_flags & ALLOC_HIGHATOMIC) 2665 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2666 if (!page) { 2667 page = __rmqueue(zone, order, migratetype, alloc_flags); 2668 2669 /* 2670 * If the allocation fails, allow OOM handling access 2671 * to HIGHATOMIC reserves as failing now is worse than 2672 * failing a high-order atomic allocation in the 2673 * future. 2674 */ 2675 if (!page && (alloc_flags & ALLOC_OOM)) 2676 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2677 2678 if (!page) { 2679 spin_unlock_irqrestore(&zone->lock, flags); 2680 return NULL; 2681 } 2682 } 2683 __mod_zone_freepage_state(zone, -(1 << order), 2684 get_pcppage_migratetype(page)); 2685 spin_unlock_irqrestore(&zone->lock, flags); 2686 } while (check_new_pages(page, order)); 2687 2688 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2689 zone_statistics(preferred_zone, zone, 1); 2690 2691 return page; 2692 } 2693 2694 /* Remove page from the per-cpu list, caller must protect the list */ 2695 static inline 2696 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 2697 int migratetype, 2698 unsigned int alloc_flags, 2699 struct per_cpu_pages *pcp, 2700 struct list_head *list) 2701 { 2702 struct page *page; 2703 2704 do { 2705 if (list_empty(list)) { 2706 int batch = READ_ONCE(pcp->batch); 2707 int alloced; 2708 2709 /* 2710 * Scale batch relative to order if batch implies 2711 * free pages can be stored on the PCP. Batch can 2712 * be 1 for small zones or for boot pagesets which 2713 * should never store free pages as the pages may 2714 * belong to arbitrary zones. 2715 */ 2716 if (batch > 1) 2717 batch = max(batch >> order, 2); 2718 alloced = rmqueue_bulk(zone, order, 2719 batch, list, 2720 migratetype, alloc_flags); 2721 2722 pcp->count += alloced << order; 2723 if (unlikely(list_empty(list))) 2724 return NULL; 2725 } 2726 2727 page = list_first_entry(list, struct page, pcp_list); 2728 list_del(&page->pcp_list); 2729 pcp->count -= 1 << order; 2730 } while (check_new_pages(page, order)); 2731 2732 return page; 2733 } 2734 2735 /* Lock and remove page from the per-cpu list */ 2736 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 2737 struct zone *zone, unsigned int order, 2738 int migratetype, unsigned int alloc_flags) 2739 { 2740 struct per_cpu_pages *pcp; 2741 struct list_head *list; 2742 struct page *page; 2743 unsigned long __maybe_unused UP_flags; 2744 2745 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 2746 pcp_trylock_prepare(UP_flags); 2747 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2748 if (!pcp) { 2749 pcp_trylock_finish(UP_flags); 2750 return NULL; 2751 } 2752 2753 /* 2754 * On allocation, reduce the number of pages that are batch freed. 2755 * See nr_pcp_free() where free_factor is increased for subsequent 2756 * frees. 2757 */ 2758 pcp->free_factor >>= 1; 2759 list = &pcp->lists[order_to_pindex(migratetype, order)]; 2760 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 2761 pcp_spin_unlock(pcp); 2762 pcp_trylock_finish(UP_flags); 2763 if (page) { 2764 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2765 zone_statistics(preferred_zone, zone, 1); 2766 } 2767 return page; 2768 } 2769 2770 /* 2771 * Allocate a page from the given zone. 2772 * Use pcplists for THP or "cheap" high-order allocations. 2773 */ 2774 2775 /* 2776 * Do not instrument rmqueue() with KMSAN. This function may call 2777 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 2778 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 2779 * may call rmqueue() again, which will result in a deadlock. 2780 */ 2781 __no_sanitize_memory 2782 static inline 2783 struct page *rmqueue(struct zone *preferred_zone, 2784 struct zone *zone, unsigned int order, 2785 gfp_t gfp_flags, unsigned int alloc_flags, 2786 int migratetype) 2787 { 2788 struct page *page; 2789 2790 /* 2791 * We most definitely don't want callers attempting to 2792 * allocate greater than order-1 page units with __GFP_NOFAIL. 2793 */ 2794 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 2795 2796 if (likely(pcp_allowed_order(order))) { 2797 /* 2798 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and 2799 * we need to skip it when CMA area isn't allowed. 2800 */ 2801 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || 2802 migratetype != MIGRATE_MOVABLE) { 2803 page = rmqueue_pcplist(preferred_zone, zone, order, 2804 migratetype, alloc_flags); 2805 if (likely(page)) 2806 goto out; 2807 } 2808 } 2809 2810 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 2811 migratetype); 2812 2813 out: 2814 /* Separate test+clear to avoid unnecessary atomics */ 2815 if ((alloc_flags & ALLOC_KSWAPD) && 2816 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 2817 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2818 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 2819 } 2820 2821 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 2822 return page; 2823 } 2824 2825 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2826 { 2827 return __should_fail_alloc_page(gfp_mask, order); 2828 } 2829 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 2830 2831 static inline long __zone_watermark_unusable_free(struct zone *z, 2832 unsigned int order, unsigned int alloc_flags) 2833 { 2834 long unusable_free = (1 << order) - 1; 2835 2836 /* 2837 * If the caller does not have rights to reserves below the min 2838 * watermark then subtract the high-atomic reserves. This will 2839 * over-estimate the size of the atomic reserve but it avoids a search. 2840 */ 2841 if (likely(!(alloc_flags & ALLOC_RESERVES))) 2842 unusable_free += z->nr_reserved_highatomic; 2843 2844 #ifdef CONFIG_CMA 2845 /* If allocation can't use CMA areas don't use free CMA pages */ 2846 if (!(alloc_flags & ALLOC_CMA)) 2847 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 2848 #endif 2849 #ifdef CONFIG_UNACCEPTED_MEMORY 2850 unusable_free += zone_page_state(z, NR_UNACCEPTED); 2851 #endif 2852 2853 return unusable_free; 2854 } 2855 2856 /* 2857 * Return true if free base pages are above 'mark'. For high-order checks it 2858 * will return true of the order-0 watermark is reached and there is at least 2859 * one free page of a suitable size. Checking now avoids taking the zone lock 2860 * to check in the allocation paths if no pages are free. 2861 */ 2862 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2863 int highest_zoneidx, unsigned int alloc_flags, 2864 long free_pages) 2865 { 2866 long min = mark; 2867 int o; 2868 2869 /* free_pages may go negative - that's OK */ 2870 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 2871 2872 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 2873 /* 2874 * __GFP_HIGH allows access to 50% of the min reserve as well 2875 * as OOM. 2876 */ 2877 if (alloc_flags & ALLOC_MIN_RESERVE) { 2878 min -= min / 2; 2879 2880 /* 2881 * Non-blocking allocations (e.g. GFP_ATOMIC) can 2882 * access more reserves than just __GFP_HIGH. Other 2883 * non-blocking allocations requests such as GFP_NOWAIT 2884 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 2885 * access to the min reserve. 2886 */ 2887 if (alloc_flags & ALLOC_NON_BLOCK) 2888 min -= min / 4; 2889 } 2890 2891 /* 2892 * OOM victims can try even harder than the normal reserve 2893 * users on the grounds that it's definitely going to be in 2894 * the exit path shortly and free memory. Any allocation it 2895 * makes during the free path will be small and short-lived. 2896 */ 2897 if (alloc_flags & ALLOC_OOM) 2898 min -= min / 2; 2899 } 2900 2901 /* 2902 * Check watermarks for an order-0 allocation request. If these 2903 * are not met, then a high-order request also cannot go ahead 2904 * even if a suitable page happened to be free. 2905 */ 2906 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 2907 return false; 2908 2909 /* If this is an order-0 request then the watermark is fine */ 2910 if (!order) 2911 return true; 2912 2913 /* For a high-order request, check at least one suitable page is free */ 2914 for (o = order; o <= MAX_ORDER; o++) { 2915 struct free_area *area = &z->free_area[o]; 2916 int mt; 2917 2918 if (!area->nr_free) 2919 continue; 2920 2921 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2922 if (!free_area_empty(area, mt)) 2923 return true; 2924 } 2925 2926 #ifdef CONFIG_CMA 2927 if ((alloc_flags & ALLOC_CMA) && 2928 !free_area_empty(area, MIGRATE_CMA)) { 2929 return true; 2930 } 2931 #endif 2932 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 2933 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 2934 return true; 2935 } 2936 } 2937 return false; 2938 } 2939 2940 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2941 int highest_zoneidx, unsigned int alloc_flags) 2942 { 2943 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 2944 zone_page_state(z, NR_FREE_PAGES)); 2945 } 2946 2947 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 2948 unsigned long mark, int highest_zoneidx, 2949 unsigned int alloc_flags, gfp_t gfp_mask) 2950 { 2951 long free_pages; 2952 2953 free_pages = zone_page_state(z, NR_FREE_PAGES); 2954 2955 /* 2956 * Fast check for order-0 only. If this fails then the reserves 2957 * need to be calculated. 2958 */ 2959 if (!order) { 2960 long usable_free; 2961 long reserved; 2962 2963 usable_free = free_pages; 2964 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 2965 2966 /* reserved may over estimate high-atomic reserves. */ 2967 usable_free -= min(usable_free, reserved); 2968 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 2969 return true; 2970 } 2971 2972 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 2973 free_pages)) 2974 return true; 2975 2976 /* 2977 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 2978 * when checking the min watermark. The min watermark is the 2979 * point where boosting is ignored so that kswapd is woken up 2980 * when below the low watermark. 2981 */ 2982 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 2983 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 2984 mark = z->_watermark[WMARK_MIN]; 2985 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 2986 alloc_flags, free_pages); 2987 } 2988 2989 return false; 2990 } 2991 2992 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2993 unsigned long mark, int highest_zoneidx) 2994 { 2995 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2996 2997 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2998 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2999 3000 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 3001 free_pages); 3002 } 3003 3004 #ifdef CONFIG_NUMA 3005 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3006 3007 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3008 { 3009 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3010 node_reclaim_distance; 3011 } 3012 #else /* CONFIG_NUMA */ 3013 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3014 { 3015 return true; 3016 } 3017 #endif /* CONFIG_NUMA */ 3018 3019 /* 3020 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3021 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3022 * premature use of a lower zone may cause lowmem pressure problems that 3023 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3024 * probably too small. It only makes sense to spread allocations to avoid 3025 * fragmentation between the Normal and DMA32 zones. 3026 */ 3027 static inline unsigned int 3028 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3029 { 3030 unsigned int alloc_flags; 3031 3032 /* 3033 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3034 * to save a branch. 3035 */ 3036 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3037 3038 #ifdef CONFIG_ZONE_DMA32 3039 if (!zone) 3040 return alloc_flags; 3041 3042 if (zone_idx(zone) != ZONE_NORMAL) 3043 return alloc_flags; 3044 3045 /* 3046 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3047 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3048 * on UMA that if Normal is populated then so is DMA32. 3049 */ 3050 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3051 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3052 return alloc_flags; 3053 3054 alloc_flags |= ALLOC_NOFRAGMENT; 3055 #endif /* CONFIG_ZONE_DMA32 */ 3056 return alloc_flags; 3057 } 3058 3059 /* Must be called after current_gfp_context() which can change gfp_mask */ 3060 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3061 unsigned int alloc_flags) 3062 { 3063 #ifdef CONFIG_CMA 3064 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3065 alloc_flags |= ALLOC_CMA; 3066 #endif 3067 return alloc_flags; 3068 } 3069 3070 /* 3071 * get_page_from_freelist goes through the zonelist trying to allocate 3072 * a page. 3073 */ 3074 static struct page * 3075 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3076 const struct alloc_context *ac) 3077 { 3078 struct zoneref *z; 3079 struct zone *zone; 3080 struct pglist_data *last_pgdat = NULL; 3081 bool last_pgdat_dirty_ok = false; 3082 bool no_fallback; 3083 3084 retry: 3085 /* 3086 * Scan zonelist, looking for a zone with enough free. 3087 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 3088 */ 3089 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3090 z = ac->preferred_zoneref; 3091 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3092 ac->nodemask) { 3093 struct page *page; 3094 unsigned long mark; 3095 3096 if (cpusets_enabled() && 3097 (alloc_flags & ALLOC_CPUSET) && 3098 !__cpuset_zone_allowed(zone, gfp_mask)) 3099 continue; 3100 /* 3101 * When allocating a page cache page for writing, we 3102 * want to get it from a node that is within its dirty 3103 * limit, such that no single node holds more than its 3104 * proportional share of globally allowed dirty pages. 3105 * The dirty limits take into account the node's 3106 * lowmem reserves and high watermark so that kswapd 3107 * should be able to balance it without having to 3108 * write pages from its LRU list. 3109 * 3110 * XXX: For now, allow allocations to potentially 3111 * exceed the per-node dirty limit in the slowpath 3112 * (spread_dirty_pages unset) before going into reclaim, 3113 * which is important when on a NUMA setup the allowed 3114 * nodes are together not big enough to reach the 3115 * global limit. The proper fix for these situations 3116 * will require awareness of nodes in the 3117 * dirty-throttling and the flusher threads. 3118 */ 3119 if (ac->spread_dirty_pages) { 3120 if (last_pgdat != zone->zone_pgdat) { 3121 last_pgdat = zone->zone_pgdat; 3122 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3123 } 3124 3125 if (!last_pgdat_dirty_ok) 3126 continue; 3127 } 3128 3129 if (no_fallback && nr_online_nodes > 1 && 3130 zone != ac->preferred_zoneref->zone) { 3131 int local_nid; 3132 3133 /* 3134 * If moving to a remote node, retry but allow 3135 * fragmenting fallbacks. Locality is more important 3136 * than fragmentation avoidance. 3137 */ 3138 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 3139 if (zone_to_nid(zone) != local_nid) { 3140 alloc_flags &= ~ALLOC_NOFRAGMENT; 3141 goto retry; 3142 } 3143 } 3144 3145 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3146 if (!zone_watermark_fast(zone, order, mark, 3147 ac->highest_zoneidx, alloc_flags, 3148 gfp_mask)) { 3149 int ret; 3150 3151 if (has_unaccepted_memory()) { 3152 if (try_to_accept_memory(zone, order)) 3153 goto try_this_zone; 3154 } 3155 3156 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3157 /* 3158 * Watermark failed for this zone, but see if we can 3159 * grow this zone if it contains deferred pages. 3160 */ 3161 if (deferred_pages_enabled()) { 3162 if (_deferred_grow_zone(zone, order)) 3163 goto try_this_zone; 3164 } 3165 #endif 3166 /* Checked here to keep the fast path fast */ 3167 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3168 if (alloc_flags & ALLOC_NO_WATERMARKS) 3169 goto try_this_zone; 3170 3171 if (!node_reclaim_enabled() || 3172 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 3173 continue; 3174 3175 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3176 switch (ret) { 3177 case NODE_RECLAIM_NOSCAN: 3178 /* did not scan */ 3179 continue; 3180 case NODE_RECLAIM_FULL: 3181 /* scanned but unreclaimable */ 3182 continue; 3183 default: 3184 /* did we reclaim enough */ 3185 if (zone_watermark_ok(zone, order, mark, 3186 ac->highest_zoneidx, alloc_flags)) 3187 goto try_this_zone; 3188 3189 continue; 3190 } 3191 } 3192 3193 try_this_zone: 3194 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 3195 gfp_mask, alloc_flags, ac->migratetype); 3196 if (page) { 3197 prep_new_page(page, order, gfp_mask, alloc_flags); 3198 3199 /* 3200 * If this is a high-order atomic allocation then check 3201 * if the pageblock should be reserved for the future 3202 */ 3203 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3204 reserve_highatomic_pageblock(page, zone); 3205 3206 return page; 3207 } else { 3208 if (has_unaccepted_memory()) { 3209 if (try_to_accept_memory(zone, order)) 3210 goto try_this_zone; 3211 } 3212 3213 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3214 /* Try again if zone has deferred pages */ 3215 if (deferred_pages_enabled()) { 3216 if (_deferred_grow_zone(zone, order)) 3217 goto try_this_zone; 3218 } 3219 #endif 3220 } 3221 } 3222 3223 /* 3224 * It's possible on a UMA machine to get through all zones that are 3225 * fragmented. If avoiding fragmentation, reset and try again. 3226 */ 3227 if (no_fallback) { 3228 alloc_flags &= ~ALLOC_NOFRAGMENT; 3229 goto retry; 3230 } 3231 3232 return NULL; 3233 } 3234 3235 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3236 { 3237 unsigned int filter = SHOW_MEM_FILTER_NODES; 3238 3239 /* 3240 * This documents exceptions given to allocations in certain 3241 * contexts that are allowed to allocate outside current's set 3242 * of allowed nodes. 3243 */ 3244 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3245 if (tsk_is_oom_victim(current) || 3246 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3247 filter &= ~SHOW_MEM_FILTER_NODES; 3248 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3249 filter &= ~SHOW_MEM_FILTER_NODES; 3250 3251 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3252 } 3253 3254 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 3255 { 3256 struct va_format vaf; 3257 va_list args; 3258 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3259 3260 if ((gfp_mask & __GFP_NOWARN) || 3261 !__ratelimit(&nopage_rs) || 3262 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3263 return; 3264 3265 va_start(args, fmt); 3266 vaf.fmt = fmt; 3267 vaf.va = &args; 3268 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3269 current->comm, &vaf, gfp_mask, &gfp_mask, 3270 nodemask_pr_args(nodemask)); 3271 va_end(args); 3272 3273 cpuset_print_current_mems_allowed(); 3274 pr_cont("\n"); 3275 dump_stack(); 3276 warn_alloc_show_mem(gfp_mask, nodemask); 3277 } 3278 3279 static inline struct page * 3280 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3281 unsigned int alloc_flags, 3282 const struct alloc_context *ac) 3283 { 3284 struct page *page; 3285 3286 page = get_page_from_freelist(gfp_mask, order, 3287 alloc_flags|ALLOC_CPUSET, ac); 3288 /* 3289 * fallback to ignore cpuset restriction if our nodes 3290 * are depleted 3291 */ 3292 if (!page) 3293 page = get_page_from_freelist(gfp_mask, order, 3294 alloc_flags, ac); 3295 3296 return page; 3297 } 3298 3299 static inline struct page * 3300 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3301 const struct alloc_context *ac, unsigned long *did_some_progress) 3302 { 3303 struct oom_control oc = { 3304 .zonelist = ac->zonelist, 3305 .nodemask = ac->nodemask, 3306 .memcg = NULL, 3307 .gfp_mask = gfp_mask, 3308 .order = order, 3309 }; 3310 struct page *page; 3311 3312 *did_some_progress = 0; 3313 3314 /* 3315 * Acquire the oom lock. If that fails, somebody else is 3316 * making progress for us. 3317 */ 3318 if (!mutex_trylock(&oom_lock)) { 3319 *did_some_progress = 1; 3320 schedule_timeout_uninterruptible(1); 3321 return NULL; 3322 } 3323 3324 /* 3325 * Go through the zonelist yet one more time, keep very high watermark 3326 * here, this is only to catch a parallel oom killing, we must fail if 3327 * we're still under heavy pressure. But make sure that this reclaim 3328 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3329 * allocation which will never fail due to oom_lock already held. 3330 */ 3331 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3332 ~__GFP_DIRECT_RECLAIM, order, 3333 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3334 if (page) 3335 goto out; 3336 3337 /* Coredumps can quickly deplete all memory reserves */ 3338 if (current->flags & PF_DUMPCORE) 3339 goto out; 3340 /* The OOM killer will not help higher order allocs */ 3341 if (order > PAGE_ALLOC_COSTLY_ORDER) 3342 goto out; 3343 /* 3344 * We have already exhausted all our reclaim opportunities without any 3345 * success so it is time to admit defeat. We will skip the OOM killer 3346 * because it is very likely that the caller has a more reasonable 3347 * fallback than shooting a random task. 3348 * 3349 * The OOM killer may not free memory on a specific node. 3350 */ 3351 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3352 goto out; 3353 /* The OOM killer does not needlessly kill tasks for lowmem */ 3354 if (ac->highest_zoneidx < ZONE_NORMAL) 3355 goto out; 3356 if (pm_suspended_storage()) 3357 goto out; 3358 /* 3359 * XXX: GFP_NOFS allocations should rather fail than rely on 3360 * other request to make a forward progress. 3361 * We are in an unfortunate situation where out_of_memory cannot 3362 * do much for this context but let's try it to at least get 3363 * access to memory reserved if the current task is killed (see 3364 * out_of_memory). Once filesystems are ready to handle allocation 3365 * failures more gracefully we should just bail out here. 3366 */ 3367 3368 /* Exhausted what can be done so it's blame time */ 3369 if (out_of_memory(&oc) || 3370 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 3371 *did_some_progress = 1; 3372 3373 /* 3374 * Help non-failing allocations by giving them access to memory 3375 * reserves 3376 */ 3377 if (gfp_mask & __GFP_NOFAIL) 3378 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3379 ALLOC_NO_WATERMARKS, ac); 3380 } 3381 out: 3382 mutex_unlock(&oom_lock); 3383 return page; 3384 } 3385 3386 /* 3387 * Maximum number of compaction retries with a progress before OOM 3388 * killer is consider as the only way to move forward. 3389 */ 3390 #define MAX_COMPACT_RETRIES 16 3391 3392 #ifdef CONFIG_COMPACTION 3393 /* Try memory compaction for high-order allocations before reclaim */ 3394 static struct page * 3395 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3396 unsigned int alloc_flags, const struct alloc_context *ac, 3397 enum compact_priority prio, enum compact_result *compact_result) 3398 { 3399 struct page *page = NULL; 3400 unsigned long pflags; 3401 unsigned int noreclaim_flag; 3402 3403 if (!order) 3404 return NULL; 3405 3406 psi_memstall_enter(&pflags); 3407 delayacct_compact_start(); 3408 noreclaim_flag = memalloc_noreclaim_save(); 3409 3410 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3411 prio, &page); 3412 3413 memalloc_noreclaim_restore(noreclaim_flag); 3414 psi_memstall_leave(&pflags); 3415 delayacct_compact_end(); 3416 3417 if (*compact_result == COMPACT_SKIPPED) 3418 return NULL; 3419 /* 3420 * At least in one zone compaction wasn't deferred or skipped, so let's 3421 * count a compaction stall 3422 */ 3423 count_vm_event(COMPACTSTALL); 3424 3425 /* Prep a captured page if available */ 3426 if (page) 3427 prep_new_page(page, order, gfp_mask, alloc_flags); 3428 3429 /* Try get a page from the freelist if available */ 3430 if (!page) 3431 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3432 3433 if (page) { 3434 struct zone *zone = page_zone(page); 3435 3436 zone->compact_blockskip_flush = false; 3437 compaction_defer_reset(zone, order, true); 3438 count_vm_event(COMPACTSUCCESS); 3439 return page; 3440 } 3441 3442 /* 3443 * It's bad if compaction run occurs and fails. The most likely reason 3444 * is that pages exist, but not enough to satisfy watermarks. 3445 */ 3446 count_vm_event(COMPACTFAIL); 3447 3448 cond_resched(); 3449 3450 return NULL; 3451 } 3452 3453 static inline bool 3454 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3455 enum compact_result compact_result, 3456 enum compact_priority *compact_priority, 3457 int *compaction_retries) 3458 { 3459 int max_retries = MAX_COMPACT_RETRIES; 3460 int min_priority; 3461 bool ret = false; 3462 int retries = *compaction_retries; 3463 enum compact_priority priority = *compact_priority; 3464 3465 if (!order) 3466 return false; 3467 3468 if (fatal_signal_pending(current)) 3469 return false; 3470 3471 /* 3472 * Compaction was skipped due to a lack of free order-0 3473 * migration targets. Continue if reclaim can help. 3474 */ 3475 if (compact_result == COMPACT_SKIPPED) { 3476 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3477 goto out; 3478 } 3479 3480 /* 3481 * Compaction managed to coalesce some page blocks, but the 3482 * allocation failed presumably due to a race. Retry some. 3483 */ 3484 if (compact_result == COMPACT_SUCCESS) { 3485 /* 3486 * !costly requests are much more important than 3487 * __GFP_RETRY_MAYFAIL costly ones because they are de 3488 * facto nofail and invoke OOM killer to move on while 3489 * costly can fail and users are ready to cope with 3490 * that. 1/4 retries is rather arbitrary but we would 3491 * need much more detailed feedback from compaction to 3492 * make a better decision. 3493 */ 3494 if (order > PAGE_ALLOC_COSTLY_ORDER) 3495 max_retries /= 4; 3496 3497 if (++(*compaction_retries) <= max_retries) { 3498 ret = true; 3499 goto out; 3500 } 3501 } 3502 3503 /* 3504 * Compaction failed. Retry with increasing priority. 3505 */ 3506 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3507 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3508 3509 if (*compact_priority > min_priority) { 3510 (*compact_priority)--; 3511 *compaction_retries = 0; 3512 ret = true; 3513 } 3514 out: 3515 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3516 return ret; 3517 } 3518 #else 3519 static inline struct page * 3520 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3521 unsigned int alloc_flags, const struct alloc_context *ac, 3522 enum compact_priority prio, enum compact_result *compact_result) 3523 { 3524 *compact_result = COMPACT_SKIPPED; 3525 return NULL; 3526 } 3527 3528 static inline bool 3529 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3530 enum compact_result compact_result, 3531 enum compact_priority *compact_priority, 3532 int *compaction_retries) 3533 { 3534 struct zone *zone; 3535 struct zoneref *z; 3536 3537 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3538 return false; 3539 3540 /* 3541 * There are setups with compaction disabled which would prefer to loop 3542 * inside the allocator rather than hit the oom killer prematurely. 3543 * Let's give them a good hope and keep retrying while the order-0 3544 * watermarks are OK. 3545 */ 3546 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3547 ac->highest_zoneidx, ac->nodemask) { 3548 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3549 ac->highest_zoneidx, alloc_flags)) 3550 return true; 3551 } 3552 return false; 3553 } 3554 #endif /* CONFIG_COMPACTION */ 3555 3556 #ifdef CONFIG_LOCKDEP 3557 static struct lockdep_map __fs_reclaim_map = 3558 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 3559 3560 static bool __need_reclaim(gfp_t gfp_mask) 3561 { 3562 /* no reclaim without waiting on it */ 3563 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 3564 return false; 3565 3566 /* this guy won't enter reclaim */ 3567 if (current->flags & PF_MEMALLOC) 3568 return false; 3569 3570 if (gfp_mask & __GFP_NOLOCKDEP) 3571 return false; 3572 3573 return true; 3574 } 3575 3576 void __fs_reclaim_acquire(unsigned long ip) 3577 { 3578 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 3579 } 3580 3581 void __fs_reclaim_release(unsigned long ip) 3582 { 3583 lock_release(&__fs_reclaim_map, ip); 3584 } 3585 3586 void fs_reclaim_acquire(gfp_t gfp_mask) 3587 { 3588 gfp_mask = current_gfp_context(gfp_mask); 3589 3590 if (__need_reclaim(gfp_mask)) { 3591 if (gfp_mask & __GFP_FS) 3592 __fs_reclaim_acquire(_RET_IP_); 3593 3594 #ifdef CONFIG_MMU_NOTIFIER 3595 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 3596 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 3597 #endif 3598 3599 } 3600 } 3601 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 3602 3603 void fs_reclaim_release(gfp_t gfp_mask) 3604 { 3605 gfp_mask = current_gfp_context(gfp_mask); 3606 3607 if (__need_reclaim(gfp_mask)) { 3608 if (gfp_mask & __GFP_FS) 3609 __fs_reclaim_release(_RET_IP_); 3610 } 3611 } 3612 EXPORT_SYMBOL_GPL(fs_reclaim_release); 3613 #endif 3614 3615 /* 3616 * Zonelists may change due to hotplug during allocation. Detect when zonelists 3617 * have been rebuilt so allocation retries. Reader side does not lock and 3618 * retries the allocation if zonelist changes. Writer side is protected by the 3619 * embedded spin_lock. 3620 */ 3621 static DEFINE_SEQLOCK(zonelist_update_seq); 3622 3623 static unsigned int zonelist_iter_begin(void) 3624 { 3625 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3626 return read_seqbegin(&zonelist_update_seq); 3627 3628 return 0; 3629 } 3630 3631 static unsigned int check_retry_zonelist(unsigned int seq) 3632 { 3633 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3634 return read_seqretry(&zonelist_update_seq, seq); 3635 3636 return seq; 3637 } 3638 3639 /* Perform direct synchronous page reclaim */ 3640 static unsigned long 3641 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3642 const struct alloc_context *ac) 3643 { 3644 unsigned int noreclaim_flag; 3645 unsigned long progress; 3646 3647 cond_resched(); 3648 3649 /* We now go into synchronous reclaim */ 3650 cpuset_memory_pressure_bump(); 3651 fs_reclaim_acquire(gfp_mask); 3652 noreclaim_flag = memalloc_noreclaim_save(); 3653 3654 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3655 ac->nodemask); 3656 3657 memalloc_noreclaim_restore(noreclaim_flag); 3658 fs_reclaim_release(gfp_mask); 3659 3660 cond_resched(); 3661 3662 return progress; 3663 } 3664 3665 /* The really slow allocator path where we enter direct reclaim */ 3666 static inline struct page * 3667 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3668 unsigned int alloc_flags, const struct alloc_context *ac, 3669 unsigned long *did_some_progress) 3670 { 3671 struct page *page = NULL; 3672 unsigned long pflags; 3673 bool drained = false; 3674 3675 psi_memstall_enter(&pflags); 3676 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3677 if (unlikely(!(*did_some_progress))) 3678 goto out; 3679 3680 retry: 3681 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3682 3683 /* 3684 * If an allocation failed after direct reclaim, it could be because 3685 * pages are pinned on the per-cpu lists or in high alloc reserves. 3686 * Shrink them and try again 3687 */ 3688 if (!page && !drained) { 3689 unreserve_highatomic_pageblock(ac, false); 3690 drain_all_pages(NULL); 3691 drained = true; 3692 goto retry; 3693 } 3694 out: 3695 psi_memstall_leave(&pflags); 3696 3697 return page; 3698 } 3699 3700 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 3701 const struct alloc_context *ac) 3702 { 3703 struct zoneref *z; 3704 struct zone *zone; 3705 pg_data_t *last_pgdat = NULL; 3706 enum zone_type highest_zoneidx = ac->highest_zoneidx; 3707 3708 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 3709 ac->nodemask) { 3710 if (!managed_zone(zone)) 3711 continue; 3712 if (last_pgdat != zone->zone_pgdat) { 3713 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 3714 last_pgdat = zone->zone_pgdat; 3715 } 3716 } 3717 } 3718 3719 static inline unsigned int 3720 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 3721 { 3722 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3723 3724 /* 3725 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 3726 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3727 * to save two branches. 3728 */ 3729 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 3730 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 3731 3732 /* 3733 * The caller may dip into page reserves a bit more if the caller 3734 * cannot run direct reclaim, or if the caller has realtime scheduling 3735 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3736 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 3737 */ 3738 alloc_flags |= (__force int) 3739 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 3740 3741 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 3742 /* 3743 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 3744 * if it can't schedule. 3745 */ 3746 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 3747 alloc_flags |= ALLOC_NON_BLOCK; 3748 3749 if (order > 0) 3750 alloc_flags |= ALLOC_HIGHATOMIC; 3751 } 3752 3753 /* 3754 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 3755 * GFP_ATOMIC) rather than fail, see the comment for 3756 * cpuset_node_allowed(). 3757 */ 3758 if (alloc_flags & ALLOC_MIN_RESERVE) 3759 alloc_flags &= ~ALLOC_CPUSET; 3760 } else if (unlikely(rt_task(current)) && in_task()) 3761 alloc_flags |= ALLOC_MIN_RESERVE; 3762 3763 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 3764 3765 return alloc_flags; 3766 } 3767 3768 static bool oom_reserves_allowed(struct task_struct *tsk) 3769 { 3770 if (!tsk_is_oom_victim(tsk)) 3771 return false; 3772 3773 /* 3774 * !MMU doesn't have oom reaper so give access to memory reserves 3775 * only to the thread with TIF_MEMDIE set 3776 */ 3777 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 3778 return false; 3779 3780 return true; 3781 } 3782 3783 /* 3784 * Distinguish requests which really need access to full memory 3785 * reserves from oom victims which can live with a portion of it 3786 */ 3787 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 3788 { 3789 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 3790 return 0; 3791 if (gfp_mask & __GFP_MEMALLOC) 3792 return ALLOC_NO_WATERMARKS; 3793 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 3794 return ALLOC_NO_WATERMARKS; 3795 if (!in_interrupt()) { 3796 if (current->flags & PF_MEMALLOC) 3797 return ALLOC_NO_WATERMARKS; 3798 else if (oom_reserves_allowed(current)) 3799 return ALLOC_OOM; 3800 } 3801 3802 return 0; 3803 } 3804 3805 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 3806 { 3807 return !!__gfp_pfmemalloc_flags(gfp_mask); 3808 } 3809 3810 /* 3811 * Checks whether it makes sense to retry the reclaim to make a forward progress 3812 * for the given allocation request. 3813 * 3814 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 3815 * without success, or when we couldn't even meet the watermark if we 3816 * reclaimed all remaining pages on the LRU lists. 3817 * 3818 * Returns true if a retry is viable or false to enter the oom path. 3819 */ 3820 static inline bool 3821 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 3822 struct alloc_context *ac, int alloc_flags, 3823 bool did_some_progress, int *no_progress_loops) 3824 { 3825 struct zone *zone; 3826 struct zoneref *z; 3827 bool ret = false; 3828 3829 /* 3830 * Costly allocations might have made a progress but this doesn't mean 3831 * their order will become available due to high fragmentation so 3832 * always increment the no progress counter for them 3833 */ 3834 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 3835 *no_progress_loops = 0; 3836 else 3837 (*no_progress_loops)++; 3838 3839 /* 3840 * Make sure we converge to OOM if we cannot make any progress 3841 * several times in the row. 3842 */ 3843 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 3844 /* Before OOM, exhaust highatomic_reserve */ 3845 return unreserve_highatomic_pageblock(ac, true); 3846 } 3847 3848 /* 3849 * Keep reclaiming pages while there is a chance this will lead 3850 * somewhere. If none of the target zones can satisfy our allocation 3851 * request even if all reclaimable pages are considered then we are 3852 * screwed and have to go OOM. 3853 */ 3854 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3855 ac->highest_zoneidx, ac->nodemask) { 3856 unsigned long available; 3857 unsigned long reclaimable; 3858 unsigned long min_wmark = min_wmark_pages(zone); 3859 bool wmark; 3860 3861 available = reclaimable = zone_reclaimable_pages(zone); 3862 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 3863 3864 /* 3865 * Would the allocation succeed if we reclaimed all 3866 * reclaimable pages? 3867 */ 3868 wmark = __zone_watermark_ok(zone, order, min_wmark, 3869 ac->highest_zoneidx, alloc_flags, available); 3870 trace_reclaim_retry_zone(z, order, reclaimable, 3871 available, min_wmark, *no_progress_loops, wmark); 3872 if (wmark) { 3873 ret = true; 3874 break; 3875 } 3876 } 3877 3878 /* 3879 * Memory allocation/reclaim might be called from a WQ context and the 3880 * current implementation of the WQ concurrency control doesn't 3881 * recognize that a particular WQ is congested if the worker thread is 3882 * looping without ever sleeping. Therefore we have to do a short sleep 3883 * here rather than calling cond_resched(). 3884 */ 3885 if (current->flags & PF_WQ_WORKER) 3886 schedule_timeout_uninterruptible(1); 3887 else 3888 cond_resched(); 3889 return ret; 3890 } 3891 3892 static inline bool 3893 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 3894 { 3895 /* 3896 * It's possible that cpuset's mems_allowed and the nodemask from 3897 * mempolicy don't intersect. This should be normally dealt with by 3898 * policy_nodemask(), but it's possible to race with cpuset update in 3899 * such a way the check therein was true, and then it became false 3900 * before we got our cpuset_mems_cookie here. 3901 * This assumes that for all allocations, ac->nodemask can come only 3902 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 3903 * when it does not intersect with the cpuset restrictions) or the 3904 * caller can deal with a violated nodemask. 3905 */ 3906 if (cpusets_enabled() && ac->nodemask && 3907 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 3908 ac->nodemask = NULL; 3909 return true; 3910 } 3911 3912 /* 3913 * When updating a task's mems_allowed or mempolicy nodemask, it is 3914 * possible to race with parallel threads in such a way that our 3915 * allocation can fail while the mask is being updated. If we are about 3916 * to fail, check if the cpuset changed during allocation and if so, 3917 * retry. 3918 */ 3919 if (read_mems_allowed_retry(cpuset_mems_cookie)) 3920 return true; 3921 3922 return false; 3923 } 3924 3925 static inline struct page * 3926 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 3927 struct alloc_context *ac) 3928 { 3929 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 3930 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 3931 struct page *page = NULL; 3932 unsigned int alloc_flags; 3933 unsigned long did_some_progress; 3934 enum compact_priority compact_priority; 3935 enum compact_result compact_result; 3936 int compaction_retries; 3937 int no_progress_loops; 3938 unsigned int cpuset_mems_cookie; 3939 unsigned int zonelist_iter_cookie; 3940 int reserve_flags; 3941 3942 restart: 3943 compaction_retries = 0; 3944 no_progress_loops = 0; 3945 compact_priority = DEF_COMPACT_PRIORITY; 3946 cpuset_mems_cookie = read_mems_allowed_begin(); 3947 zonelist_iter_cookie = zonelist_iter_begin(); 3948 3949 /* 3950 * The fast path uses conservative alloc_flags to succeed only until 3951 * kswapd needs to be woken up, and to avoid the cost of setting up 3952 * alloc_flags precisely. So we do that now. 3953 */ 3954 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 3955 3956 /* 3957 * We need to recalculate the starting point for the zonelist iterator 3958 * because we might have used different nodemask in the fast path, or 3959 * there was a cpuset modification and we are retrying - otherwise we 3960 * could end up iterating over non-eligible zones endlessly. 3961 */ 3962 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 3963 ac->highest_zoneidx, ac->nodemask); 3964 if (!ac->preferred_zoneref->zone) 3965 goto nopage; 3966 3967 /* 3968 * Check for insane configurations where the cpuset doesn't contain 3969 * any suitable zone to satisfy the request - e.g. non-movable 3970 * GFP_HIGHUSER allocations from MOVABLE nodes only. 3971 */ 3972 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 3973 struct zoneref *z = first_zones_zonelist(ac->zonelist, 3974 ac->highest_zoneidx, 3975 &cpuset_current_mems_allowed); 3976 if (!z->zone) 3977 goto nopage; 3978 } 3979 3980 if (alloc_flags & ALLOC_KSWAPD) 3981 wake_all_kswapds(order, gfp_mask, ac); 3982 3983 /* 3984 * The adjusted alloc_flags might result in immediate success, so try 3985 * that first 3986 */ 3987 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3988 if (page) 3989 goto got_pg; 3990 3991 /* 3992 * For costly allocations, try direct compaction first, as it's likely 3993 * that we have enough base pages and don't need to reclaim. For non- 3994 * movable high-order allocations, do that as well, as compaction will 3995 * try prevent permanent fragmentation by migrating from blocks of the 3996 * same migratetype. 3997 * Don't try this for allocations that are allowed to ignore 3998 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 3999 */ 4000 if (can_direct_reclaim && 4001 (costly_order || 4002 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 4003 && !gfp_pfmemalloc_allowed(gfp_mask)) { 4004 page = __alloc_pages_direct_compact(gfp_mask, order, 4005 alloc_flags, ac, 4006 INIT_COMPACT_PRIORITY, 4007 &compact_result); 4008 if (page) 4009 goto got_pg; 4010 4011 /* 4012 * Checks for costly allocations with __GFP_NORETRY, which 4013 * includes some THP page fault allocations 4014 */ 4015 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4016 /* 4017 * If allocating entire pageblock(s) and compaction 4018 * failed because all zones are below low watermarks 4019 * or is prohibited because it recently failed at this 4020 * order, fail immediately unless the allocator has 4021 * requested compaction and reclaim retry. 4022 * 4023 * Reclaim is 4024 * - potentially very expensive because zones are far 4025 * below their low watermarks or this is part of very 4026 * bursty high order allocations, 4027 * - not guaranteed to help because isolate_freepages() 4028 * may not iterate over freed pages as part of its 4029 * linear scan, and 4030 * - unlikely to make entire pageblocks free on its 4031 * own. 4032 */ 4033 if (compact_result == COMPACT_SKIPPED || 4034 compact_result == COMPACT_DEFERRED) 4035 goto nopage; 4036 4037 /* 4038 * Looks like reclaim/compaction is worth trying, but 4039 * sync compaction could be very expensive, so keep 4040 * using async compaction. 4041 */ 4042 compact_priority = INIT_COMPACT_PRIORITY; 4043 } 4044 } 4045 4046 retry: 4047 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4048 if (alloc_flags & ALLOC_KSWAPD) 4049 wake_all_kswapds(order, gfp_mask, ac); 4050 4051 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4052 if (reserve_flags) 4053 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4054 (alloc_flags & ALLOC_KSWAPD); 4055 4056 /* 4057 * Reset the nodemask and zonelist iterators if memory policies can be 4058 * ignored. These allocations are high priority and system rather than 4059 * user oriented. 4060 */ 4061 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4062 ac->nodemask = NULL; 4063 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4064 ac->highest_zoneidx, ac->nodemask); 4065 } 4066 4067 /* Attempt with potentially adjusted zonelist and alloc_flags */ 4068 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4069 if (page) 4070 goto got_pg; 4071 4072 /* Caller is not willing to reclaim, we can't balance anything */ 4073 if (!can_direct_reclaim) 4074 goto nopage; 4075 4076 /* Avoid recursion of direct reclaim */ 4077 if (current->flags & PF_MEMALLOC) 4078 goto nopage; 4079 4080 /* Try direct reclaim and then allocating */ 4081 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 4082 &did_some_progress); 4083 if (page) 4084 goto got_pg; 4085 4086 /* Try direct compaction and then allocating */ 4087 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4088 compact_priority, &compact_result); 4089 if (page) 4090 goto got_pg; 4091 4092 /* Do not loop if specifically requested */ 4093 if (gfp_mask & __GFP_NORETRY) 4094 goto nopage; 4095 4096 /* 4097 * Do not retry costly high order allocations unless they are 4098 * __GFP_RETRY_MAYFAIL 4099 */ 4100 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) 4101 goto nopage; 4102 4103 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4104 did_some_progress > 0, &no_progress_loops)) 4105 goto retry; 4106 4107 /* 4108 * It doesn't make any sense to retry for the compaction if the order-0 4109 * reclaim is not able to make any progress because the current 4110 * implementation of the compaction depends on the sufficient amount 4111 * of free memory (see __compaction_suitable) 4112 */ 4113 if (did_some_progress > 0 && 4114 should_compact_retry(ac, order, alloc_flags, 4115 compact_result, &compact_priority, 4116 &compaction_retries)) 4117 goto retry; 4118 4119 4120 /* 4121 * Deal with possible cpuset update races or zonelist updates to avoid 4122 * a unnecessary OOM kill. 4123 */ 4124 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4125 check_retry_zonelist(zonelist_iter_cookie)) 4126 goto restart; 4127 4128 /* Reclaim has failed us, start killing things */ 4129 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4130 if (page) 4131 goto got_pg; 4132 4133 /* Avoid allocations with no watermarks from looping endlessly */ 4134 if (tsk_is_oom_victim(current) && 4135 (alloc_flags & ALLOC_OOM || 4136 (gfp_mask & __GFP_NOMEMALLOC))) 4137 goto nopage; 4138 4139 /* Retry as long as the OOM killer is making progress */ 4140 if (did_some_progress) { 4141 no_progress_loops = 0; 4142 goto retry; 4143 } 4144 4145 nopage: 4146 /* 4147 * Deal with possible cpuset update races or zonelist updates to avoid 4148 * a unnecessary OOM kill. 4149 */ 4150 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4151 check_retry_zonelist(zonelist_iter_cookie)) 4152 goto restart; 4153 4154 /* 4155 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 4156 * we always retry 4157 */ 4158 if (gfp_mask & __GFP_NOFAIL) { 4159 /* 4160 * All existing users of the __GFP_NOFAIL are blockable, so warn 4161 * of any new users that actually require GFP_NOWAIT 4162 */ 4163 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask)) 4164 goto fail; 4165 4166 /* 4167 * PF_MEMALLOC request from this context is rather bizarre 4168 * because we cannot reclaim anything and only can loop waiting 4169 * for somebody to do a work for us 4170 */ 4171 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask); 4172 4173 /* 4174 * non failing costly orders are a hard requirement which we 4175 * are not prepared for much so let's warn about these users 4176 * so that we can identify them and convert them to something 4177 * else. 4178 */ 4179 WARN_ON_ONCE_GFP(costly_order, gfp_mask); 4180 4181 /* 4182 * Help non-failing allocations by giving some access to memory 4183 * reserves normally used for high priority non-blocking 4184 * allocations but do not use ALLOC_NO_WATERMARKS because this 4185 * could deplete whole memory reserves which would just make 4186 * the situation worse. 4187 */ 4188 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4189 if (page) 4190 goto got_pg; 4191 4192 cond_resched(); 4193 goto retry; 4194 } 4195 fail: 4196 warn_alloc(gfp_mask, ac->nodemask, 4197 "page allocation failure: order:%u", order); 4198 got_pg: 4199 return page; 4200 } 4201 4202 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4203 int preferred_nid, nodemask_t *nodemask, 4204 struct alloc_context *ac, gfp_t *alloc_gfp, 4205 unsigned int *alloc_flags) 4206 { 4207 ac->highest_zoneidx = gfp_zone(gfp_mask); 4208 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4209 ac->nodemask = nodemask; 4210 ac->migratetype = gfp_migratetype(gfp_mask); 4211 4212 if (cpusets_enabled()) { 4213 *alloc_gfp |= __GFP_HARDWALL; 4214 /* 4215 * When we are in the interrupt context, it is irrelevant 4216 * to the current task context. It means that any node ok. 4217 */ 4218 if (in_task() && !ac->nodemask) 4219 ac->nodemask = &cpuset_current_mems_allowed; 4220 else 4221 *alloc_flags |= ALLOC_CPUSET; 4222 } 4223 4224 might_alloc(gfp_mask); 4225 4226 if (should_fail_alloc_page(gfp_mask, order)) 4227 return false; 4228 4229 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 4230 4231 /* Dirty zone balancing only done in the fast path */ 4232 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 4233 4234 /* 4235 * The preferred zone is used for statistics but crucially it is 4236 * also used as the starting point for the zonelist iterator. It 4237 * may get reset for allocations that ignore memory policies. 4238 */ 4239 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4240 ac->highest_zoneidx, ac->nodemask); 4241 4242 return true; 4243 } 4244 4245 /* 4246 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 4247 * @gfp: GFP flags for the allocation 4248 * @preferred_nid: The preferred NUMA node ID to allocate from 4249 * @nodemask: Set of nodes to allocate from, may be NULL 4250 * @nr_pages: The number of pages desired on the list or array 4251 * @page_list: Optional list to store the allocated pages 4252 * @page_array: Optional array to store the pages 4253 * 4254 * This is a batched version of the page allocator that attempts to 4255 * allocate nr_pages quickly. Pages are added to page_list if page_list 4256 * is not NULL, otherwise it is assumed that the page_array is valid. 4257 * 4258 * For lists, nr_pages is the number of pages that should be allocated. 4259 * 4260 * For arrays, only NULL elements are populated with pages and nr_pages 4261 * is the maximum number of pages that will be stored in the array. 4262 * 4263 * Returns the number of pages on the list or array. 4264 */ 4265 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, 4266 nodemask_t *nodemask, int nr_pages, 4267 struct list_head *page_list, 4268 struct page **page_array) 4269 { 4270 struct page *page; 4271 unsigned long __maybe_unused UP_flags; 4272 struct zone *zone; 4273 struct zoneref *z; 4274 struct per_cpu_pages *pcp; 4275 struct list_head *pcp_list; 4276 struct alloc_context ac; 4277 gfp_t alloc_gfp; 4278 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4279 int nr_populated = 0, nr_account = 0; 4280 4281 /* 4282 * Skip populated array elements to determine if any pages need 4283 * to be allocated before disabling IRQs. 4284 */ 4285 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 4286 nr_populated++; 4287 4288 /* No pages requested? */ 4289 if (unlikely(nr_pages <= 0)) 4290 goto out; 4291 4292 /* Already populated array? */ 4293 if (unlikely(page_array && nr_pages - nr_populated == 0)) 4294 goto out; 4295 4296 /* Bulk allocator does not support memcg accounting. */ 4297 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4298 goto failed; 4299 4300 /* Use the single page allocator for one page. */ 4301 if (nr_pages - nr_populated == 1) 4302 goto failed; 4303 4304 #ifdef CONFIG_PAGE_OWNER 4305 /* 4306 * PAGE_OWNER may recurse into the allocator to allocate space to 4307 * save the stack with pagesets.lock held. Releasing/reacquiring 4308 * removes much of the performance benefit of bulk allocation so 4309 * force the caller to allocate one page at a time as it'll have 4310 * similar performance to added complexity to the bulk allocator. 4311 */ 4312 if (static_branch_unlikely(&page_owner_inited)) 4313 goto failed; 4314 #endif 4315 4316 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 4317 gfp &= gfp_allowed_mask; 4318 alloc_gfp = gfp; 4319 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 4320 goto out; 4321 gfp = alloc_gfp; 4322 4323 /* Find an allowed local zone that meets the low watermark. */ 4324 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 4325 unsigned long mark; 4326 4327 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 4328 !__cpuset_zone_allowed(zone, gfp)) { 4329 continue; 4330 } 4331 4332 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && 4333 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { 4334 goto failed; 4335 } 4336 4337 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 4338 if (zone_watermark_fast(zone, 0, mark, 4339 zonelist_zone_idx(ac.preferred_zoneref), 4340 alloc_flags, gfp)) { 4341 break; 4342 } 4343 } 4344 4345 /* 4346 * If there are no allowed local zones that meets the watermarks then 4347 * try to allocate a single page and reclaim if necessary. 4348 */ 4349 if (unlikely(!zone)) 4350 goto failed; 4351 4352 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 4353 pcp_trylock_prepare(UP_flags); 4354 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4355 if (!pcp) 4356 goto failed_irq; 4357 4358 /* Attempt the batch allocation */ 4359 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4360 while (nr_populated < nr_pages) { 4361 4362 /* Skip existing pages */ 4363 if (page_array && page_array[nr_populated]) { 4364 nr_populated++; 4365 continue; 4366 } 4367 4368 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4369 pcp, pcp_list); 4370 if (unlikely(!page)) { 4371 /* Try and allocate at least one page */ 4372 if (!nr_account) { 4373 pcp_spin_unlock(pcp); 4374 goto failed_irq; 4375 } 4376 break; 4377 } 4378 nr_account++; 4379 4380 prep_new_page(page, 0, gfp, 0); 4381 if (page_list) 4382 list_add(&page->lru, page_list); 4383 else 4384 page_array[nr_populated] = page; 4385 nr_populated++; 4386 } 4387 4388 pcp_spin_unlock(pcp); 4389 pcp_trylock_finish(UP_flags); 4390 4391 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4392 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); 4393 4394 out: 4395 return nr_populated; 4396 4397 failed_irq: 4398 pcp_trylock_finish(UP_flags); 4399 4400 failed: 4401 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); 4402 if (page) { 4403 if (page_list) 4404 list_add(&page->lru, page_list); 4405 else 4406 page_array[nr_populated] = page; 4407 nr_populated++; 4408 } 4409 4410 goto out; 4411 } 4412 EXPORT_SYMBOL_GPL(__alloc_pages_bulk); 4413 4414 /* 4415 * This is the 'heart' of the zoned buddy allocator. 4416 */ 4417 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, 4418 nodemask_t *nodemask) 4419 { 4420 struct page *page; 4421 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4422 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4423 struct alloc_context ac = { }; 4424 4425 /* 4426 * There are several places where we assume that the order value is sane 4427 * so bail out early if the request is out of bound. 4428 */ 4429 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp)) 4430 return NULL; 4431 4432 gfp &= gfp_allowed_mask; 4433 /* 4434 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4435 * resp. GFP_NOIO which has to be inherited for all allocation requests 4436 * from a particular context which has been marked by 4437 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4438 * movable zones are not used during allocation. 4439 */ 4440 gfp = current_gfp_context(gfp); 4441 alloc_gfp = gfp; 4442 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4443 &alloc_gfp, &alloc_flags)) 4444 return NULL; 4445 4446 /* 4447 * Forbid the first pass from falling back to types that fragment 4448 * memory until all local zones are considered. 4449 */ 4450 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 4451 4452 /* First allocation attempt */ 4453 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4454 if (likely(page)) 4455 goto out; 4456 4457 alloc_gfp = gfp; 4458 ac.spread_dirty_pages = false; 4459 4460 /* 4461 * Restore the original nodemask if it was potentially replaced with 4462 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 4463 */ 4464 ac.nodemask = nodemask; 4465 4466 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4467 4468 out: 4469 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4470 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4471 __free_pages(page, order); 4472 page = NULL; 4473 } 4474 4475 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4476 kmsan_alloc_page(page, order, alloc_gfp); 4477 4478 return page; 4479 } 4480 EXPORT_SYMBOL(__alloc_pages); 4481 4482 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 4483 nodemask_t *nodemask) 4484 { 4485 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 4486 preferred_nid, nodemask); 4487 4488 if (page && order > 1) 4489 prep_transhuge_page(page); 4490 return (struct folio *)page; 4491 } 4492 EXPORT_SYMBOL(__folio_alloc); 4493 4494 /* 4495 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4496 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4497 * you need to access high mem. 4498 */ 4499 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 4500 { 4501 struct page *page; 4502 4503 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 4504 if (!page) 4505 return 0; 4506 return (unsigned long) page_address(page); 4507 } 4508 EXPORT_SYMBOL(__get_free_pages); 4509 4510 unsigned long get_zeroed_page(gfp_t gfp_mask) 4511 { 4512 return __get_free_page(gfp_mask | __GFP_ZERO); 4513 } 4514 EXPORT_SYMBOL(get_zeroed_page); 4515 4516 /** 4517 * __free_pages - Free pages allocated with alloc_pages(). 4518 * @page: The page pointer returned from alloc_pages(). 4519 * @order: The order of the allocation. 4520 * 4521 * This function can free multi-page allocations that are not compound 4522 * pages. It does not check that the @order passed in matches that of 4523 * the allocation, so it is easy to leak memory. Freeing more memory 4524 * than was allocated will probably emit a warning. 4525 * 4526 * If the last reference to this page is speculative, it will be released 4527 * by put_page() which only frees the first page of a non-compound 4528 * allocation. To prevent the remaining pages from being leaked, we free 4529 * the subsequent pages here. If you want to use the page's reference 4530 * count to decide when to free the allocation, you should allocate a 4531 * compound page, and use put_page() instead of __free_pages(). 4532 * 4533 * Context: May be called in interrupt context or while holding a normal 4534 * spinlock, but not in NMI context or while holding a raw spinlock. 4535 */ 4536 void __free_pages(struct page *page, unsigned int order) 4537 { 4538 /* get PageHead before we drop reference */ 4539 int head = PageHead(page); 4540 4541 if (put_page_testzero(page)) 4542 free_the_page(page, order); 4543 else if (!head) 4544 while (order-- > 0) 4545 free_the_page(page + (1 << order), order); 4546 } 4547 EXPORT_SYMBOL(__free_pages); 4548 4549 void free_pages(unsigned long addr, unsigned int order) 4550 { 4551 if (addr != 0) { 4552 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4553 __free_pages(virt_to_page((void *)addr), order); 4554 } 4555 } 4556 4557 EXPORT_SYMBOL(free_pages); 4558 4559 /* 4560 * Page Fragment: 4561 * An arbitrary-length arbitrary-offset area of memory which resides 4562 * within a 0 or higher order page. Multiple fragments within that page 4563 * are individually refcounted, in the page's reference counter. 4564 * 4565 * The page_frag functions below provide a simple allocation framework for 4566 * page fragments. This is used by the network stack and network device 4567 * drivers to provide a backing region of memory for use as either an 4568 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 4569 */ 4570 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4571 gfp_t gfp_mask) 4572 { 4573 struct page *page = NULL; 4574 gfp_t gfp = gfp_mask; 4575 4576 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4577 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 4578 __GFP_NOMEMALLOC; 4579 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4580 PAGE_FRAG_CACHE_MAX_ORDER); 4581 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4582 #endif 4583 if (unlikely(!page)) 4584 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4585 4586 nc->va = page ? page_address(page) : NULL; 4587 4588 return page; 4589 } 4590 4591 void __page_frag_cache_drain(struct page *page, unsigned int count) 4592 { 4593 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4594 4595 if (page_ref_sub_and_test(page, count)) 4596 free_the_page(page, compound_order(page)); 4597 } 4598 EXPORT_SYMBOL(__page_frag_cache_drain); 4599 4600 void *page_frag_alloc_align(struct page_frag_cache *nc, 4601 unsigned int fragsz, gfp_t gfp_mask, 4602 unsigned int align_mask) 4603 { 4604 unsigned int size = PAGE_SIZE; 4605 struct page *page; 4606 int offset; 4607 4608 if (unlikely(!nc->va)) { 4609 refill: 4610 page = __page_frag_cache_refill(nc, gfp_mask); 4611 if (!page) 4612 return NULL; 4613 4614 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4615 /* if size can vary use size else just use PAGE_SIZE */ 4616 size = nc->size; 4617 #endif 4618 /* Even if we own the page, we do not use atomic_set(). 4619 * This would break get_page_unless_zero() users. 4620 */ 4621 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 4622 4623 /* reset page count bias and offset to start of new frag */ 4624 nc->pfmemalloc = page_is_pfmemalloc(page); 4625 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4626 nc->offset = size; 4627 } 4628 4629 offset = nc->offset - fragsz; 4630 if (unlikely(offset < 0)) { 4631 page = virt_to_page(nc->va); 4632 4633 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 4634 goto refill; 4635 4636 if (unlikely(nc->pfmemalloc)) { 4637 free_the_page(page, compound_order(page)); 4638 goto refill; 4639 } 4640 4641 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4642 /* if size can vary use size else just use PAGE_SIZE */ 4643 size = nc->size; 4644 #endif 4645 /* OK, page count is 0, we can safely set it */ 4646 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 4647 4648 /* reset page count bias and offset to start of new frag */ 4649 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4650 offset = size - fragsz; 4651 if (unlikely(offset < 0)) { 4652 /* 4653 * The caller is trying to allocate a fragment 4654 * with fragsz > PAGE_SIZE but the cache isn't big 4655 * enough to satisfy the request, this may 4656 * happen in low memory conditions. 4657 * We don't release the cache page because 4658 * it could make memory pressure worse 4659 * so we simply return NULL here. 4660 */ 4661 return NULL; 4662 } 4663 } 4664 4665 nc->pagecnt_bias--; 4666 offset &= align_mask; 4667 nc->offset = offset; 4668 4669 return nc->va + offset; 4670 } 4671 EXPORT_SYMBOL(page_frag_alloc_align); 4672 4673 /* 4674 * Frees a page fragment allocated out of either a compound or order 0 page. 4675 */ 4676 void page_frag_free(void *addr) 4677 { 4678 struct page *page = virt_to_head_page(addr); 4679 4680 if (unlikely(put_page_testzero(page))) 4681 free_the_page(page, compound_order(page)); 4682 } 4683 EXPORT_SYMBOL(page_frag_free); 4684 4685 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4686 size_t size) 4687 { 4688 if (addr) { 4689 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 4690 struct page *page = virt_to_page((void *)addr); 4691 struct page *last = page + nr; 4692 4693 split_page_owner(page, 1 << order); 4694 split_page_memcg(page, 1 << order); 4695 while (page < --last) 4696 set_page_refcounted(last); 4697 4698 last = page + (1UL << order); 4699 for (page += nr; page < last; page++) 4700 __free_pages_ok(page, 0, FPI_TO_TAIL); 4701 } 4702 return (void *)addr; 4703 } 4704 4705 /** 4706 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 4707 * @size: the number of bytes to allocate 4708 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4709 * 4710 * This function is similar to alloc_pages(), except that it allocates the 4711 * minimum number of pages to satisfy the request. alloc_pages() can only 4712 * allocate memory in power-of-two pages. 4713 * 4714 * This function is also limited by MAX_ORDER. 4715 * 4716 * Memory allocated by this function must be released by free_pages_exact(). 4717 * 4718 * Return: pointer to the allocated area or %NULL in case of error. 4719 */ 4720 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 4721 { 4722 unsigned int order = get_order(size); 4723 unsigned long addr; 4724 4725 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4726 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4727 4728 addr = __get_free_pages(gfp_mask, order); 4729 return make_alloc_exact(addr, order, size); 4730 } 4731 EXPORT_SYMBOL(alloc_pages_exact); 4732 4733 /** 4734 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4735 * pages on a node. 4736 * @nid: the preferred node ID where memory should be allocated 4737 * @size: the number of bytes to allocate 4738 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4739 * 4740 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 4741 * back. 4742 * 4743 * Return: pointer to the allocated area or %NULL in case of error. 4744 */ 4745 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 4746 { 4747 unsigned int order = get_order(size); 4748 struct page *p; 4749 4750 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4751 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4752 4753 p = alloc_pages_node(nid, gfp_mask, order); 4754 if (!p) 4755 return NULL; 4756 return make_alloc_exact((unsigned long)page_address(p), order, size); 4757 } 4758 4759 /** 4760 * free_pages_exact - release memory allocated via alloc_pages_exact() 4761 * @virt: the value returned by alloc_pages_exact. 4762 * @size: size of allocation, same value as passed to alloc_pages_exact(). 4763 * 4764 * Release the memory allocated by a previous call to alloc_pages_exact. 4765 */ 4766 void free_pages_exact(void *virt, size_t size) 4767 { 4768 unsigned long addr = (unsigned long)virt; 4769 unsigned long end = addr + PAGE_ALIGN(size); 4770 4771 while (addr < end) { 4772 free_page(addr); 4773 addr += PAGE_SIZE; 4774 } 4775 } 4776 EXPORT_SYMBOL(free_pages_exact); 4777 4778 /** 4779 * nr_free_zone_pages - count number of pages beyond high watermark 4780 * @offset: The zone index of the highest zone 4781 * 4782 * nr_free_zone_pages() counts the number of pages which are beyond the 4783 * high watermark within all zones at or below a given zone index. For each 4784 * zone, the number of pages is calculated as: 4785 * 4786 * nr_free_zone_pages = managed_pages - high_pages 4787 * 4788 * Return: number of pages beyond high watermark. 4789 */ 4790 static unsigned long nr_free_zone_pages(int offset) 4791 { 4792 struct zoneref *z; 4793 struct zone *zone; 4794 4795 /* Just pick one node, since fallback list is circular */ 4796 unsigned long sum = 0; 4797 4798 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 4799 4800 for_each_zone_zonelist(zone, z, zonelist, offset) { 4801 unsigned long size = zone_managed_pages(zone); 4802 unsigned long high = high_wmark_pages(zone); 4803 if (size > high) 4804 sum += size - high; 4805 } 4806 4807 return sum; 4808 } 4809 4810 /** 4811 * nr_free_buffer_pages - count number of pages beyond high watermark 4812 * 4813 * nr_free_buffer_pages() counts the number of pages which are beyond the high 4814 * watermark within ZONE_DMA and ZONE_NORMAL. 4815 * 4816 * Return: number of pages beyond high watermark within ZONE_DMA and 4817 * ZONE_NORMAL. 4818 */ 4819 unsigned long nr_free_buffer_pages(void) 4820 { 4821 return nr_free_zone_pages(gfp_zone(GFP_USER)); 4822 } 4823 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 4824 4825 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 4826 { 4827 zoneref->zone = zone; 4828 zoneref->zone_idx = zone_idx(zone); 4829 } 4830 4831 /* 4832 * Builds allocation fallback zone lists. 4833 * 4834 * Add all populated zones of a node to the zonelist. 4835 */ 4836 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 4837 { 4838 struct zone *zone; 4839 enum zone_type zone_type = MAX_NR_ZONES; 4840 int nr_zones = 0; 4841 4842 do { 4843 zone_type--; 4844 zone = pgdat->node_zones + zone_type; 4845 if (populated_zone(zone)) { 4846 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 4847 check_highest_zone(zone_type); 4848 } 4849 } while (zone_type); 4850 4851 return nr_zones; 4852 } 4853 4854 #ifdef CONFIG_NUMA 4855 4856 static int __parse_numa_zonelist_order(char *s) 4857 { 4858 /* 4859 * We used to support different zonelists modes but they turned 4860 * out to be just not useful. Let's keep the warning in place 4861 * if somebody still use the cmd line parameter so that we do 4862 * not fail it silently 4863 */ 4864 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 4865 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 4866 return -EINVAL; 4867 } 4868 return 0; 4869 } 4870 4871 static char numa_zonelist_order[] = "Node"; 4872 #define NUMA_ZONELIST_ORDER_LEN 16 4873 /* 4874 * sysctl handler for numa_zonelist_order 4875 */ 4876 static int numa_zonelist_order_handler(struct ctl_table *table, int write, 4877 void *buffer, size_t *length, loff_t *ppos) 4878 { 4879 if (write) 4880 return __parse_numa_zonelist_order(buffer); 4881 return proc_dostring(table, write, buffer, length, ppos); 4882 } 4883 4884 static int node_load[MAX_NUMNODES]; 4885 4886 /** 4887 * find_next_best_node - find the next node that should appear in a given node's fallback list 4888 * @node: node whose fallback list we're appending 4889 * @used_node_mask: nodemask_t of already used nodes 4890 * 4891 * We use a number of factors to determine which is the next node that should 4892 * appear on a given node's fallback list. The node should not have appeared 4893 * already in @node's fallback list, and it should be the next closest node 4894 * according to the distance array (which contains arbitrary distance values 4895 * from each node to each node in the system), and should also prefer nodes 4896 * with no CPUs, since presumably they'll have very little allocation pressure 4897 * on them otherwise. 4898 * 4899 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 4900 */ 4901 int find_next_best_node(int node, nodemask_t *used_node_mask) 4902 { 4903 int n, val; 4904 int min_val = INT_MAX; 4905 int best_node = NUMA_NO_NODE; 4906 4907 /* Use the local node if we haven't already */ 4908 if (!node_isset(node, *used_node_mask)) { 4909 node_set(node, *used_node_mask); 4910 return node; 4911 } 4912 4913 for_each_node_state(n, N_MEMORY) { 4914 4915 /* Don't want a node to appear more than once */ 4916 if (node_isset(n, *used_node_mask)) 4917 continue; 4918 4919 /* Use the distance array to find the distance */ 4920 val = node_distance(node, n); 4921 4922 /* Penalize nodes under us ("prefer the next node") */ 4923 val += (n < node); 4924 4925 /* Give preference to headless and unused nodes */ 4926 if (!cpumask_empty(cpumask_of_node(n))) 4927 val += PENALTY_FOR_NODE_WITH_CPUS; 4928 4929 /* Slight preference for less loaded node */ 4930 val *= MAX_NUMNODES; 4931 val += node_load[n]; 4932 4933 if (val < min_val) { 4934 min_val = val; 4935 best_node = n; 4936 } 4937 } 4938 4939 if (best_node >= 0) 4940 node_set(best_node, *used_node_mask); 4941 4942 return best_node; 4943 } 4944 4945 4946 /* 4947 * Build zonelists ordered by node and zones within node. 4948 * This results in maximum locality--normal zone overflows into local 4949 * DMA zone, if any--but risks exhausting DMA zone. 4950 */ 4951 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 4952 unsigned nr_nodes) 4953 { 4954 struct zoneref *zonerefs; 4955 int i; 4956 4957 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 4958 4959 for (i = 0; i < nr_nodes; i++) { 4960 int nr_zones; 4961 4962 pg_data_t *node = NODE_DATA(node_order[i]); 4963 4964 nr_zones = build_zonerefs_node(node, zonerefs); 4965 zonerefs += nr_zones; 4966 } 4967 zonerefs->zone = NULL; 4968 zonerefs->zone_idx = 0; 4969 } 4970 4971 /* 4972 * Build gfp_thisnode zonelists 4973 */ 4974 static void build_thisnode_zonelists(pg_data_t *pgdat) 4975 { 4976 struct zoneref *zonerefs; 4977 int nr_zones; 4978 4979 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 4980 nr_zones = build_zonerefs_node(pgdat, zonerefs); 4981 zonerefs += nr_zones; 4982 zonerefs->zone = NULL; 4983 zonerefs->zone_idx = 0; 4984 } 4985 4986 /* 4987 * Build zonelists ordered by zone and nodes within zones. 4988 * This results in conserving DMA zone[s] until all Normal memory is 4989 * exhausted, but results in overflowing to remote node while memory 4990 * may still exist in local DMA zone. 4991 */ 4992 4993 static void build_zonelists(pg_data_t *pgdat) 4994 { 4995 static int node_order[MAX_NUMNODES]; 4996 int node, nr_nodes = 0; 4997 nodemask_t used_mask = NODE_MASK_NONE; 4998 int local_node, prev_node; 4999 5000 /* NUMA-aware ordering of nodes */ 5001 local_node = pgdat->node_id; 5002 prev_node = local_node; 5003 5004 memset(node_order, 0, sizeof(node_order)); 5005 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5006 /* 5007 * We don't want to pressure a particular node. 5008 * So adding penalty to the first node in same 5009 * distance group to make it round-robin. 5010 */ 5011 if (node_distance(local_node, node) != 5012 node_distance(local_node, prev_node)) 5013 node_load[node] += 1; 5014 5015 node_order[nr_nodes++] = node; 5016 prev_node = node; 5017 } 5018 5019 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5020 build_thisnode_zonelists(pgdat); 5021 pr_info("Fallback order for Node %d: ", local_node); 5022 for (node = 0; node < nr_nodes; node++) 5023 pr_cont("%d ", node_order[node]); 5024 pr_cont("\n"); 5025 } 5026 5027 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5028 /* 5029 * Return node id of node used for "local" allocations. 5030 * I.e., first node id of first zone in arg node's generic zonelist. 5031 * Used for initializing percpu 'numa_mem', which is used primarily 5032 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5033 */ 5034 int local_memory_node(int node) 5035 { 5036 struct zoneref *z; 5037 5038 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5039 gfp_zone(GFP_KERNEL), 5040 NULL); 5041 return zone_to_nid(z->zone); 5042 } 5043 #endif 5044 5045 static void setup_min_unmapped_ratio(void); 5046 static void setup_min_slab_ratio(void); 5047 #else /* CONFIG_NUMA */ 5048 5049 static void build_zonelists(pg_data_t *pgdat) 5050 { 5051 int node, local_node; 5052 struct zoneref *zonerefs; 5053 int nr_zones; 5054 5055 local_node = pgdat->node_id; 5056 5057 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5058 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5059 zonerefs += nr_zones; 5060 5061 /* 5062 * Now we build the zonelist so that it contains the zones 5063 * of all the other nodes. 5064 * We don't want to pressure a particular node, so when 5065 * building the zones for node N, we make sure that the 5066 * zones coming right after the local ones are those from 5067 * node N+1 (modulo N) 5068 */ 5069 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 5070 if (!node_online(node)) 5071 continue; 5072 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5073 zonerefs += nr_zones; 5074 } 5075 for (node = 0; node < local_node; node++) { 5076 if (!node_online(node)) 5077 continue; 5078 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5079 zonerefs += nr_zones; 5080 } 5081 5082 zonerefs->zone = NULL; 5083 zonerefs->zone_idx = 0; 5084 } 5085 5086 #endif /* CONFIG_NUMA */ 5087 5088 /* 5089 * Boot pageset table. One per cpu which is going to be used for all 5090 * zones and all nodes. The parameters will be set in such a way 5091 * that an item put on a list will immediately be handed over to 5092 * the buddy list. This is safe since pageset manipulation is done 5093 * with interrupts disabled. 5094 * 5095 * The boot_pagesets must be kept even after bootup is complete for 5096 * unused processors and/or zones. They do play a role for bootstrapping 5097 * hotplugged processors. 5098 * 5099 * zoneinfo_show() and maybe other functions do 5100 * not check if the processor is online before following the pageset pointer. 5101 * Other parts of the kernel may not check if the zone is available. 5102 */ 5103 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5104 /* These effectively disable the pcplists in the boot pageset completely */ 5105 #define BOOT_PAGESET_HIGH 0 5106 #define BOOT_PAGESET_BATCH 1 5107 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5108 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5109 5110 static void __build_all_zonelists(void *data) 5111 { 5112 int nid; 5113 int __maybe_unused cpu; 5114 pg_data_t *self = data; 5115 unsigned long flags; 5116 5117 /* 5118 * The zonelist_update_seq must be acquired with irqsave because the 5119 * reader can be invoked from IRQ with GFP_ATOMIC. 5120 */ 5121 write_seqlock_irqsave(&zonelist_update_seq, flags); 5122 /* 5123 * Also disable synchronous printk() to prevent any printk() from 5124 * trying to hold port->lock, for 5125 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5126 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5127 */ 5128 printk_deferred_enter(); 5129 5130 #ifdef CONFIG_NUMA 5131 memset(node_load, 0, sizeof(node_load)); 5132 #endif 5133 5134 /* 5135 * This node is hotadded and no memory is yet present. So just 5136 * building zonelists is fine - no need to touch other nodes. 5137 */ 5138 if (self && !node_online(self->node_id)) { 5139 build_zonelists(self); 5140 } else { 5141 /* 5142 * All possible nodes have pgdat preallocated 5143 * in free_area_init 5144 */ 5145 for_each_node(nid) { 5146 pg_data_t *pgdat = NODE_DATA(nid); 5147 5148 build_zonelists(pgdat); 5149 } 5150 5151 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5152 /* 5153 * We now know the "local memory node" for each node-- 5154 * i.e., the node of the first zone in the generic zonelist. 5155 * Set up numa_mem percpu variable for on-line cpus. During 5156 * boot, only the boot cpu should be on-line; we'll init the 5157 * secondary cpus' numa_mem as they come on-line. During 5158 * node/memory hotplug, we'll fixup all on-line cpus. 5159 */ 5160 for_each_online_cpu(cpu) 5161 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5162 #endif 5163 } 5164 5165 printk_deferred_exit(); 5166 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5167 } 5168 5169 static noinline void __init 5170 build_all_zonelists_init(void) 5171 { 5172 int cpu; 5173 5174 __build_all_zonelists(NULL); 5175 5176 /* 5177 * Initialize the boot_pagesets that are going to be used 5178 * for bootstrapping processors. The real pagesets for 5179 * each zone will be allocated later when the per cpu 5180 * allocator is available. 5181 * 5182 * boot_pagesets are used also for bootstrapping offline 5183 * cpus if the system is already booted because the pagesets 5184 * are needed to initialize allocators on a specific cpu too. 5185 * F.e. the percpu allocator needs the page allocator which 5186 * needs the percpu allocator in order to allocate its pagesets 5187 * (a chicken-egg dilemma). 5188 */ 5189 for_each_possible_cpu(cpu) 5190 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5191 5192 mminit_verify_zonelist(); 5193 cpuset_init_current_mems_allowed(); 5194 } 5195 5196 /* 5197 * unless system_state == SYSTEM_BOOTING. 5198 * 5199 * __ref due to call of __init annotated helper build_all_zonelists_init 5200 * [protected by SYSTEM_BOOTING]. 5201 */ 5202 void __ref build_all_zonelists(pg_data_t *pgdat) 5203 { 5204 unsigned long vm_total_pages; 5205 5206 if (system_state == SYSTEM_BOOTING) { 5207 build_all_zonelists_init(); 5208 } else { 5209 __build_all_zonelists(pgdat); 5210 /* cpuset refresh routine should be here */ 5211 } 5212 /* Get the number of free pages beyond high watermark in all zones. */ 5213 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5214 /* 5215 * Disable grouping by mobility if the number of pages in the 5216 * system is too low to allow the mechanism to work. It would be 5217 * more accurate, but expensive to check per-zone. This check is 5218 * made on memory-hotadd so a system can start with mobility 5219 * disabled and enable it later 5220 */ 5221 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5222 page_group_by_mobility_disabled = 1; 5223 else 5224 page_group_by_mobility_disabled = 0; 5225 5226 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5227 nr_online_nodes, 5228 page_group_by_mobility_disabled ? "off" : "on", 5229 vm_total_pages); 5230 #ifdef CONFIG_NUMA 5231 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5232 #endif 5233 } 5234 5235 static int zone_batchsize(struct zone *zone) 5236 { 5237 #ifdef CONFIG_MMU 5238 int batch; 5239 5240 /* 5241 * The number of pages to batch allocate is either ~0.1% 5242 * of the zone or 1MB, whichever is smaller. The batch 5243 * size is striking a balance between allocation latency 5244 * and zone lock contention. 5245 */ 5246 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5247 batch /= 4; /* We effectively *= 4 below */ 5248 if (batch < 1) 5249 batch = 1; 5250 5251 /* 5252 * Clamp the batch to a 2^n - 1 value. Having a power 5253 * of 2 value was found to be more likely to have 5254 * suboptimal cache aliasing properties in some cases. 5255 * 5256 * For example if 2 tasks are alternately allocating 5257 * batches of pages, one task can end up with a lot 5258 * of pages of one half of the possible page colors 5259 * and the other with pages of the other colors. 5260 */ 5261 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5262 5263 return batch; 5264 5265 #else 5266 /* The deferral and batching of frees should be suppressed under NOMMU 5267 * conditions. 5268 * 5269 * The problem is that NOMMU needs to be able to allocate large chunks 5270 * of contiguous memory as there's no hardware page translation to 5271 * assemble apparent contiguous memory from discontiguous pages. 5272 * 5273 * Queueing large contiguous runs of pages for batching, however, 5274 * causes the pages to actually be freed in smaller chunks. As there 5275 * can be a significant delay between the individual batches being 5276 * recycled, this leads to the once large chunks of space being 5277 * fragmented and becoming unavailable for high-order allocations. 5278 */ 5279 return 0; 5280 #endif 5281 } 5282 5283 static int percpu_pagelist_high_fraction; 5284 static int zone_highsize(struct zone *zone, int batch, int cpu_online) 5285 { 5286 #ifdef CONFIG_MMU 5287 int high; 5288 int nr_split_cpus; 5289 unsigned long total_pages; 5290 5291 if (!percpu_pagelist_high_fraction) { 5292 /* 5293 * By default, the high value of the pcp is based on the zone 5294 * low watermark so that if they are full then background 5295 * reclaim will not be started prematurely. 5296 */ 5297 total_pages = low_wmark_pages(zone); 5298 } else { 5299 /* 5300 * If percpu_pagelist_high_fraction is configured, the high 5301 * value is based on a fraction of the managed pages in the 5302 * zone. 5303 */ 5304 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; 5305 } 5306 5307 /* 5308 * Split the high value across all online CPUs local to the zone. Note 5309 * that early in boot that CPUs may not be online yet and that during 5310 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5311 * onlined. For memory nodes that have no CPUs, split pcp->high across 5312 * all online CPUs to mitigate the risk that reclaim is triggered 5313 * prematurely due to pages stored on pcp lists. 5314 */ 5315 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5316 if (!nr_split_cpus) 5317 nr_split_cpus = num_online_cpus(); 5318 high = total_pages / nr_split_cpus; 5319 5320 /* 5321 * Ensure high is at least batch*4. The multiple is based on the 5322 * historical relationship between high and batch. 5323 */ 5324 high = max(high, batch << 2); 5325 5326 return high; 5327 #else 5328 return 0; 5329 #endif 5330 } 5331 5332 /* 5333 * pcp->high and pcp->batch values are related and generally batch is lower 5334 * than high. They are also related to pcp->count such that count is lower 5335 * than high, and as soon as it reaches high, the pcplist is flushed. 5336 * 5337 * However, guaranteeing these relations at all times would require e.g. write 5338 * barriers here but also careful usage of read barriers at the read side, and 5339 * thus be prone to error and bad for performance. Thus the update only prevents 5340 * store tearing. Any new users of pcp->batch and pcp->high should ensure they 5341 * can cope with those fields changing asynchronously, and fully trust only the 5342 * pcp->count field on the local CPU with interrupts disabled. 5343 * 5344 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5345 * outside of boot time (or some other assurance that no concurrent updaters 5346 * exist). 5347 */ 5348 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 5349 unsigned long batch) 5350 { 5351 WRITE_ONCE(pcp->batch, batch); 5352 WRITE_ONCE(pcp->high, high); 5353 } 5354 5355 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5356 { 5357 int pindex; 5358 5359 memset(pcp, 0, sizeof(*pcp)); 5360 memset(pzstats, 0, sizeof(*pzstats)); 5361 5362 spin_lock_init(&pcp->lock); 5363 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5364 INIT_LIST_HEAD(&pcp->lists[pindex]); 5365 5366 /* 5367 * Set batch and high values safe for a boot pageset. A true percpu 5368 * pageset's initialization will update them subsequently. Here we don't 5369 * need to be as careful as pageset_update() as nobody can access the 5370 * pageset yet. 5371 */ 5372 pcp->high = BOOT_PAGESET_HIGH; 5373 pcp->batch = BOOT_PAGESET_BATCH; 5374 pcp->free_factor = 0; 5375 } 5376 5377 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 5378 unsigned long batch) 5379 { 5380 struct per_cpu_pages *pcp; 5381 int cpu; 5382 5383 for_each_possible_cpu(cpu) { 5384 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5385 pageset_update(pcp, high, batch); 5386 } 5387 } 5388 5389 /* 5390 * Calculate and set new high and batch values for all per-cpu pagesets of a 5391 * zone based on the zone's size. 5392 */ 5393 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5394 { 5395 int new_high, new_batch; 5396 5397 new_batch = max(1, zone_batchsize(zone)); 5398 new_high = zone_highsize(zone, new_batch, cpu_online); 5399 5400 if (zone->pageset_high == new_high && 5401 zone->pageset_batch == new_batch) 5402 return; 5403 5404 zone->pageset_high = new_high; 5405 zone->pageset_batch = new_batch; 5406 5407 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 5408 } 5409 5410 void __meminit setup_zone_pageset(struct zone *zone) 5411 { 5412 int cpu; 5413 5414 /* Size may be 0 on !SMP && !NUMA */ 5415 if (sizeof(struct per_cpu_zonestat) > 0) 5416 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5417 5418 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5419 for_each_possible_cpu(cpu) { 5420 struct per_cpu_pages *pcp; 5421 struct per_cpu_zonestat *pzstats; 5422 5423 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5424 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5425 per_cpu_pages_init(pcp, pzstats); 5426 } 5427 5428 zone_set_pageset_high_and_batch(zone, 0); 5429 } 5430 5431 /* 5432 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5433 * page high values need to be recalculated. 5434 */ 5435 static void zone_pcp_update(struct zone *zone, int cpu_online) 5436 { 5437 mutex_lock(&pcp_batch_high_lock); 5438 zone_set_pageset_high_and_batch(zone, cpu_online); 5439 mutex_unlock(&pcp_batch_high_lock); 5440 } 5441 5442 /* 5443 * Allocate per cpu pagesets and initialize them. 5444 * Before this call only boot pagesets were available. 5445 */ 5446 void __init setup_per_cpu_pageset(void) 5447 { 5448 struct pglist_data *pgdat; 5449 struct zone *zone; 5450 int __maybe_unused cpu; 5451 5452 for_each_populated_zone(zone) 5453 setup_zone_pageset(zone); 5454 5455 #ifdef CONFIG_NUMA 5456 /* 5457 * Unpopulated zones continue using the boot pagesets. 5458 * The numa stats for these pagesets need to be reset. 5459 * Otherwise, they will end up skewing the stats of 5460 * the nodes these zones are associated with. 5461 */ 5462 for_each_possible_cpu(cpu) { 5463 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5464 memset(pzstats->vm_numa_event, 0, 5465 sizeof(pzstats->vm_numa_event)); 5466 } 5467 #endif 5468 5469 for_each_online_pgdat(pgdat) 5470 pgdat->per_cpu_nodestats = 5471 alloc_percpu(struct per_cpu_nodestat); 5472 } 5473 5474 __meminit void zone_pcp_init(struct zone *zone) 5475 { 5476 /* 5477 * per cpu subsystem is not up at this point. The following code 5478 * relies on the ability of the linker to provide the 5479 * offset of a (static) per cpu variable into the per cpu area. 5480 */ 5481 zone->per_cpu_pageset = &boot_pageset; 5482 zone->per_cpu_zonestats = &boot_zonestats; 5483 zone->pageset_high = BOOT_PAGESET_HIGH; 5484 zone->pageset_batch = BOOT_PAGESET_BATCH; 5485 5486 if (populated_zone(zone)) 5487 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 5488 zone->present_pages, zone_batchsize(zone)); 5489 } 5490 5491 void adjust_managed_page_count(struct page *page, long count) 5492 { 5493 atomic_long_add(count, &page_zone(page)->managed_pages); 5494 totalram_pages_add(count); 5495 #ifdef CONFIG_HIGHMEM 5496 if (PageHighMem(page)) 5497 totalhigh_pages_add(count); 5498 #endif 5499 } 5500 EXPORT_SYMBOL(adjust_managed_page_count); 5501 5502 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 5503 { 5504 void *pos; 5505 unsigned long pages = 0; 5506 5507 start = (void *)PAGE_ALIGN((unsigned long)start); 5508 end = (void *)((unsigned long)end & PAGE_MASK); 5509 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5510 struct page *page = virt_to_page(pos); 5511 void *direct_map_addr; 5512 5513 /* 5514 * 'direct_map_addr' might be different from 'pos' 5515 * because some architectures' virt_to_page() 5516 * work with aliases. Getting the direct map 5517 * address ensures that we get a _writeable_ 5518 * alias for the memset(). 5519 */ 5520 direct_map_addr = page_address(page); 5521 /* 5522 * Perform a kasan-unchecked memset() since this memory 5523 * has not been initialized. 5524 */ 5525 direct_map_addr = kasan_reset_tag(direct_map_addr); 5526 if ((unsigned int)poison <= 0xFF) 5527 memset(direct_map_addr, poison, PAGE_SIZE); 5528 5529 free_reserved_page(page); 5530 } 5531 5532 if (pages && s) 5533 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 5534 5535 return pages; 5536 } 5537 5538 static int page_alloc_cpu_dead(unsigned int cpu) 5539 { 5540 struct zone *zone; 5541 5542 lru_add_drain_cpu(cpu); 5543 mlock_drain_remote(cpu); 5544 drain_pages(cpu); 5545 5546 /* 5547 * Spill the event counters of the dead processor 5548 * into the current processors event counters. 5549 * This artificially elevates the count of the current 5550 * processor. 5551 */ 5552 vm_events_fold_cpu(cpu); 5553 5554 /* 5555 * Zero the differential counters of the dead processor 5556 * so that the vm statistics are consistent. 5557 * 5558 * This is only okay since the processor is dead and cannot 5559 * race with what we are doing. 5560 */ 5561 cpu_vm_stats_fold(cpu); 5562 5563 for_each_populated_zone(zone) 5564 zone_pcp_update(zone, 0); 5565 5566 return 0; 5567 } 5568 5569 static int page_alloc_cpu_online(unsigned int cpu) 5570 { 5571 struct zone *zone; 5572 5573 for_each_populated_zone(zone) 5574 zone_pcp_update(zone, 1); 5575 return 0; 5576 } 5577 5578 void __init page_alloc_init_cpuhp(void) 5579 { 5580 int ret; 5581 5582 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 5583 "mm/page_alloc:pcp", 5584 page_alloc_cpu_online, 5585 page_alloc_cpu_dead); 5586 WARN_ON(ret < 0); 5587 } 5588 5589 /* 5590 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 5591 * or min_free_kbytes changes. 5592 */ 5593 static void calculate_totalreserve_pages(void) 5594 { 5595 struct pglist_data *pgdat; 5596 unsigned long reserve_pages = 0; 5597 enum zone_type i, j; 5598 5599 for_each_online_pgdat(pgdat) { 5600 5601 pgdat->totalreserve_pages = 0; 5602 5603 for (i = 0; i < MAX_NR_ZONES; i++) { 5604 struct zone *zone = pgdat->node_zones + i; 5605 long max = 0; 5606 unsigned long managed_pages = zone_managed_pages(zone); 5607 5608 /* Find valid and maximum lowmem_reserve in the zone */ 5609 for (j = i; j < MAX_NR_ZONES; j++) { 5610 if (zone->lowmem_reserve[j] > max) 5611 max = zone->lowmem_reserve[j]; 5612 } 5613 5614 /* we treat the high watermark as reserved pages. */ 5615 max += high_wmark_pages(zone); 5616 5617 if (max > managed_pages) 5618 max = managed_pages; 5619 5620 pgdat->totalreserve_pages += max; 5621 5622 reserve_pages += max; 5623 } 5624 } 5625 totalreserve_pages = reserve_pages; 5626 } 5627 5628 /* 5629 * setup_per_zone_lowmem_reserve - called whenever 5630 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 5631 * has a correct pages reserved value, so an adequate number of 5632 * pages are left in the zone after a successful __alloc_pages(). 5633 */ 5634 static void setup_per_zone_lowmem_reserve(void) 5635 { 5636 struct pglist_data *pgdat; 5637 enum zone_type i, j; 5638 5639 for_each_online_pgdat(pgdat) { 5640 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 5641 struct zone *zone = &pgdat->node_zones[i]; 5642 int ratio = sysctl_lowmem_reserve_ratio[i]; 5643 bool clear = !ratio || !zone_managed_pages(zone); 5644 unsigned long managed_pages = 0; 5645 5646 for (j = i + 1; j < MAX_NR_ZONES; j++) { 5647 struct zone *upper_zone = &pgdat->node_zones[j]; 5648 5649 managed_pages += zone_managed_pages(upper_zone); 5650 5651 if (clear) 5652 zone->lowmem_reserve[j] = 0; 5653 else 5654 zone->lowmem_reserve[j] = managed_pages / ratio; 5655 } 5656 } 5657 } 5658 5659 /* update totalreserve_pages */ 5660 calculate_totalreserve_pages(); 5661 } 5662 5663 static void __setup_per_zone_wmarks(void) 5664 { 5665 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5666 unsigned long lowmem_pages = 0; 5667 struct zone *zone; 5668 unsigned long flags; 5669 5670 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 5671 for_each_zone(zone) { 5672 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 5673 lowmem_pages += zone_managed_pages(zone); 5674 } 5675 5676 for_each_zone(zone) { 5677 u64 tmp; 5678 5679 spin_lock_irqsave(&zone->lock, flags); 5680 tmp = (u64)pages_min * zone_managed_pages(zone); 5681 do_div(tmp, lowmem_pages); 5682 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 5683 /* 5684 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 5685 * need highmem and movable zones pages, so cap pages_min 5686 * to a small value here. 5687 * 5688 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 5689 * deltas control async page reclaim, and so should 5690 * not be capped for highmem and movable zones. 5691 */ 5692 unsigned long min_pages; 5693 5694 min_pages = zone_managed_pages(zone) / 1024; 5695 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 5696 zone->_watermark[WMARK_MIN] = min_pages; 5697 } else { 5698 /* 5699 * If it's a lowmem zone, reserve a number of pages 5700 * proportionate to the zone's size. 5701 */ 5702 zone->_watermark[WMARK_MIN] = tmp; 5703 } 5704 5705 /* 5706 * Set the kswapd watermarks distance according to the 5707 * scale factor in proportion to available memory, but 5708 * ensure a minimum size on small systems. 5709 */ 5710 tmp = max_t(u64, tmp >> 2, 5711 mult_frac(zone_managed_pages(zone), 5712 watermark_scale_factor, 10000)); 5713 5714 zone->watermark_boost = 0; 5715 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 5716 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 5717 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 5718 5719 spin_unlock_irqrestore(&zone->lock, flags); 5720 } 5721 5722 /* update totalreserve_pages */ 5723 calculate_totalreserve_pages(); 5724 } 5725 5726 /** 5727 * setup_per_zone_wmarks - called when min_free_kbytes changes 5728 * or when memory is hot-{added|removed} 5729 * 5730 * Ensures that the watermark[min,low,high] values for each zone are set 5731 * correctly with respect to min_free_kbytes. 5732 */ 5733 void setup_per_zone_wmarks(void) 5734 { 5735 struct zone *zone; 5736 static DEFINE_SPINLOCK(lock); 5737 5738 spin_lock(&lock); 5739 __setup_per_zone_wmarks(); 5740 spin_unlock(&lock); 5741 5742 /* 5743 * The watermark size have changed so update the pcpu batch 5744 * and high limits or the limits may be inappropriate. 5745 */ 5746 for_each_zone(zone) 5747 zone_pcp_update(zone, 0); 5748 } 5749 5750 /* 5751 * Initialise min_free_kbytes. 5752 * 5753 * For small machines we want it small (128k min). For large machines 5754 * we want it large (256MB max). But it is not linear, because network 5755 * bandwidth does not increase linearly with machine size. We use 5756 * 5757 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 5758 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 5759 * 5760 * which yields 5761 * 5762 * 16MB: 512k 5763 * 32MB: 724k 5764 * 64MB: 1024k 5765 * 128MB: 1448k 5766 * 256MB: 2048k 5767 * 512MB: 2896k 5768 * 1024MB: 4096k 5769 * 2048MB: 5792k 5770 * 4096MB: 8192k 5771 * 8192MB: 11584k 5772 * 16384MB: 16384k 5773 */ 5774 void calculate_min_free_kbytes(void) 5775 { 5776 unsigned long lowmem_kbytes; 5777 int new_min_free_kbytes; 5778 5779 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 5780 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 5781 5782 if (new_min_free_kbytes > user_min_free_kbytes) 5783 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 5784 else 5785 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 5786 new_min_free_kbytes, user_min_free_kbytes); 5787 5788 } 5789 5790 int __meminit init_per_zone_wmark_min(void) 5791 { 5792 calculate_min_free_kbytes(); 5793 setup_per_zone_wmarks(); 5794 refresh_zone_stat_thresholds(); 5795 setup_per_zone_lowmem_reserve(); 5796 5797 #ifdef CONFIG_NUMA 5798 setup_min_unmapped_ratio(); 5799 setup_min_slab_ratio(); 5800 #endif 5801 5802 khugepaged_min_free_kbytes_update(); 5803 5804 return 0; 5805 } 5806 postcore_initcall(init_per_zone_wmark_min) 5807 5808 /* 5809 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 5810 * that we can call two helper functions whenever min_free_kbytes 5811 * changes. 5812 */ 5813 static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 5814 void *buffer, size_t *length, loff_t *ppos) 5815 { 5816 int rc; 5817 5818 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5819 if (rc) 5820 return rc; 5821 5822 if (write) { 5823 user_min_free_kbytes = min_free_kbytes; 5824 setup_per_zone_wmarks(); 5825 } 5826 return 0; 5827 } 5828 5829 static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 5830 void *buffer, size_t *length, loff_t *ppos) 5831 { 5832 int rc; 5833 5834 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5835 if (rc) 5836 return rc; 5837 5838 if (write) 5839 setup_per_zone_wmarks(); 5840 5841 return 0; 5842 } 5843 5844 #ifdef CONFIG_NUMA 5845 static void setup_min_unmapped_ratio(void) 5846 { 5847 pg_data_t *pgdat; 5848 struct zone *zone; 5849 5850 for_each_online_pgdat(pgdat) 5851 pgdat->min_unmapped_pages = 0; 5852 5853 for_each_zone(zone) 5854 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 5855 sysctl_min_unmapped_ratio) / 100; 5856 } 5857 5858 5859 static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 5860 void *buffer, size_t *length, loff_t *ppos) 5861 { 5862 int rc; 5863 5864 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5865 if (rc) 5866 return rc; 5867 5868 setup_min_unmapped_ratio(); 5869 5870 return 0; 5871 } 5872 5873 static void setup_min_slab_ratio(void) 5874 { 5875 pg_data_t *pgdat; 5876 struct zone *zone; 5877 5878 for_each_online_pgdat(pgdat) 5879 pgdat->min_slab_pages = 0; 5880 5881 for_each_zone(zone) 5882 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 5883 sysctl_min_slab_ratio) / 100; 5884 } 5885 5886 static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 5887 void *buffer, size_t *length, loff_t *ppos) 5888 { 5889 int rc; 5890 5891 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5892 if (rc) 5893 return rc; 5894 5895 setup_min_slab_ratio(); 5896 5897 return 0; 5898 } 5899 #endif 5900 5901 /* 5902 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 5903 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 5904 * whenever sysctl_lowmem_reserve_ratio changes. 5905 * 5906 * The reserve ratio obviously has absolutely no relation with the 5907 * minimum watermarks. The lowmem reserve ratio can only make sense 5908 * if in function of the boot time zone sizes. 5909 */ 5910 static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, 5911 int write, void *buffer, size_t *length, loff_t *ppos) 5912 { 5913 int i; 5914 5915 proc_dointvec_minmax(table, write, buffer, length, ppos); 5916 5917 for (i = 0; i < MAX_NR_ZONES; i++) { 5918 if (sysctl_lowmem_reserve_ratio[i] < 1) 5919 sysctl_lowmem_reserve_ratio[i] = 0; 5920 } 5921 5922 setup_per_zone_lowmem_reserve(); 5923 return 0; 5924 } 5925 5926 /* 5927 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 5928 * cpu. It is the fraction of total pages in each zone that a hot per cpu 5929 * pagelist can have before it gets flushed back to buddy allocator. 5930 */ 5931 static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, 5932 int write, void *buffer, size_t *length, loff_t *ppos) 5933 { 5934 struct zone *zone; 5935 int old_percpu_pagelist_high_fraction; 5936 int ret; 5937 5938 mutex_lock(&pcp_batch_high_lock); 5939 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 5940 5941 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5942 if (!write || ret < 0) 5943 goto out; 5944 5945 /* Sanity checking to avoid pcp imbalance */ 5946 if (percpu_pagelist_high_fraction && 5947 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 5948 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 5949 ret = -EINVAL; 5950 goto out; 5951 } 5952 5953 /* No change? */ 5954 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 5955 goto out; 5956 5957 for_each_populated_zone(zone) 5958 zone_set_pageset_high_and_batch(zone, 0); 5959 out: 5960 mutex_unlock(&pcp_batch_high_lock); 5961 return ret; 5962 } 5963 5964 static struct ctl_table page_alloc_sysctl_table[] = { 5965 { 5966 .procname = "min_free_kbytes", 5967 .data = &min_free_kbytes, 5968 .maxlen = sizeof(min_free_kbytes), 5969 .mode = 0644, 5970 .proc_handler = min_free_kbytes_sysctl_handler, 5971 .extra1 = SYSCTL_ZERO, 5972 }, 5973 { 5974 .procname = "watermark_boost_factor", 5975 .data = &watermark_boost_factor, 5976 .maxlen = sizeof(watermark_boost_factor), 5977 .mode = 0644, 5978 .proc_handler = proc_dointvec_minmax, 5979 .extra1 = SYSCTL_ZERO, 5980 }, 5981 { 5982 .procname = "watermark_scale_factor", 5983 .data = &watermark_scale_factor, 5984 .maxlen = sizeof(watermark_scale_factor), 5985 .mode = 0644, 5986 .proc_handler = watermark_scale_factor_sysctl_handler, 5987 .extra1 = SYSCTL_ONE, 5988 .extra2 = SYSCTL_THREE_THOUSAND, 5989 }, 5990 { 5991 .procname = "percpu_pagelist_high_fraction", 5992 .data = &percpu_pagelist_high_fraction, 5993 .maxlen = sizeof(percpu_pagelist_high_fraction), 5994 .mode = 0644, 5995 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 5996 .extra1 = SYSCTL_ZERO, 5997 }, 5998 { 5999 .procname = "lowmem_reserve_ratio", 6000 .data = &sysctl_lowmem_reserve_ratio, 6001 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 6002 .mode = 0644, 6003 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 6004 }, 6005 #ifdef CONFIG_NUMA 6006 { 6007 .procname = "numa_zonelist_order", 6008 .data = &numa_zonelist_order, 6009 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6010 .mode = 0644, 6011 .proc_handler = numa_zonelist_order_handler, 6012 }, 6013 { 6014 .procname = "min_unmapped_ratio", 6015 .data = &sysctl_min_unmapped_ratio, 6016 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6017 .mode = 0644, 6018 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6019 .extra1 = SYSCTL_ZERO, 6020 .extra2 = SYSCTL_ONE_HUNDRED, 6021 }, 6022 { 6023 .procname = "min_slab_ratio", 6024 .data = &sysctl_min_slab_ratio, 6025 .maxlen = sizeof(sysctl_min_slab_ratio), 6026 .mode = 0644, 6027 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6028 .extra1 = SYSCTL_ZERO, 6029 .extra2 = SYSCTL_ONE_HUNDRED, 6030 }, 6031 #endif 6032 {} 6033 }; 6034 6035 void __init page_alloc_sysctl_init(void) 6036 { 6037 register_sysctl_init("vm", page_alloc_sysctl_table); 6038 } 6039 6040 #ifdef CONFIG_CONTIG_ALLOC 6041 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6042 static void alloc_contig_dump_pages(struct list_head *page_list) 6043 { 6044 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6045 6046 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 6047 struct page *page; 6048 6049 dump_stack(); 6050 list_for_each_entry(page, page_list, lru) 6051 dump_page(page, "migration failure"); 6052 } 6053 } 6054 6055 /* [start, end) must belong to a single zone. */ 6056 int __alloc_contig_migrate_range(struct compact_control *cc, 6057 unsigned long start, unsigned long end) 6058 { 6059 /* This function is based on compact_zone() from compaction.c. */ 6060 unsigned int nr_reclaimed; 6061 unsigned long pfn = start; 6062 unsigned int tries = 0; 6063 int ret = 0; 6064 struct migration_target_control mtc = { 6065 .nid = zone_to_nid(cc->zone), 6066 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 6067 }; 6068 6069 lru_cache_disable(); 6070 6071 while (pfn < end || !list_empty(&cc->migratepages)) { 6072 if (fatal_signal_pending(current)) { 6073 ret = -EINTR; 6074 break; 6075 } 6076 6077 if (list_empty(&cc->migratepages)) { 6078 cc->nr_migratepages = 0; 6079 ret = isolate_migratepages_range(cc, pfn, end); 6080 if (ret && ret != -EAGAIN) 6081 break; 6082 pfn = cc->migrate_pfn; 6083 tries = 0; 6084 } else if (++tries == 5) { 6085 ret = -EBUSY; 6086 break; 6087 } 6088 6089 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6090 &cc->migratepages); 6091 cc->nr_migratepages -= nr_reclaimed; 6092 6093 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6094 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6095 6096 /* 6097 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6098 * to retry again over this error, so do the same here. 6099 */ 6100 if (ret == -ENOMEM) 6101 break; 6102 } 6103 6104 lru_cache_enable(); 6105 if (ret < 0) { 6106 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6107 alloc_contig_dump_pages(&cc->migratepages); 6108 putback_movable_pages(&cc->migratepages); 6109 return ret; 6110 } 6111 return 0; 6112 } 6113 6114 /** 6115 * alloc_contig_range() -- tries to allocate given range of pages 6116 * @start: start PFN to allocate 6117 * @end: one-past-the-last PFN to allocate 6118 * @migratetype: migratetype of the underlying pageblocks (either 6119 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6120 * in range must have the same migratetype and it must 6121 * be either of the two. 6122 * @gfp_mask: GFP mask to use during compaction 6123 * 6124 * The PFN range does not have to be pageblock aligned. The PFN range must 6125 * belong to a single zone. 6126 * 6127 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6128 * pageblocks in the range. Once isolated, the pageblocks should not 6129 * be modified by others. 6130 * 6131 * Return: zero on success or negative error code. On success all 6132 * pages which PFN is in [start, end) are allocated for the caller and 6133 * need to be freed with free_contig_range(). 6134 */ 6135 int alloc_contig_range(unsigned long start, unsigned long end, 6136 unsigned migratetype, gfp_t gfp_mask) 6137 { 6138 unsigned long outer_start, outer_end; 6139 int order; 6140 int ret = 0; 6141 6142 struct compact_control cc = { 6143 .nr_migratepages = 0, 6144 .order = -1, 6145 .zone = page_zone(pfn_to_page(start)), 6146 .mode = MIGRATE_SYNC, 6147 .ignore_skip_hint = true, 6148 .no_set_skip_hint = true, 6149 .gfp_mask = current_gfp_context(gfp_mask), 6150 .alloc_contig = true, 6151 }; 6152 INIT_LIST_HEAD(&cc.migratepages); 6153 6154 /* 6155 * What we do here is we mark all pageblocks in range as 6156 * MIGRATE_ISOLATE. Because pageblock and max order pages may 6157 * have different sizes, and due to the way page allocator 6158 * work, start_isolate_page_range() has special handlings for this. 6159 * 6160 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6161 * migrate the pages from an unaligned range (ie. pages that 6162 * we are interested in). This will put all the pages in 6163 * range back to page allocator as MIGRATE_ISOLATE. 6164 * 6165 * When this is done, we take the pages in range from page 6166 * allocator removing them from the buddy system. This way 6167 * page allocator will never consider using them. 6168 * 6169 * This lets us mark the pageblocks back as 6170 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6171 * aligned range but not in the unaligned, original range are 6172 * put back to page allocator so that buddy can use them. 6173 */ 6174 6175 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); 6176 if (ret) 6177 goto done; 6178 6179 drain_all_pages(cc.zone); 6180 6181 /* 6182 * In case of -EBUSY, we'd like to know which page causes problem. 6183 * So, just fall through. test_pages_isolated() has a tracepoint 6184 * which will report the busy page. 6185 * 6186 * It is possible that busy pages could become available before 6187 * the call to test_pages_isolated, and the range will actually be 6188 * allocated. So, if we fall through be sure to clear ret so that 6189 * -EBUSY is not accidentally used or returned to caller. 6190 */ 6191 ret = __alloc_contig_migrate_range(&cc, start, end); 6192 if (ret && ret != -EBUSY) 6193 goto done; 6194 ret = 0; 6195 6196 /* 6197 * Pages from [start, end) are within a pageblock_nr_pages 6198 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6199 * more, all pages in [start, end) are free in page allocator. 6200 * What we are going to do is to allocate all pages from 6201 * [start, end) (that is remove them from page allocator). 6202 * 6203 * The only problem is that pages at the beginning and at the 6204 * end of interesting range may be not aligned with pages that 6205 * page allocator holds, ie. they can be part of higher order 6206 * pages. Because of this, we reserve the bigger range and 6207 * once this is done free the pages we are not interested in. 6208 * 6209 * We don't have to hold zone->lock here because the pages are 6210 * isolated thus they won't get removed from buddy. 6211 */ 6212 6213 order = 0; 6214 outer_start = start; 6215 while (!PageBuddy(pfn_to_page(outer_start))) { 6216 if (++order > MAX_ORDER) { 6217 outer_start = start; 6218 break; 6219 } 6220 outer_start &= ~0UL << order; 6221 } 6222 6223 if (outer_start != start) { 6224 order = buddy_order(pfn_to_page(outer_start)); 6225 6226 /* 6227 * outer_start page could be small order buddy page and 6228 * it doesn't include start page. Adjust outer_start 6229 * in this case to report failed page properly 6230 * on tracepoint in test_pages_isolated() 6231 */ 6232 if (outer_start + (1UL << order) <= start) 6233 outer_start = start; 6234 } 6235 6236 /* Make sure the range is really isolated. */ 6237 if (test_pages_isolated(outer_start, end, 0)) { 6238 ret = -EBUSY; 6239 goto done; 6240 } 6241 6242 /* Grab isolated pages from freelists. */ 6243 outer_end = isolate_freepages_range(&cc, outer_start, end); 6244 if (!outer_end) { 6245 ret = -EBUSY; 6246 goto done; 6247 } 6248 6249 /* Free head and tail (if any) */ 6250 if (start != outer_start) 6251 free_contig_range(outer_start, start - outer_start); 6252 if (end != outer_end) 6253 free_contig_range(end, outer_end - end); 6254 6255 done: 6256 undo_isolate_page_range(start, end, migratetype); 6257 return ret; 6258 } 6259 EXPORT_SYMBOL(alloc_contig_range); 6260 6261 static int __alloc_contig_pages(unsigned long start_pfn, 6262 unsigned long nr_pages, gfp_t gfp_mask) 6263 { 6264 unsigned long end_pfn = start_pfn + nr_pages; 6265 6266 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, 6267 gfp_mask); 6268 } 6269 6270 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6271 unsigned long nr_pages) 6272 { 6273 unsigned long i, end_pfn = start_pfn + nr_pages; 6274 struct page *page; 6275 6276 for (i = start_pfn; i < end_pfn; i++) { 6277 page = pfn_to_online_page(i); 6278 if (!page) 6279 return false; 6280 6281 if (page_zone(page) != z) 6282 return false; 6283 6284 if (PageReserved(page)) 6285 return false; 6286 6287 if (PageHuge(page)) 6288 return false; 6289 } 6290 return true; 6291 } 6292 6293 static bool zone_spans_last_pfn(const struct zone *zone, 6294 unsigned long start_pfn, unsigned long nr_pages) 6295 { 6296 unsigned long last_pfn = start_pfn + nr_pages - 1; 6297 6298 return zone_spans_pfn(zone, last_pfn); 6299 } 6300 6301 /** 6302 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 6303 * @nr_pages: Number of contiguous pages to allocate 6304 * @gfp_mask: GFP mask to limit search and used during compaction 6305 * @nid: Target node 6306 * @nodemask: Mask for other possible nodes 6307 * 6308 * This routine is a wrapper around alloc_contig_range(). It scans over zones 6309 * on an applicable zonelist to find a contiguous pfn range which can then be 6310 * tried for allocation with alloc_contig_range(). This routine is intended 6311 * for allocation requests which can not be fulfilled with the buddy allocator. 6312 * 6313 * The allocated memory is always aligned to a page boundary. If nr_pages is a 6314 * power of two, then allocated range is also guaranteed to be aligned to same 6315 * nr_pages (e.g. 1GB request would be aligned to 1GB). 6316 * 6317 * Allocated pages can be freed with free_contig_range() or by manually calling 6318 * __free_page() on each allocated page. 6319 * 6320 * Return: pointer to contiguous pages on success, or NULL if not successful. 6321 */ 6322 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, 6323 int nid, nodemask_t *nodemask) 6324 { 6325 unsigned long ret, pfn, flags; 6326 struct zonelist *zonelist; 6327 struct zone *zone; 6328 struct zoneref *z; 6329 6330 zonelist = node_zonelist(nid, gfp_mask); 6331 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6332 gfp_zone(gfp_mask), nodemask) { 6333 spin_lock_irqsave(&zone->lock, flags); 6334 6335 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 6336 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 6337 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 6338 /* 6339 * We release the zone lock here because 6340 * alloc_contig_range() will also lock the zone 6341 * at some point. If there's an allocation 6342 * spinning on this lock, it may win the race 6343 * and cause alloc_contig_range() to fail... 6344 */ 6345 spin_unlock_irqrestore(&zone->lock, flags); 6346 ret = __alloc_contig_pages(pfn, nr_pages, 6347 gfp_mask); 6348 if (!ret) 6349 return pfn_to_page(pfn); 6350 spin_lock_irqsave(&zone->lock, flags); 6351 } 6352 pfn += nr_pages; 6353 } 6354 spin_unlock_irqrestore(&zone->lock, flags); 6355 } 6356 return NULL; 6357 } 6358 #endif /* CONFIG_CONTIG_ALLOC */ 6359 6360 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 6361 { 6362 unsigned long count = 0; 6363 6364 for (; nr_pages--; pfn++) { 6365 struct page *page = pfn_to_page(pfn); 6366 6367 count += page_count(page) != 1; 6368 __free_page(page); 6369 } 6370 WARN(count != 0, "%lu pages are still in use!\n", count); 6371 } 6372 EXPORT_SYMBOL(free_contig_range); 6373 6374 /* 6375 * Effectively disable pcplists for the zone by setting the high limit to 0 6376 * and draining all cpus. A concurrent page freeing on another CPU that's about 6377 * to put the page on pcplist will either finish before the drain and the page 6378 * will be drained, or observe the new high limit and skip the pcplist. 6379 * 6380 * Must be paired with a call to zone_pcp_enable(). 6381 */ 6382 void zone_pcp_disable(struct zone *zone) 6383 { 6384 mutex_lock(&pcp_batch_high_lock); 6385 __zone_set_pageset_high_and_batch(zone, 0, 1); 6386 __drain_all_pages(zone, true); 6387 } 6388 6389 void zone_pcp_enable(struct zone *zone) 6390 { 6391 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); 6392 mutex_unlock(&pcp_batch_high_lock); 6393 } 6394 6395 void zone_pcp_reset(struct zone *zone) 6396 { 6397 int cpu; 6398 struct per_cpu_zonestat *pzstats; 6399 6400 if (zone->per_cpu_pageset != &boot_pageset) { 6401 for_each_online_cpu(cpu) { 6402 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6403 drain_zonestat(zone, pzstats); 6404 } 6405 free_percpu(zone->per_cpu_pageset); 6406 zone->per_cpu_pageset = &boot_pageset; 6407 if (zone->per_cpu_zonestats != &boot_zonestats) { 6408 free_percpu(zone->per_cpu_zonestats); 6409 zone->per_cpu_zonestats = &boot_zonestats; 6410 } 6411 } 6412 } 6413 6414 #ifdef CONFIG_MEMORY_HOTREMOVE 6415 /* 6416 * All pages in the range must be in a single zone, must not contain holes, 6417 * must span full sections, and must be isolated before calling this function. 6418 */ 6419 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 6420 { 6421 unsigned long pfn = start_pfn; 6422 struct page *page; 6423 struct zone *zone; 6424 unsigned int order; 6425 unsigned long flags; 6426 6427 offline_mem_sections(pfn, end_pfn); 6428 zone = page_zone(pfn_to_page(pfn)); 6429 spin_lock_irqsave(&zone->lock, flags); 6430 while (pfn < end_pfn) { 6431 page = pfn_to_page(pfn); 6432 /* 6433 * The HWPoisoned page may be not in buddy system, and 6434 * page_count() is not 0. 6435 */ 6436 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6437 pfn++; 6438 continue; 6439 } 6440 /* 6441 * At this point all remaining PageOffline() pages have a 6442 * reference count of 0 and can simply be skipped. 6443 */ 6444 if (PageOffline(page)) { 6445 BUG_ON(page_count(page)); 6446 BUG_ON(PageBuddy(page)); 6447 pfn++; 6448 continue; 6449 } 6450 6451 BUG_ON(page_count(page)); 6452 BUG_ON(!PageBuddy(page)); 6453 order = buddy_order(page); 6454 del_page_from_free_list(page, zone, order); 6455 pfn += (1 << order); 6456 } 6457 spin_unlock_irqrestore(&zone->lock, flags); 6458 } 6459 #endif 6460 6461 /* 6462 * This function returns a stable result only if called under zone lock. 6463 */ 6464 bool is_free_buddy_page(struct page *page) 6465 { 6466 unsigned long pfn = page_to_pfn(page); 6467 unsigned int order; 6468 6469 for (order = 0; order <= MAX_ORDER; order++) { 6470 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6471 6472 if (PageBuddy(page_head) && 6473 buddy_order_unsafe(page_head) >= order) 6474 break; 6475 } 6476 6477 return order <= MAX_ORDER; 6478 } 6479 EXPORT_SYMBOL(is_free_buddy_page); 6480 6481 #ifdef CONFIG_MEMORY_FAILURE 6482 /* 6483 * Break down a higher-order page in sub-pages, and keep our target out of 6484 * buddy allocator. 6485 */ 6486 static void break_down_buddy_pages(struct zone *zone, struct page *page, 6487 struct page *target, int low, int high, 6488 int migratetype) 6489 { 6490 unsigned long size = 1 << high; 6491 struct page *current_buddy, *next_page; 6492 6493 while (high > low) { 6494 high--; 6495 size >>= 1; 6496 6497 if (target >= &page[size]) { 6498 next_page = page + size; 6499 current_buddy = page; 6500 } else { 6501 next_page = page; 6502 current_buddy = page + size; 6503 } 6504 6505 if (set_page_guard(zone, current_buddy, high, migratetype)) 6506 continue; 6507 6508 if (current_buddy != target) { 6509 add_to_free_list(current_buddy, zone, high, migratetype); 6510 set_buddy_order(current_buddy, high); 6511 page = next_page; 6512 } 6513 } 6514 } 6515 6516 /* 6517 * Take a page that will be marked as poisoned off the buddy allocator. 6518 */ 6519 bool take_page_off_buddy(struct page *page) 6520 { 6521 struct zone *zone = page_zone(page); 6522 unsigned long pfn = page_to_pfn(page); 6523 unsigned long flags; 6524 unsigned int order; 6525 bool ret = false; 6526 6527 spin_lock_irqsave(&zone->lock, flags); 6528 for (order = 0; order <= MAX_ORDER; order++) { 6529 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6530 int page_order = buddy_order(page_head); 6531 6532 if (PageBuddy(page_head) && page_order >= order) { 6533 unsigned long pfn_head = page_to_pfn(page_head); 6534 int migratetype = get_pfnblock_migratetype(page_head, 6535 pfn_head); 6536 6537 del_page_from_free_list(page_head, zone, page_order); 6538 break_down_buddy_pages(zone, page_head, page, 0, 6539 page_order, migratetype); 6540 SetPageHWPoisonTakenOff(page); 6541 if (!is_migrate_isolate(migratetype)) 6542 __mod_zone_freepage_state(zone, -1, migratetype); 6543 ret = true; 6544 break; 6545 } 6546 if (page_count(page_head) > 0) 6547 break; 6548 } 6549 spin_unlock_irqrestore(&zone->lock, flags); 6550 return ret; 6551 } 6552 6553 /* 6554 * Cancel takeoff done by take_page_off_buddy(). 6555 */ 6556 bool put_page_back_buddy(struct page *page) 6557 { 6558 struct zone *zone = page_zone(page); 6559 unsigned long pfn = page_to_pfn(page); 6560 unsigned long flags; 6561 int migratetype = get_pfnblock_migratetype(page, pfn); 6562 bool ret = false; 6563 6564 spin_lock_irqsave(&zone->lock, flags); 6565 if (put_page_testzero(page)) { 6566 ClearPageHWPoisonTakenOff(page); 6567 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 6568 if (TestClearPageHWPoison(page)) { 6569 ret = true; 6570 } 6571 } 6572 spin_unlock_irqrestore(&zone->lock, flags); 6573 6574 return ret; 6575 } 6576 #endif 6577 6578 #ifdef CONFIG_ZONE_DMA 6579 bool has_managed_dma(void) 6580 { 6581 struct pglist_data *pgdat; 6582 6583 for_each_online_pgdat(pgdat) { 6584 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 6585 6586 if (managed_zone(zone)) 6587 return true; 6588 } 6589 return false; 6590 } 6591 #endif /* CONFIG_ZONE_DMA */ 6592 6593 #ifdef CONFIG_UNACCEPTED_MEMORY 6594 6595 /* Counts number of zones with unaccepted pages. */ 6596 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); 6597 6598 static bool lazy_accept = true; 6599 6600 static int __init accept_memory_parse(char *p) 6601 { 6602 if (!strcmp(p, "lazy")) { 6603 lazy_accept = true; 6604 return 0; 6605 } else if (!strcmp(p, "eager")) { 6606 lazy_accept = false; 6607 return 0; 6608 } else { 6609 return -EINVAL; 6610 } 6611 } 6612 early_param("accept_memory", accept_memory_parse); 6613 6614 static bool page_contains_unaccepted(struct page *page, unsigned int order) 6615 { 6616 phys_addr_t start = page_to_phys(page); 6617 phys_addr_t end = start + (PAGE_SIZE << order); 6618 6619 return range_contains_unaccepted_memory(start, end); 6620 } 6621 6622 static void accept_page(struct page *page, unsigned int order) 6623 { 6624 phys_addr_t start = page_to_phys(page); 6625 6626 accept_memory(start, start + (PAGE_SIZE << order)); 6627 } 6628 6629 static bool try_to_accept_memory_one(struct zone *zone) 6630 { 6631 unsigned long flags; 6632 struct page *page; 6633 bool last; 6634 6635 if (list_empty(&zone->unaccepted_pages)) 6636 return false; 6637 6638 spin_lock_irqsave(&zone->lock, flags); 6639 page = list_first_entry_or_null(&zone->unaccepted_pages, 6640 struct page, lru); 6641 if (!page) { 6642 spin_unlock_irqrestore(&zone->lock, flags); 6643 return false; 6644 } 6645 6646 list_del(&page->lru); 6647 last = list_empty(&zone->unaccepted_pages); 6648 6649 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6650 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 6651 spin_unlock_irqrestore(&zone->lock, flags); 6652 6653 accept_page(page, MAX_ORDER); 6654 6655 __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL); 6656 6657 if (last) 6658 static_branch_dec(&zones_with_unaccepted_pages); 6659 6660 return true; 6661 } 6662 6663 static bool try_to_accept_memory(struct zone *zone, unsigned int order) 6664 { 6665 long to_accept; 6666 int ret = false; 6667 6668 /* How much to accept to get to high watermark? */ 6669 to_accept = high_wmark_pages(zone) - 6670 (zone_page_state(zone, NR_FREE_PAGES) - 6671 __zone_watermark_unusable_free(zone, order, 0)); 6672 6673 /* Accept at least one page */ 6674 do { 6675 if (!try_to_accept_memory_one(zone)) 6676 break; 6677 ret = true; 6678 to_accept -= MAX_ORDER_NR_PAGES; 6679 } while (to_accept > 0); 6680 6681 return ret; 6682 } 6683 6684 static inline bool has_unaccepted_memory(void) 6685 { 6686 return static_branch_unlikely(&zones_with_unaccepted_pages); 6687 } 6688 6689 static bool __free_unaccepted(struct page *page) 6690 { 6691 struct zone *zone = page_zone(page); 6692 unsigned long flags; 6693 bool first = false; 6694 6695 if (!lazy_accept) 6696 return false; 6697 6698 spin_lock_irqsave(&zone->lock, flags); 6699 first = list_empty(&zone->unaccepted_pages); 6700 list_add_tail(&page->lru, &zone->unaccepted_pages); 6701 __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6702 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 6703 spin_unlock_irqrestore(&zone->lock, flags); 6704 6705 if (first) 6706 static_branch_inc(&zones_with_unaccepted_pages); 6707 6708 return true; 6709 } 6710 6711 #else 6712 6713 static bool page_contains_unaccepted(struct page *page, unsigned int order) 6714 { 6715 return false; 6716 } 6717 6718 static void accept_page(struct page *page, unsigned int order) 6719 { 6720 } 6721 6722 static bool try_to_accept_memory(struct zone *zone, unsigned int order) 6723 { 6724 return false; 6725 } 6726 6727 static inline bool has_unaccepted_memory(void) 6728 { 6729 return false; 6730 } 6731 6732 static bool __free_unaccepted(struct page *page) 6733 { 6734 BUILD_BUG(); 6735 return false; 6736 } 6737 6738 #endif /* CONFIG_UNACCEPTED_MEMORY */ 6739