// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmstat.h>
#include <linux/fault-inject.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/psi.h>
#include <linux/khugepaged.h>
#include <linux/delayacct.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
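 * The allocation and free fast paths only take the pcp lock with
 * spin_trylock() and fall back to the zone's buddy lists when the lock is
 * already held (e.g. by an interrupt on the same CPU), so on SMP there is
 * no need to disable interrupts around the trylock.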
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * Return value should be used with equivalent unlock helper.
 */
#define pcpu_spin_lock(type, member, ptr)			\
({								\
	type *_ret;						\
	pcpu_task_pin();					\
	_ret = this_cpu_ptr(ptr);				\
	spin_lock(&_ret->member);				\
	_ret;							\
})

#define pcpu_spin_trylock(type, member, ptr)			\
({								\
	type *_ret;						\
	pcpu_task_pin();					\
	_ret = this_cpu_ptr(ptr);				\
	if (!spin_trylock(&_ret->member)) {			\
		pcpu_task_unpin();				\
		_ret = NULL;					\
	}							\
	_ret;							\
})

#define pcpu_spin_unlock(member, ptr)				\
({								\
	spin_unlock(&ptr->member);				\
	pcpu_task_unpin();					\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)					\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_trylock(ptr)					\
	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_unlock(ptr)					\
	pcpu_spin_unlock(lock, ptr)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist.
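 * The value is cached in page->index by the helpers below while the page
 * sits on a pcplist.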
 * Used to avoid the pageblock migratetype lookup when freeing from pcplists
 * in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};

char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
static int watermark_boost_factor __read_mostly = 15000;
static int watermark_scale_factor = 10;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

static bool page_contains_unaccepted(struct page *page, unsigned int order);
static void accept_page(struct page *page, unsigned int order);
static bool try_to_accept_memory(struct zone *zone, unsigned int order);
static inline bool has_unaccepted_memory(void);
static bool __free_unaccepted(struct page *page);

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have refdata wrapper to avoid warning,
 * and to ensure that the function body gets unloaded.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}
#else
static inline bool deferred_pages_enabled(void)
{
	return false;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(const struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(const struct page *page,
					unsigned long pfn, unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);
	/*
	 * This races, without locks, with set_pfnblock_flags_mask(). Ensure
	 * a consistent read of the memory array, so that results, even though
	 * racy, are not corrupted.
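	 * A concurrent update can still make the returned value stale;
	 * callers of this lockless lookup are expected to tolerate that.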
	 */
	word = READ_ONCE(bitmap[word_bitidx]);
	return (word >> bitidx) & mask;
}

static __always_inline int get_pfnblock_migratetype(const struct page *page,
					unsigned long pfn)
{
	return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	do {
	} while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags));
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		ret = !zone_spans_pfn(zone, pfn);
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (zone != page_zone(page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		return NR_LOWORDER_PCP_LISTS;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * order) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++)
		prep_compound_tail(page, i);

	prep_compound_head(page, order);
}

void destroy_large_folio(struct folio *folio)
{
	if (folio_test_hugetlb(folio)) {
		free_huge_folio(folio);
		return;
	}

	if (folio_test_large_rmappable(folio))
		folio_undo_large_rmappable(folio);

	mem_cgroup_uncharge(folio);
	free_the_page(&folio->page, folio_order(folio));
}

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}
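
/*
 * A freed page of the order that a direct compaction request is waiting for
 * can be handed ("captured") straight to that request instead of being merged
 * back into the free lists, so the compacting task does not have to race with
 * other allocations for the page it just created.
 */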
static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, buddy_list);
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon.
 * If that is happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 1)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn = 0;
	unsigned long combined_pfn;
	struct page *buddy;
	bool to_tail;

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < MAX_ORDER) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}

		buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn);
		if (!buddy)
			goto done_merging;

		if (unlikely(order >= pageblock_order)) {
			/*
			 * We want to prevent merge between freepages on pageblock
			 * without fallbacks and normal pageblock. Without this,
			 * pageblock isolation could cause incorrect freepage or CMA
			 * accounting or HIGHATOMIC accounting.
			 */
			int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);

			if (migratetype != buddy_mt
					&& (!migratetype_is_mergeable(migratetype) ||
						!migratetype_is_mergeable(buddy_mt)))
				goto done_merging;
		}

		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/**
 * split_free_page() -- split a free page at split_pfn_offset
 * @free_page:		the original free page
 * @order:		the order of the page
 * @split_pfn_offset:	split offset within the page
 *
 * Return -ENOENT if the free page is changed, otherwise 0
 *
 * It is used when the free page crosses two pageblocks with different migratetypes
 * at split_pfn_offset within the page. The split free page will be put into
 * separate migratetype lists afterwards. Otherwise, the function achieves
 * nothing.
 */
int split_free_page(struct page *free_page,
			unsigned int order, unsigned long split_pfn_offset)
{
	struct zone *zone = page_zone(free_page);
	unsigned long free_page_pfn = page_to_pfn(free_page);
	unsigned long pfn;
	unsigned long flags;
	int free_page_order;
	int mt;
	int ret = 0;

	if (split_pfn_offset == 0)
		return ret;

	spin_lock_irqsave(&zone->lock, flags);

	if (!PageBuddy(free_page) || buddy_order(free_page) != order) {
		ret = -ENOENT;
		goto out;
	}

	mt = get_pfnblock_migratetype(free_page, free_page_pfn);
	if (likely(!is_migrate_isolate(mt)))
		__mod_zone_freepage_state(zone, -(1UL << order), mt);

	del_page_from_free_list(free_page, zone, order);
	for (pfn = free_page_pfn;
	     pfn < free_page_pfn + (1UL << order);) {
		int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn);

		free_page_order = min_t(unsigned int,
					pfn ? __ffs(pfn) : order,
					__fls(split_pfn_offset));
		__free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order,
				mt, FPI_NONE);
		pfn += 1UL << free_page_order;
		split_pfn_offset -= (1UL << free_page_order);
		/* we have done the first part, now switch to second part */
		if (split_pfn_offset == 0)
			split_pfn_offset = (1UL << order) - (pfn - free_page_pfn);
	}
out:
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
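 * (page_bad_reason() below provides the detailed per-field diagnosis, and
 * free_page_is_bad() combines the two.)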
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}

static inline bool is_check_pages_enabled(void)
{
	return static_branch_unlikely(&check_pages_enabled);
}

static int free_tail_page_prepare(struct page *head_page, struct page *page)
{
	struct folio *folio = (struct folio *)head_page;
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!is_check_pages_enabled()) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: these may be in place of ->mapping */
		if (unlikely(folio_entire_mapcount(folio))) {
			bad_page(page, "nonzero entire_mapcount");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_nr_pages_mapped))) {
			bad_page(page, "nonzero nr_pages_mapped");
			goto out;
		}
		if (unlikely(atomic_read(&folio->_pincount))) {
			bad_page(page, "nonzero pincount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. For generic KASAN: deferred memory initialization has not yet completed.
 *    Tag-based KASAN modes skip pages freed via deferred memory initialization
 *    using page tags instead (see below).
 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
 *    that error detection is disabled for accesses via the page address.
 *
 * Pages will have match-all tags in the following circumstances:
 *
 * 1. Pages are being initialized for the first time, including during deferred
 *    memory init; see the call to page_kasan_tag_reset in __init_single_page.
 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the
 *    exception of pages unpoisoned by kasan_unpoison_vmalloc.
 * 3. The allocation was excluded from being checked due to sampling,
 *    see the call to kasan_unpoison_pages.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return deferred_pages_enabled();

	return page_kasan_tag(page) == 0xff;
}

static void kernel_init_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++)
		clear_highpage_kasan_tagged(page + i);
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
			unsigned int order, fpi_t fpi_flags)
{
	int bad = 0;
	bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags);
	bool init = want_init_on_free();

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmsan_free_page(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy
		 * Untie memcg state and reset page's owner
		 */
		if (memcg_kmem_online() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		page_table_check_free(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			page[1].flags &= ~PAGE_FLAGS_SECOND;
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_page_prepare(page, page + i);
			if (is_check_pages_enabled()) {
				if (free_page_is_bad(page + i)) {
					bad++;
					continue;
				}
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_online() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (is_check_pages_enabled()) {
		if (free_page_is_bad(page))
			bad++;
		if (bad)
			return false;
	}

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!skip_kasan_poison) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	unsigned long flags;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure a proper count is passed which otherwise would get stuck
	 * in the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/*
		 * Remove pages from lists in a round-robin fashion.
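		 * The scan starts at the caller's pindex (drained first, see
		 * above) and wraps through the remaining non-empty lists
		 * until enough pages have been freed.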
		 */
		do {
			if (++pindex > NR_PCP_LISTS - 1)
				pindex = 0;
			list = &pcp->lists[pindex];
		} while (list_empty(list));

		order = pindex_to_order(pindex);
		nr_pages = 1 << order;
		do {
			int mt;

			page = list_last_entry(list, struct page, pcp_list);
			mt = get_pcppage_migratetype(page);

			/* must delete to avoid corrupting pcp list */
			list_del(&page->pcp_list);
			count -= nr_pages;
			pcp->count -= nr_pages;

			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
			trace_mm_page_pcpu_drain(page, order, mt);
		} while (count > 0 && !list_empty(list));
	}

	spin_unlock_irqrestore(&zone->lock, flags);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);
	struct zone *zone = page_zone(page);

	if (!free_pages_prepare(page, order, fpi_flags))
		return;

	/*
	 * Calling get_pfnblock_migratetype() without spin_lock_irqsave() here
	 * is used to avoid calling get_pfnblock_migratetype() under the lock.
	 * This will reduce the lock holding time.
	 */
	migratetype = get_pfnblock_migratetype(page, pfn);

	spin_lock_irqsave(&zone->lock, flags);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock_irqrestore(&zone->lock, flags);

	__count_vm_events(PGFREE, 1 << order);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	if (page_contains_unaccepted(page, order)) {
		if (order == MAX_ORDER && __free_unaccepted(page))
			return;

		accept_page(page, order);
	}

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
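	 * FPI_TO_TAIL also skips freelist shuffling, which the onlining code
	 * is expected to do for the whole zone afterwards (see FPI_TO_TAIL
	 * above).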
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 *
 * Note: the function may return non-NULL struct page even for a page block
 * which contains a memory hole (i.e. there is no physical memory for a subset
 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
 * will fall into 2 sub-sections, the end pfn of the pageblock may be a hole
 * even though the start pfn is online and valid. This should be safe most of
 * the time because struct pages are still initialized via init_unavailable_range()
 * and pfn walkers shouldn't touch any physical memory range for which they do
 * not recognize any specific metadata in struct pages.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), which will allow them to be
		 * merged back into the allocator when the buddy is freed.
		 * Corresponding page table entries will not be touched,
		 * pages will stay not present in virtual address space.
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		add_to_free_list(&page[size], zone, high, migratetype);
		set_buddy_order(&page[size], high);
	}
}

static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator
 */
static int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static inline bool check_new_pages(struct page *page, unsigned int order)
{
	if (is_check_pages_enabled()) {
		for (int i = 0; i < (1 << order); i++) {
			struct page *p = page + i;

			if (check_new_page(p))
				return true;
		}
	}

	return false;
}

static inline bool should_skip_kasan_unpoison(gfp_t flags)
{
	/* Don't skip if a software KASAN mode is enabled. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return false;

	/* Skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return true;

	/*
	 * With hardware tag-based KASAN enabled, skip if this has been
	 * requested via __GFP_SKIP_KASAN.
	 */
	return flags & __GFP_SKIP_KASAN;
}

static inline bool should_skip_init(gfp_t flags)
{
	/* Don't skip, if hardware tag-based KASAN is not enabled. */
	if (!kasan_hw_tags_enabled())
		return false;

	/* For hardware tag-based KASAN, skip if requested. */
	return (flags & __GFP_SKIP_ZERO);
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
			!should_skip_init(gfp_flags);
	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
	int i;

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);

	/*
	 * Page unpoisoning must happen before memory initialization.
	 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO
	 * allocations and the page unpoisoning code will complain.
	 */
	kernel_unpoison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN unpoisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 */

	/*
	 * If memory tags should be zeroed
	 * (which happens only when memory should be initialized as well).
	 */
	if (zero_tags) {
		/* Initialize both memory and memory tags. */
		for (i = 0; i != 1 << order; ++i)
			tag_clear_highpage(page + i);

		/* Take note that memory was initialized by the loop above. */
		init = false;
	}
	if (!should_skip_kasan_unpoison(gfp_flags) &&
	    kasan_unpoison_pages(page, order, init)) {
		/* Take note that memory was initialized by KASAN. */
		if (kasan_has_integrated_init())
			init = false;
	} else {
		/*
		 * If memory tags have not been set by KASAN, reset the page
		 * tags to ensure page_address() dereferencing does not fault.
		 */
		for (i = 0; i != 1 << order; ++i)
			page_kasan_tag_reset(page + i);
	}
	/* If memory is still not initialized, initialize it now. */
	if (init)
		kernel_init_pages(page, 1 << order);

	set_page_owner(page, order, gfp_flags);
	page_table_check_alloc(page, order);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order <= MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order);
		expand(zone, page, order, current_order, migratetype);
		set_pcppage_migratetype(page, migratetype);
		trace_mm_page_alloc_zone_locked(page, order, migratetype,
				pcp_allowed_order(order) &&
				migratetype < MIGRATE_PCPTYPES);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which lists are fallen back to when
 * the free lists for the desired migratetype are depleted.
 *
 * The other migratetypes do not have fallbacks.
 */
static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
};

#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the freelist tail of the requested type.
 * Note that start_page and end_pages are not aligned on a pageblock
 * boundary.
 * If alignment is required, use move_freepages_block().
 */
static int move_freepages(struct zone *zone,
			  unsigned long start_pfn, unsigned long end_pfn,
			  int migratetype, int *num_movable)
{
	struct page *page;
	unsigned long pfn;
	unsigned int order;
	int pages_moved = 0;

	for (pfn = start_pfn; pfn <= end_pfn;) {
		page = pfn_to_page(pfn);
		if (!PageBuddy(page)) {
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
			 */
			if (num_movable &&
					(PageLRU(page) || __PageMovable(page)))
				(*num_movable)++;
			pfn++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = buddy_order(page);
		move_to_free_list(page, zone, order, migratetype);
		pfn += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable)
{
	unsigned long start_pfn, end_pfn, pfn;

	if (num_movable)
		*num_movable = 0;

	pfn = page_to_pfn(page);
	start_pfn = pageblock_start_pfn(pfn);
	end_pfn = pageblock_end_pfn(pfn) - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_pfn = pfn;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_pfn, end_pfn, migratetype,
								num_movable);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * Leaving this order check here is intentional, even though the
	 * next check uses a relaxed order condition. The reason is that
	 * we can steal the whole pageblock when this condition is met,
	 * while the check below does not guarantee that and is just a
	 * heuristic, so it could be changed at any time.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}

static inline bool boost_watermark(struct zone *zone)
{
	unsigned long max_boost;

	if (!watermark_boost_factor)
		return false;
	/*
	 * Don't bother in zones that are unlikely to produce results.
	 * On small machines, including kdump capture kernels running
	 * in a small area, boosting the watermark can cause an out of
	 * memory situation immediately.
	 */
	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
		return false;

	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
			watermark_boost_factor, 10000);

	/*
	 * high watermark may be uninitialised if fragmentation occurs
	 * very early in boot so do not boost. We do not fall
	 * through and boost by pageblock_nr_pages as failing
	 * allocations that early means that reclaim is not going
	 * to help and it may even be impossible to reclaim the
	 * boosted watermark resulting in a hang.
	 */
	if (!max_boost)
		return false;

	max_boost = max(pageblock_nr_pages, max_boost);

	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
		max_boost);

	return true;
}

/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal whole pageblock. If not, we first move freepages in this
 * pageblock to our migratetype and determine how many already-allocated pages
 * are there in the pageblock with a compatible migratetype. If at least half
 * of pages are free or compatible, we can change migratetype of the pageblock
 * itself, so pages freed in the future will be put on the correct free list.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
		unsigned int alloc_flags, int start_type, bool whole_block)
{
	unsigned int current_order = buddy_order(page);
	int free_pages, movable_pages, alike_pages;
	int old_block_type;

	old_block_type = get_pageblock_migratetype(page);

	/*
	 * This can happen due to races and we want to prevent broken
	 * highatomic accounting.
	 */
	if (is_migrate_highatomic(old_block_type))
		goto single_page;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		goto single_page;
	}

	/*
	 * Boost watermarks to increase reclaim pressure to reduce the
	 * likelihood of future fallbacks. Wake kswapd now as the node
	 * may be balanced overall and kswapd will not wake naturally.
	 */
	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);

	/* We are not allowed to try stealing from the whole block */
	if (!whole_block)
		goto single_page;

	free_pages = move_freepages_block(zone, page, start_type,
						&movable_pages);
	/* moving whole block can fail due to zone boundary conditions */
	if (!free_pages)
		goto single_page;

	/*
	 * Determine how many pages are compatible with our allocation.
	 * For movable allocation, it's the number of movable pages which
	 * we just obtained.
For other types it's a bit more tricky. 1811 */ 1812 if (start_type == MIGRATE_MOVABLE) { 1813 alike_pages = movable_pages; 1814 } else { 1815 /* 1816 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 1817 * to MOVABLE pageblock, consider all non-movable pages as 1818 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 1819 * vice versa, be conservative since we can't distinguish the 1820 * exact migratetype of non-movable pages. 1821 */ 1822 if (old_block_type == MIGRATE_MOVABLE) 1823 alike_pages = pageblock_nr_pages 1824 - (free_pages + movable_pages); 1825 else 1826 alike_pages = 0; 1827 } 1828 /* 1829 * If a sufficient number of pages in the block are either free or of 1830 * compatible migratability as our allocation, claim the whole block. 1831 */ 1832 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 1833 page_group_by_mobility_disabled) 1834 set_pageblock_migratetype(page, start_type); 1835 1836 return; 1837 1838 single_page: 1839 move_to_free_list(page, zone, current_order, start_type); 1840 } 1841 1842 /* 1843 * Check whether there is a suitable fallback freepage with requested order. 1844 * If only_stealable is true, this function returns fallback_mt only if 1845 * we can steal other freepages all together. This would help to reduce 1846 * fragmentation due to mixed migratetype pages in one pageblock. 1847 */ 1848 int find_suitable_fallback(struct free_area *area, unsigned int order, 1849 int migratetype, bool only_stealable, bool *can_steal) 1850 { 1851 int i; 1852 int fallback_mt; 1853 1854 if (area->nr_free == 0) 1855 return -1; 1856 1857 *can_steal = false; 1858 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 1859 fallback_mt = fallbacks[migratetype][i]; 1860 if (free_area_empty(area, fallback_mt)) 1861 continue; 1862 1863 if (can_steal_fallback(order, migratetype)) 1864 *can_steal = true; 1865 1866 if (!only_stealable) 1867 return fallback_mt; 1868 1869 if (*can_steal) 1870 return fallback_mt; 1871 } 1872 1873 return -1; 1874 } 1875 1876 /* 1877 * Reserve a pageblock for exclusive use of high-order atomic allocations if 1878 * there are no empty page blocks that contain a page with a suitable order 1879 */ 1880 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) 1881 { 1882 int mt; 1883 unsigned long max_managed, flags; 1884 1885 /* 1886 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 1887 * Check is race-prone but harmless. 1888 */ 1889 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 1890 if (zone->nr_reserved_highatomic >= max_managed) 1891 return; 1892 1893 spin_lock_irqsave(&zone->lock, flags); 1894 1895 /* Recheck the nr_reserved_highatomic limit under the lock */ 1896 if (zone->nr_reserved_highatomic >= max_managed) 1897 goto out_unlock; 1898 1899 /* Yoink! */ 1900 mt = get_pageblock_migratetype(page); 1901 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 1902 if (migratetype_is_mergeable(mt)) { 1903 zone->nr_reserved_highatomic += pageblock_nr_pages; 1904 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 1905 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 1906 } 1907 1908 out_unlock: 1909 spin_unlock_irqrestore(&zone->lock, flags); 1910 } 1911 1912 /* 1913 * Used when an allocation is about to fail under memory pressure. This 1914 * potentially hurts the reliability of high-order allocations when under 1915 * intense memory pressure but failed atomic allocations should be easier 1916 * to recover from than an OOM. 
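 * Both callers live later in this file:
 *
 *	unreserve_highatomic_pageblock(ac, false)	- direct reclaim retry
 *	unreserve_highatomic_pageblock(ac, true)	- should_reclaim_retry(),
 *							  last resort before OOM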
1917 * 1918 * If @force is true, try to unreserve a pageblock even though highatomic 1919 * pageblock is exhausted. 1920 */ 1921 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 1922 bool force) 1923 { 1924 struct zonelist *zonelist = ac->zonelist; 1925 unsigned long flags; 1926 struct zoneref *z; 1927 struct zone *zone; 1928 struct page *page; 1929 int order; 1930 bool ret; 1931 1932 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 1933 ac->nodemask) { 1934 /* 1935 * Preserve at least one pageblock unless memory pressure 1936 * is really high. 1937 */ 1938 if (!force && zone->nr_reserved_highatomic <= 1939 pageblock_nr_pages) 1940 continue; 1941 1942 spin_lock_irqsave(&zone->lock, flags); 1943 for (order = 0; order <= MAX_ORDER; order++) { 1944 struct free_area *area = &(zone->free_area[order]); 1945 1946 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 1947 if (!page) 1948 continue; 1949 1950 /* 1951 * In page freeing path, migratetype change is racy so 1952 * we can counter several free pages in a pageblock 1953 * in this loop although we changed the pageblock type 1954 * from highatomic to ac->migratetype. So we should 1955 * adjust the count once. 1956 */ 1957 if (is_migrate_highatomic_page(page)) { 1958 /* 1959 * It should never happen but changes to 1960 * locking could inadvertently allow a per-cpu 1961 * drain to add pages to MIGRATE_HIGHATOMIC 1962 * while unreserving so be safe and watch for 1963 * underflows. 1964 */ 1965 zone->nr_reserved_highatomic -= min( 1966 pageblock_nr_pages, 1967 zone->nr_reserved_highatomic); 1968 } 1969 1970 /* 1971 * Convert to ac->migratetype and avoid the normal 1972 * pageblock stealing heuristics. Minimally, the caller 1973 * is doing the work and needs the pages. More 1974 * importantly, if the block was always converted to 1975 * MIGRATE_UNMOVABLE or another type then the number 1976 * of pageblocks that cannot be completely freed 1977 * may increase. 1978 */ 1979 set_pageblock_migratetype(page, ac->migratetype); 1980 ret = move_freepages_block(zone, page, ac->migratetype, 1981 NULL); 1982 if (ret) { 1983 spin_unlock_irqrestore(&zone->lock, flags); 1984 return ret; 1985 } 1986 } 1987 spin_unlock_irqrestore(&zone->lock, flags); 1988 } 1989 1990 return false; 1991 } 1992 1993 /* 1994 * Try finding a free buddy page on the fallback list and put it on the free 1995 * list of requested migratetype, possibly along with other pages from the same 1996 * block, depending on fragmentation avoidance heuristics. Returns true if 1997 * fallback was found so that __rmqueue_smallest() can grab it. 1998 * 1999 * The use of signed ints for order and current_order is a deliberate 2000 * deviation from the rest of this file, to make the for loop 2001 * condition simpler. 2002 */ 2003 static __always_inline bool 2004 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2005 unsigned int alloc_flags) 2006 { 2007 struct free_area *area; 2008 int current_order; 2009 int min_order = order; 2010 struct page *page; 2011 int fallback_mt; 2012 bool can_steal; 2013 2014 /* 2015 * Do not steal pages from freelists belonging to other pageblocks 2016 * i.e. orders < pageblock_order. If there are no local zones free, 2017 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2018 */ 2019 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2020 min_order = pageblock_order; 2021 2022 /* 2023 * Find the largest available free page in the other list. 
This roughly 2024 * approximates finding the pageblock with the most free pages, which 2025 * would be too costly to do exactly. 2026 */ 2027 for (current_order = MAX_ORDER; current_order >= min_order; 2028 --current_order) { 2029 area = &(zone->free_area[current_order]); 2030 fallback_mt = find_suitable_fallback(area, current_order, 2031 start_migratetype, false, &can_steal); 2032 if (fallback_mt == -1) 2033 continue; 2034 2035 /* 2036 * We cannot steal all free pages from the pageblock and the 2037 * requested migratetype is movable. In that case it's better to 2038 * steal and split the smallest available page instead of the 2039 * largest available page, because even if the next movable 2040 * allocation falls back into a different pageblock than this 2041 * one, it won't cause permanent fragmentation. 2042 */ 2043 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2044 && current_order > order) 2045 goto find_smallest; 2046 2047 goto do_steal; 2048 } 2049 2050 return false; 2051 2052 find_smallest: 2053 for (current_order = order; current_order <= MAX_ORDER; 2054 current_order++) { 2055 area = &(zone->free_area[current_order]); 2056 fallback_mt = find_suitable_fallback(area, current_order, 2057 start_migratetype, false, &can_steal); 2058 if (fallback_mt != -1) 2059 break; 2060 } 2061 2062 /* 2063 * This should not happen - we already found a suitable fallback 2064 * when looking for the largest page. 2065 */ 2066 VM_BUG_ON(current_order > MAX_ORDER); 2067 2068 do_steal: 2069 page = get_page_from_free_area(area, fallback_mt); 2070 2071 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2072 can_steal); 2073 2074 trace_mm_page_alloc_extfrag(page, order, current_order, 2075 start_migratetype, fallback_mt); 2076 2077 return true; 2078 2079 } 2080 2081 /* 2082 * Do the hard work of removing an element from the buddy allocator. 2083 * Call me with the zone->lock already held. 2084 */ 2085 static __always_inline struct page * 2086 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2087 unsigned int alloc_flags) 2088 { 2089 struct page *page; 2090 2091 if (IS_ENABLED(CONFIG_CMA)) { 2092 /* 2093 * Balance movable allocations between regular and CMA areas by 2094 * allocating from CMA when over half of the zone's free memory 2095 * is in the CMA area. 2096 */ 2097 if (alloc_flags & ALLOC_CMA && 2098 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2099 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2100 page = __rmqueue_cma_fallback(zone, order); 2101 if (page) 2102 return page; 2103 } 2104 } 2105 retry: 2106 page = __rmqueue_smallest(zone, order, migratetype); 2107 if (unlikely(!page)) { 2108 if (alloc_flags & ALLOC_CMA) 2109 page = __rmqueue_cma_fallback(zone, order); 2110 2111 if (!page && __rmqueue_fallback(zone, order, migratetype, 2112 alloc_flags)) 2113 goto retry; 2114 } 2115 return page; 2116 } 2117 2118 /* 2119 * Obtain a specified number of elements from the buddy allocator, all under 2120 * a single hold of the lock, for efficiency. Add them to the supplied list. 2121 * Returns the number of new pages which were placed at *list. 
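 *
 * The pcp refill in __rmqueue_pcplist() below boils down to:
 *
 *	alloced = rmqueue_bulk(zone, order, batch, list, migratetype,
 *			       alloc_flags);
 *	pcp->count += alloced << order;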
2122 */ 2123 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2124 unsigned long count, struct list_head *list, 2125 int migratetype, unsigned int alloc_flags) 2126 { 2127 unsigned long flags; 2128 int i; 2129 2130 spin_lock_irqsave(&zone->lock, flags); 2131 for (i = 0; i < count; ++i) { 2132 struct page *page = __rmqueue(zone, order, migratetype, 2133 alloc_flags); 2134 if (unlikely(page == NULL)) 2135 break; 2136 2137 /* 2138 * Split buddy pages returned by expand() are received here in 2139 * physical page order. The page is added to the tail of 2140 * caller's list. From the callers perspective, the linked list 2141 * is ordered by page number under some conditions. This is 2142 * useful for IO devices that can forward direction from the 2143 * head, thus also in the physical page order. This is useful 2144 * for IO devices that can merge IO requests if the physical 2145 * pages are ordered properly. 2146 */ 2147 list_add_tail(&page->pcp_list, list); 2148 if (is_migrate_cma(get_pcppage_migratetype(page))) 2149 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2150 -(1 << order)); 2151 } 2152 2153 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2154 spin_unlock_irqrestore(&zone->lock, flags); 2155 2156 return i; 2157 } 2158 2159 #ifdef CONFIG_NUMA 2160 /* 2161 * Called from the vmstat counter updater to drain pagesets of this 2162 * currently executing processor on remote nodes after they have 2163 * expired. 2164 */ 2165 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2166 { 2167 int to_drain, batch; 2168 2169 batch = READ_ONCE(pcp->batch); 2170 to_drain = min(pcp->count, batch); 2171 if (to_drain > 0) { 2172 spin_lock(&pcp->lock); 2173 free_pcppages_bulk(zone, to_drain, pcp, 0); 2174 spin_unlock(&pcp->lock); 2175 } 2176 } 2177 #endif 2178 2179 /* 2180 * Drain pcplists of the indicated processor and zone. 2181 */ 2182 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2183 { 2184 struct per_cpu_pages *pcp; 2185 2186 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2187 if (pcp->count) { 2188 spin_lock(&pcp->lock); 2189 free_pcppages_bulk(zone, pcp->count, pcp, 0); 2190 spin_unlock(&pcp->lock); 2191 } 2192 } 2193 2194 /* 2195 * Drain pcplists of all zones on the indicated processor. 2196 */ 2197 static void drain_pages(unsigned int cpu) 2198 { 2199 struct zone *zone; 2200 2201 for_each_populated_zone(zone) { 2202 drain_pages_zone(cpu, zone); 2203 } 2204 } 2205 2206 /* 2207 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2208 */ 2209 void drain_local_pages(struct zone *zone) 2210 { 2211 int cpu = smp_processor_id(); 2212 2213 if (zone) 2214 drain_pages_zone(cpu, zone); 2215 else 2216 drain_pages(cpu); 2217 } 2218 2219 /* 2220 * The implementation of drain_all_pages(), exposing an extra parameter to 2221 * drain on all cpus. 2222 * 2223 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2224 * not empty. The check for non-emptiness can however race with a free to 2225 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2226 * that need the guarantee that every CPU has drained can disable the 2227 * optimizing racy check. 
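 *
 * Usage sketch: drain_all_pages(zone) below keeps the racy check,
 * while a caller that must not miss any CPU (for example when
 * pcplists are temporarily disabled) would do:
 *
 *	__drain_all_pages(zone, true);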
2228 */ 2229 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2230 { 2231 int cpu; 2232 2233 /* 2234 * Allocate in the BSS so we won't require allocation in 2235 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2236 */ 2237 static cpumask_t cpus_with_pcps; 2238 2239 /* 2240 * Do not drain if one is already in progress unless it's specific to 2241 * a zone. Such callers are primarily CMA and memory hotplug and need 2242 * the drain to be complete when the call returns. 2243 */ 2244 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2245 if (!zone) 2246 return; 2247 mutex_lock(&pcpu_drain_mutex); 2248 } 2249 2250 /* 2251 * We don't care about racing with CPU hotplug event 2252 * as offline notification will cause the notified 2253 * cpu to drain that CPU pcps and on_each_cpu_mask 2254 * disables preemption as part of its processing 2255 */ 2256 for_each_online_cpu(cpu) { 2257 struct per_cpu_pages *pcp; 2258 struct zone *z; 2259 bool has_pcps = false; 2260 2261 if (force_all_cpus) { 2262 /* 2263 * The pcp.count check is racy, some callers need a 2264 * guarantee that no cpu is missed. 2265 */ 2266 has_pcps = true; 2267 } else if (zone) { 2268 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2269 if (pcp->count) 2270 has_pcps = true; 2271 } else { 2272 for_each_populated_zone(z) { 2273 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2274 if (pcp->count) { 2275 has_pcps = true; 2276 break; 2277 } 2278 } 2279 } 2280 2281 if (has_pcps) 2282 cpumask_set_cpu(cpu, &cpus_with_pcps); 2283 else 2284 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2285 } 2286 2287 for_each_cpu(cpu, &cpus_with_pcps) { 2288 if (zone) 2289 drain_pages_zone(cpu, zone); 2290 else 2291 drain_pages(cpu); 2292 } 2293 2294 mutex_unlock(&pcpu_drain_mutex); 2295 } 2296 2297 /* 2298 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2299 * 2300 * When zone parameter is non-NULL, spill just the single zone's pages. 2301 */ 2302 void drain_all_pages(struct zone *zone) 2303 { 2304 __drain_all_pages(zone, false); 2305 } 2306 2307 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 2308 unsigned int order) 2309 { 2310 int migratetype; 2311 2312 if (!free_pages_prepare(page, order, FPI_NONE)) 2313 return false; 2314 2315 migratetype = get_pfnblock_migratetype(page, pfn); 2316 set_pcppage_migratetype(page, migratetype); 2317 return true; 2318 } 2319 2320 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high) 2321 { 2322 int min_nr_free, max_nr_free; 2323 int batch = READ_ONCE(pcp->batch); 2324 2325 /* Free everything if batch freeing high-order pages. */ 2326 if (unlikely(free_high)) 2327 return pcp->count; 2328 2329 /* Check for PCP disabled or boot pageset */ 2330 if (unlikely(high < batch)) 2331 return 1; 2332 2333 /* Leave at least pcp->batch pages on the list */ 2334 min_nr_free = batch; 2335 max_nr_free = high - batch; 2336 2337 /* 2338 * Double the number of pages freed each time there is subsequent 2339 * freeing of pages without any allocation. 
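 *
 * Worked example (assuming pcp->batch == 64): consecutive frees with
 * no intervening allocation return roughly 64, 128, 256, ... pages,
 * clamped to [min_nr_free, max_nr_free], because free_factor is
 * bumped each time the doubled batch is still below max_nr_free.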
2340 */ 2341 batch <<= pcp->free_factor; 2342 if (batch < max_nr_free) 2343 pcp->free_factor++; 2344 batch = clamp(batch, min_nr_free, max_nr_free); 2345 2346 return batch; 2347 } 2348 2349 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2350 bool free_high) 2351 { 2352 int high = READ_ONCE(pcp->high); 2353 2354 if (unlikely(!high || free_high)) 2355 return 0; 2356 2357 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 2358 return high; 2359 2360 /* 2361 * If reclaim is active, limit the number of pages that can be 2362 * stored on pcp lists 2363 */ 2364 return min(READ_ONCE(pcp->batch) << 2, high); 2365 } 2366 2367 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 2368 struct page *page, int migratetype, 2369 unsigned int order) 2370 { 2371 int high; 2372 int pindex; 2373 bool free_high; 2374 2375 __count_vm_events(PGFREE, 1 << order); 2376 pindex = order_to_pindex(migratetype, order); 2377 list_add(&page->pcp_list, &pcp->lists[pindex]); 2378 pcp->count += 1 << order; 2379 2380 /* 2381 * As high-order pages other than THP's stored on PCP can contribute 2382 * to fragmentation, limit the number stored when PCP is heavily 2383 * freeing without allocation. The remainder after bulk freeing 2384 * stops will be drained from vmstat refresh context. 2385 */ 2386 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); 2387 2388 high = nr_pcp_high(pcp, zone, free_high); 2389 if (pcp->count >= high) { 2390 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex); 2391 } 2392 } 2393 2394 /* 2395 * Free a pcp page 2396 */ 2397 void free_unref_page(struct page *page, unsigned int order) 2398 { 2399 unsigned long __maybe_unused UP_flags; 2400 struct per_cpu_pages *pcp; 2401 struct zone *zone; 2402 unsigned long pfn = page_to_pfn(page); 2403 int migratetype, pcpmigratetype; 2404 2405 if (!free_unref_page_prepare(page, pfn, order)) 2406 return; 2407 2408 /* 2409 * We only track unmovable, reclaimable and movable on pcp lists. 2410 * Place ISOLATE pages on the isolated list because they are being 2411 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2412 * get those areas back if necessary. 
Otherwise, we may have to free 2413 * excessively into the page allocator 2414 */ 2415 migratetype = pcpmigratetype = get_pcppage_migratetype(page); 2416 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2417 if (unlikely(is_migrate_isolate(migratetype))) { 2418 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 2419 return; 2420 } 2421 pcpmigratetype = MIGRATE_MOVABLE; 2422 } 2423 2424 zone = page_zone(page); 2425 pcp_trylock_prepare(UP_flags); 2426 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2427 if (pcp) { 2428 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order); 2429 pcp_spin_unlock(pcp); 2430 } else { 2431 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); 2432 } 2433 pcp_trylock_finish(UP_flags); 2434 } 2435 2436 /* 2437 * Free a list of 0-order pages 2438 */ 2439 void free_unref_page_list(struct list_head *list) 2440 { 2441 unsigned long __maybe_unused UP_flags; 2442 struct page *page, *next; 2443 struct per_cpu_pages *pcp = NULL; 2444 struct zone *locked_zone = NULL; 2445 int batch_count = 0; 2446 int migratetype; 2447 2448 /* Prepare pages for freeing */ 2449 list_for_each_entry_safe(page, next, list, lru) { 2450 unsigned long pfn = page_to_pfn(page); 2451 if (!free_unref_page_prepare(page, pfn, 0)) { 2452 list_del(&page->lru); 2453 continue; 2454 } 2455 2456 /* 2457 * Free isolated pages directly to the allocator, see 2458 * comment in free_unref_page. 2459 */ 2460 migratetype = get_pcppage_migratetype(page); 2461 if (unlikely(is_migrate_isolate(migratetype))) { 2462 list_del(&page->lru); 2463 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 2464 continue; 2465 } 2466 } 2467 2468 list_for_each_entry_safe(page, next, list, lru) { 2469 struct zone *zone = page_zone(page); 2470 2471 list_del(&page->lru); 2472 migratetype = get_pcppage_migratetype(page); 2473 2474 /* 2475 * Either different zone requiring a different pcp lock or 2476 * excessive lock hold times when freeing a large list of 2477 * pages. 2478 */ 2479 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) { 2480 if (pcp) { 2481 pcp_spin_unlock(pcp); 2482 pcp_trylock_finish(UP_flags); 2483 } 2484 2485 batch_count = 0; 2486 2487 /* 2488 * trylock is necessary as pages may be getting freed 2489 * from IRQ or SoftIRQ context after an IO completion. 2490 */ 2491 pcp_trylock_prepare(UP_flags); 2492 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2493 if (unlikely(!pcp)) { 2494 pcp_trylock_finish(UP_flags); 2495 free_one_page(zone, page, page_to_pfn(page), 2496 0, migratetype, FPI_NONE); 2497 locked_zone = NULL; 2498 continue; 2499 } 2500 locked_zone = zone; 2501 } 2502 2503 /* 2504 * Non-isolated types over MIGRATE_PCPTYPES get added 2505 * to the MIGRATE_MOVABLE pcp list. 2506 */ 2507 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2508 migratetype = MIGRATE_MOVABLE; 2509 2510 trace_mm_page_free_batched(page); 2511 free_unref_page_commit(zone, pcp, page, migratetype, 0); 2512 batch_count++; 2513 } 2514 2515 if (pcp) { 2516 pcp_spin_unlock(pcp); 2517 pcp_trylock_finish(UP_flags); 2518 } 2519 } 2520 2521 /* 2522 * split_page takes a non-compound higher-order page, and splits it into 2523 * n (1<<order) sub-pages: page[0..n] 2524 * Each sub-page must be freed individually. 2525 * 2526 * Note: this is probably too low level an operation for use in drivers. 2527 * Please consult with lkml before using this in your driver. 
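 *
 * Usage sketch (illustrative only):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	split_page(page, 2);
 *	...
 *	for (i = 0; i < 4; i++)
 *		__free_page(page + i);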
2528 */ 2529 void split_page(struct page *page, unsigned int order) 2530 { 2531 int i; 2532 2533 VM_BUG_ON_PAGE(PageCompound(page), page); 2534 VM_BUG_ON_PAGE(!page_count(page), page); 2535 2536 for (i = 1; i < (1 << order); i++) 2537 set_page_refcounted(page + i); 2538 split_page_owner(page, 1 << order); 2539 split_page_memcg(page, 1 << order); 2540 } 2541 EXPORT_SYMBOL_GPL(split_page); 2542 2543 int __isolate_free_page(struct page *page, unsigned int order) 2544 { 2545 struct zone *zone = page_zone(page); 2546 int mt = get_pageblock_migratetype(page); 2547 2548 if (!is_migrate_isolate(mt)) { 2549 unsigned long watermark; 2550 /* 2551 * Obey watermarks as if the page was being allocated. We can 2552 * emulate a high-order watermark check with a raised order-0 2553 * watermark, because we already know our high-order page 2554 * exists. 2555 */ 2556 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2557 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2558 return 0; 2559 2560 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2561 } 2562 2563 del_page_from_free_list(page, zone, order); 2564 2565 /* 2566 * Set the pageblock if the isolated page is at least half of a 2567 * pageblock 2568 */ 2569 if (order >= pageblock_order - 1) { 2570 struct page *endpage = page + (1 << order) - 1; 2571 for (; page < endpage; page += pageblock_nr_pages) { 2572 int mt = get_pageblock_migratetype(page); 2573 /* 2574 * Only change normal pageblocks (i.e., they can merge 2575 * with others) 2576 */ 2577 if (migratetype_is_mergeable(mt)) 2578 set_pageblock_migratetype(page, 2579 MIGRATE_MOVABLE); 2580 } 2581 } 2582 2583 return 1UL << order; 2584 } 2585 2586 /** 2587 * __putback_isolated_page - Return a now-isolated page back where we got it 2588 * @page: Page that was isolated 2589 * @order: Order of the isolated page 2590 * @mt: The page's pageblock's migratetype 2591 * 2592 * This function is meant to return a page pulled from the free lists via 2593 * __isolate_free_page back to the free lists they were pulled from. 2594 */ 2595 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2596 { 2597 struct zone *zone = page_zone(page); 2598 2599 /* zone lock should be held when this function is called */ 2600 lockdep_assert_held(&zone->lock); 2601 2602 /* Return isolated page to tail of freelist. 
*/ 2603 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2604 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2605 } 2606 2607 /* 2608 * Update NUMA hit/miss statistics 2609 */ 2610 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2611 long nr_account) 2612 { 2613 #ifdef CONFIG_NUMA 2614 enum numa_stat_item local_stat = NUMA_LOCAL; 2615 2616 /* skip numa counters update if numa stats is disabled */ 2617 if (!static_branch_likely(&vm_numa_stat_key)) 2618 return; 2619 2620 if (zone_to_nid(z) != numa_node_id()) 2621 local_stat = NUMA_OTHER; 2622 2623 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2624 __count_numa_events(z, NUMA_HIT, nr_account); 2625 else { 2626 __count_numa_events(z, NUMA_MISS, nr_account); 2627 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2628 } 2629 __count_numa_events(z, local_stat, nr_account); 2630 #endif 2631 } 2632 2633 static __always_inline 2634 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2635 unsigned int order, unsigned int alloc_flags, 2636 int migratetype) 2637 { 2638 struct page *page; 2639 unsigned long flags; 2640 2641 do { 2642 page = NULL; 2643 spin_lock_irqsave(&zone->lock, flags); 2644 if (alloc_flags & ALLOC_HIGHATOMIC) 2645 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2646 if (!page) { 2647 page = __rmqueue(zone, order, migratetype, alloc_flags); 2648 2649 /* 2650 * If the allocation fails, allow OOM handling access 2651 * to HIGHATOMIC reserves as failing now is worse than 2652 * failing a high-order atomic allocation in the 2653 * future. 2654 */ 2655 if (!page && (alloc_flags & ALLOC_OOM)) 2656 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2657 2658 if (!page) { 2659 spin_unlock_irqrestore(&zone->lock, flags); 2660 return NULL; 2661 } 2662 } 2663 __mod_zone_freepage_state(zone, -(1 << order), 2664 get_pcppage_migratetype(page)); 2665 spin_unlock_irqrestore(&zone->lock, flags); 2666 } while (check_new_pages(page, order)); 2667 2668 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2669 zone_statistics(preferred_zone, zone, 1); 2670 2671 return page; 2672 } 2673 2674 /* Remove page from the per-cpu list, caller must protect the list */ 2675 static inline 2676 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 2677 int migratetype, 2678 unsigned int alloc_flags, 2679 struct per_cpu_pages *pcp, 2680 struct list_head *list) 2681 { 2682 struct page *page; 2683 2684 do { 2685 if (list_empty(list)) { 2686 int batch = READ_ONCE(pcp->batch); 2687 int alloced; 2688 2689 /* 2690 * Scale batch relative to order if batch implies 2691 * free pages can be stored on the PCP. Batch can 2692 * be 1 for small zones or for boot pagesets which 2693 * should never store free pages as the pages may 2694 * belong to arbitrary zones. 
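 *
 * Example: with a per-cpu batch of 64, an order-0 refill asks
 * rmqueue_bulk() for 64 pages, while an order-3 refill asks for
 * max(64 >> 3, 2) == 8 order-3 pages (still 64 base pages).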
2695 */ 2696 if (batch > 1) 2697 batch = max(batch >> order, 2); 2698 alloced = rmqueue_bulk(zone, order, 2699 batch, list, 2700 migratetype, alloc_flags); 2701 2702 pcp->count += alloced << order; 2703 if (unlikely(list_empty(list))) 2704 return NULL; 2705 } 2706 2707 page = list_first_entry(list, struct page, pcp_list); 2708 list_del(&page->pcp_list); 2709 pcp->count -= 1 << order; 2710 } while (check_new_pages(page, order)); 2711 2712 return page; 2713 } 2714 2715 /* Lock and remove page from the per-cpu list */ 2716 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 2717 struct zone *zone, unsigned int order, 2718 int migratetype, unsigned int alloc_flags) 2719 { 2720 struct per_cpu_pages *pcp; 2721 struct list_head *list; 2722 struct page *page; 2723 unsigned long __maybe_unused UP_flags; 2724 2725 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 2726 pcp_trylock_prepare(UP_flags); 2727 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2728 if (!pcp) { 2729 pcp_trylock_finish(UP_flags); 2730 return NULL; 2731 } 2732 2733 /* 2734 * On allocation, reduce the number of pages that are batch freed. 2735 * See nr_pcp_free() where free_factor is increased for subsequent 2736 * frees. 2737 */ 2738 pcp->free_factor >>= 1; 2739 list = &pcp->lists[order_to_pindex(migratetype, order)]; 2740 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 2741 pcp_spin_unlock(pcp); 2742 pcp_trylock_finish(UP_flags); 2743 if (page) { 2744 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2745 zone_statistics(preferred_zone, zone, 1); 2746 } 2747 return page; 2748 } 2749 2750 /* 2751 * Allocate a page from the given zone. 2752 * Use pcplists for THP or "cheap" high-order allocations. 2753 */ 2754 2755 /* 2756 * Do not instrument rmqueue() with KMSAN. This function may call 2757 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 2758 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 2759 * may call rmqueue() again, which will result in a deadlock. 2760 */ 2761 __no_sanitize_memory 2762 static inline 2763 struct page *rmqueue(struct zone *preferred_zone, 2764 struct zone *zone, unsigned int order, 2765 gfp_t gfp_flags, unsigned int alloc_flags, 2766 int migratetype) 2767 { 2768 struct page *page; 2769 2770 /* 2771 * We most definitely don't want callers attempting to 2772 * allocate greater than order-1 page units with __GFP_NOFAIL. 
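 *
 * For instance, a hypothetical alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 2)
 * would trip the warning below.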
2773 */ 2774 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 2775 2776 if (likely(pcp_allowed_order(order))) { 2777 page = rmqueue_pcplist(preferred_zone, zone, order, 2778 migratetype, alloc_flags); 2779 if (likely(page)) 2780 goto out; 2781 } 2782 2783 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 2784 migratetype); 2785 2786 out: 2787 /* Separate test+clear to avoid unnecessary atomics */ 2788 if ((alloc_flags & ALLOC_KSWAPD) && 2789 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 2790 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2791 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 2792 } 2793 2794 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 2795 return page; 2796 } 2797 2798 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2799 { 2800 return __should_fail_alloc_page(gfp_mask, order); 2801 } 2802 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 2803 2804 static inline long __zone_watermark_unusable_free(struct zone *z, 2805 unsigned int order, unsigned int alloc_flags) 2806 { 2807 long unusable_free = (1 << order) - 1; 2808 2809 /* 2810 * If the caller does not have rights to reserves below the min 2811 * watermark then subtract the high-atomic reserves. This will 2812 * over-estimate the size of the atomic reserve but it avoids a search. 2813 */ 2814 if (likely(!(alloc_flags & ALLOC_RESERVES))) 2815 unusable_free += z->nr_reserved_highatomic; 2816 2817 #ifdef CONFIG_CMA 2818 /* If allocation can't use CMA areas don't use free CMA pages */ 2819 if (!(alloc_flags & ALLOC_CMA)) 2820 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 2821 #endif 2822 #ifdef CONFIG_UNACCEPTED_MEMORY 2823 unusable_free += zone_page_state(z, NR_UNACCEPTED); 2824 #endif 2825 2826 return unusable_free; 2827 } 2828 2829 /* 2830 * Return true if free base pages are above 'mark'. For high-order checks it 2831 * will return true of the order-0 watermark is reached and there is at least 2832 * one free page of a suitable size. Checking now avoids taking the zone lock 2833 * to check in the allocation paths if no pages are free. 2834 */ 2835 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2836 int highest_zoneidx, unsigned int alloc_flags, 2837 long free_pages) 2838 { 2839 long min = mark; 2840 int o; 2841 2842 /* free_pages may go negative - that's OK */ 2843 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 2844 2845 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 2846 /* 2847 * __GFP_HIGH allows access to 50% of the min reserve as well 2848 * as OOM. 2849 */ 2850 if (alloc_flags & ALLOC_MIN_RESERVE) { 2851 min -= min / 2; 2852 2853 /* 2854 * Non-blocking allocations (e.g. GFP_ATOMIC) can 2855 * access more reserves than just __GFP_HIGH. Other 2856 * non-blocking allocations requests such as GFP_NOWAIT 2857 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 2858 * access to the min reserve. 2859 */ 2860 if (alloc_flags & ALLOC_NON_BLOCK) 2861 min -= min / 4; 2862 } 2863 2864 /* 2865 * OOM victims can try even harder than the normal reserve 2866 * users on the grounds that it's definitely going to be in 2867 * the exit path shortly and free memory. Any allocation it 2868 * makes during the free path will be small and short-lived. 2869 */ 2870 if (alloc_flags & ALLOC_OOM) 2871 min -= min / 2; 2872 } 2873 2874 /* 2875 * Check watermarks for an order-0 allocation request. 
If these 2876 * are not met, then a high-order request also cannot go ahead 2877 * even if a suitable page happened to be free. 2878 */ 2879 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 2880 return false; 2881 2882 /* If this is an order-0 request then the watermark is fine */ 2883 if (!order) 2884 return true; 2885 2886 /* For a high-order request, check at least one suitable page is free */ 2887 for (o = order; o <= MAX_ORDER; o++) { 2888 struct free_area *area = &z->free_area[o]; 2889 int mt; 2890 2891 if (!area->nr_free) 2892 continue; 2893 2894 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2895 if (!free_area_empty(area, mt)) 2896 return true; 2897 } 2898 2899 #ifdef CONFIG_CMA 2900 if ((alloc_flags & ALLOC_CMA) && 2901 !free_area_empty(area, MIGRATE_CMA)) { 2902 return true; 2903 } 2904 #endif 2905 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 2906 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 2907 return true; 2908 } 2909 } 2910 return false; 2911 } 2912 2913 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2914 int highest_zoneidx, unsigned int alloc_flags) 2915 { 2916 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 2917 zone_page_state(z, NR_FREE_PAGES)); 2918 } 2919 2920 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 2921 unsigned long mark, int highest_zoneidx, 2922 unsigned int alloc_flags, gfp_t gfp_mask) 2923 { 2924 long free_pages; 2925 2926 free_pages = zone_page_state(z, NR_FREE_PAGES); 2927 2928 /* 2929 * Fast check for order-0 only. If this fails then the reserves 2930 * need to be calculated. 2931 */ 2932 if (!order) { 2933 long usable_free; 2934 long reserved; 2935 2936 usable_free = free_pages; 2937 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 2938 2939 /* reserved may over estimate high-atomic reserves. */ 2940 usable_free -= min(usable_free, reserved); 2941 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 2942 return true; 2943 } 2944 2945 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 2946 free_pages)) 2947 return true; 2948 2949 /* 2950 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 2951 * when checking the min watermark. The min watermark is the 2952 * point where boosting is ignored so that kswapd is woken up 2953 * when below the low watermark. 
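 *
 * In other words: an order-0 ALLOC_MIN_RESERVE request that just
 * failed against a boosted watermark is re-checked against the raw
 * z->_watermark[WMARK_MIN] below before we give up.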
2954 */ 2955 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 2956 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 2957 mark = z->_watermark[WMARK_MIN]; 2958 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 2959 alloc_flags, free_pages); 2960 } 2961 2962 return false; 2963 } 2964 2965 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2966 unsigned long mark, int highest_zoneidx) 2967 { 2968 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2969 2970 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2971 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2972 2973 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 2974 free_pages); 2975 } 2976 2977 #ifdef CONFIG_NUMA 2978 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 2979 2980 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2981 { 2982 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 2983 node_reclaim_distance; 2984 } 2985 #else /* CONFIG_NUMA */ 2986 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2987 { 2988 return true; 2989 } 2990 #endif /* CONFIG_NUMA */ 2991 2992 /* 2993 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 2994 * fragmentation is subtle. If the preferred zone was HIGHMEM then 2995 * premature use of a lower zone may cause lowmem pressure problems that 2996 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 2997 * probably too small. It only makes sense to spread allocations to avoid 2998 * fragmentation between the Normal and DMA32 zones. 2999 */ 3000 static inline unsigned int 3001 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3002 { 3003 unsigned int alloc_flags; 3004 3005 /* 3006 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3007 * to save a branch. 3008 */ 3009 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3010 3011 #ifdef CONFIG_ZONE_DMA32 3012 if (!zone) 3013 return alloc_flags; 3014 3015 if (zone_idx(zone) != ZONE_NORMAL) 3016 return alloc_flags; 3017 3018 /* 3019 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3020 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3021 * on UMA that if Normal is populated then so is DMA32. 3022 */ 3023 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3024 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3025 return alloc_flags; 3026 3027 alloc_flags |= ALLOC_NOFRAGMENT; 3028 #endif /* CONFIG_ZONE_DMA32 */ 3029 return alloc_flags; 3030 } 3031 3032 /* Must be called after current_gfp_context() which can change gfp_mask */ 3033 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3034 unsigned int alloc_flags) 3035 { 3036 #ifdef CONFIG_CMA 3037 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3038 alloc_flags |= ALLOC_CMA; 3039 #endif 3040 return alloc_flags; 3041 } 3042 3043 /* 3044 * get_page_from_freelist goes through the zonelist trying to allocate 3045 * a page. 3046 */ 3047 static struct page * 3048 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3049 const struct alloc_context *ac) 3050 { 3051 struct zoneref *z; 3052 struct zone *zone; 3053 struct pglist_data *last_pgdat = NULL; 3054 bool last_pgdat_dirty_ok = false; 3055 bool no_fallback; 3056 3057 retry: 3058 /* 3059 * Scan zonelist, looking for a zone with enough free. 3060 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 
3061 */ 3062 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3063 z = ac->preferred_zoneref; 3064 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3065 ac->nodemask) { 3066 struct page *page; 3067 unsigned long mark; 3068 3069 if (cpusets_enabled() && 3070 (alloc_flags & ALLOC_CPUSET) && 3071 !__cpuset_zone_allowed(zone, gfp_mask)) 3072 continue; 3073 /* 3074 * When allocating a page cache page for writing, we 3075 * want to get it from a node that is within its dirty 3076 * limit, such that no single node holds more than its 3077 * proportional share of globally allowed dirty pages. 3078 * The dirty limits take into account the node's 3079 * lowmem reserves and high watermark so that kswapd 3080 * should be able to balance it without having to 3081 * write pages from its LRU list. 3082 * 3083 * XXX: For now, allow allocations to potentially 3084 * exceed the per-node dirty limit in the slowpath 3085 * (spread_dirty_pages unset) before going into reclaim, 3086 * which is important when on a NUMA setup the allowed 3087 * nodes are together not big enough to reach the 3088 * global limit. The proper fix for these situations 3089 * will require awareness of nodes in the 3090 * dirty-throttling and the flusher threads. 3091 */ 3092 if (ac->spread_dirty_pages) { 3093 if (last_pgdat != zone->zone_pgdat) { 3094 last_pgdat = zone->zone_pgdat; 3095 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3096 } 3097 3098 if (!last_pgdat_dirty_ok) 3099 continue; 3100 } 3101 3102 if (no_fallback && nr_online_nodes > 1 && 3103 zone != ac->preferred_zoneref->zone) { 3104 int local_nid; 3105 3106 /* 3107 * If moving to a remote node, retry but allow 3108 * fragmenting fallbacks. Locality is more important 3109 * than fragmentation avoidance. 3110 */ 3111 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 3112 if (zone_to_nid(zone) != local_nid) { 3113 alloc_flags &= ~ALLOC_NOFRAGMENT; 3114 goto retry; 3115 } 3116 } 3117 3118 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3119 if (!zone_watermark_fast(zone, order, mark, 3120 ac->highest_zoneidx, alloc_flags, 3121 gfp_mask)) { 3122 int ret; 3123 3124 if (has_unaccepted_memory()) { 3125 if (try_to_accept_memory(zone, order)) 3126 goto try_this_zone; 3127 } 3128 3129 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3130 /* 3131 * Watermark failed for this zone, but see if we can 3132 * grow this zone if it contains deferred pages. 
3133 */ 3134 if (deferred_pages_enabled()) { 3135 if (_deferred_grow_zone(zone, order)) 3136 goto try_this_zone; 3137 } 3138 #endif 3139 /* Checked here to keep the fast path fast */ 3140 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3141 if (alloc_flags & ALLOC_NO_WATERMARKS) 3142 goto try_this_zone; 3143 3144 if (!node_reclaim_enabled() || 3145 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 3146 continue; 3147 3148 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3149 switch (ret) { 3150 case NODE_RECLAIM_NOSCAN: 3151 /* did not scan */ 3152 continue; 3153 case NODE_RECLAIM_FULL: 3154 /* scanned but unreclaimable */ 3155 continue; 3156 default: 3157 /* did we reclaim enough */ 3158 if (zone_watermark_ok(zone, order, mark, 3159 ac->highest_zoneidx, alloc_flags)) 3160 goto try_this_zone; 3161 3162 continue; 3163 } 3164 } 3165 3166 try_this_zone: 3167 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 3168 gfp_mask, alloc_flags, ac->migratetype); 3169 if (page) { 3170 prep_new_page(page, order, gfp_mask, alloc_flags); 3171 3172 /* 3173 * If this is a high-order atomic allocation then check 3174 * if the pageblock should be reserved for the future 3175 */ 3176 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3177 reserve_highatomic_pageblock(page, zone); 3178 3179 return page; 3180 } else { 3181 if (has_unaccepted_memory()) { 3182 if (try_to_accept_memory(zone, order)) 3183 goto try_this_zone; 3184 } 3185 3186 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3187 /* Try again if zone has deferred pages */ 3188 if (deferred_pages_enabled()) { 3189 if (_deferred_grow_zone(zone, order)) 3190 goto try_this_zone; 3191 } 3192 #endif 3193 } 3194 } 3195 3196 /* 3197 * It's possible on a UMA machine to get through all zones that are 3198 * fragmented. If avoiding fragmentation, reset and try again. 3199 */ 3200 if (no_fallback) { 3201 alloc_flags &= ~ALLOC_NOFRAGMENT; 3202 goto retry; 3203 } 3204 3205 return NULL; 3206 } 3207 3208 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3209 { 3210 unsigned int filter = SHOW_MEM_FILTER_NODES; 3211 3212 /* 3213 * This documents exceptions given to allocations in certain 3214 * contexts that are allowed to allocate outside current's set 3215 * of allowed nodes. 3216 */ 3217 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3218 if (tsk_is_oom_victim(current) || 3219 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3220 filter &= ~SHOW_MEM_FILTER_NODES; 3221 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3222 filter &= ~SHOW_MEM_FILTER_NODES; 3223 3224 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3225 } 3226 3227 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3228 { 3229 struct va_format vaf; 3230 va_list args; 3231 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3232 3233 if ((gfp_mask & __GFP_NOWARN) || 3234 !__ratelimit(&nopage_rs) || 3235 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3236 return; 3237 3238 va_start(args, fmt); 3239 vaf.fmt = fmt; 3240 vaf.va = &args; 3241 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3242 current->comm, &vaf, gfp_mask, &gfp_mask, 3243 nodemask_pr_args(nodemask)); 3244 va_end(args); 3245 3246 cpuset_print_current_mems_allowed(); 3247 pr_cont("\n"); 3248 dump_stack(); 3249 warn_alloc_show_mem(gfp_mask, nodemask); 3250 } 3251 3252 static inline struct page * 3253 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3254 unsigned int alloc_flags, 3255 const struct alloc_context *ac) 3256 { 3257 struct page *page; 3258 3259 page = get_page_from_freelist(gfp_mask, order, 3260 alloc_flags|ALLOC_CPUSET, ac); 3261 /* 3262 * fallback to ignore cpuset restriction if our nodes 3263 * are depleted 3264 */ 3265 if (!page) 3266 page = get_page_from_freelist(gfp_mask, order, 3267 alloc_flags, ac); 3268 3269 return page; 3270 } 3271 3272 static inline struct page * 3273 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3274 const struct alloc_context *ac, unsigned long *did_some_progress) 3275 { 3276 struct oom_control oc = { 3277 .zonelist = ac->zonelist, 3278 .nodemask = ac->nodemask, 3279 .memcg = NULL, 3280 .gfp_mask = gfp_mask, 3281 .order = order, 3282 }; 3283 struct page *page; 3284 3285 *did_some_progress = 0; 3286 3287 /* 3288 * Acquire the oom lock. If that fails, somebody else is 3289 * making progress for us. 3290 */ 3291 if (!mutex_trylock(&oom_lock)) { 3292 *did_some_progress = 1; 3293 schedule_timeout_uninterruptible(1); 3294 return NULL; 3295 } 3296 3297 /* 3298 * Go through the zonelist yet one more time, keep very high watermark 3299 * here, this is only to catch a parallel oom killing, we must fail if 3300 * we're still under heavy pressure. But make sure that this reclaim 3301 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3302 * allocation which will never fail due to oom_lock already held. 3303 */ 3304 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3305 ~__GFP_DIRECT_RECLAIM, order, 3306 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3307 if (page) 3308 goto out; 3309 3310 /* Coredumps can quickly deplete all memory reserves */ 3311 if (current->flags & PF_DUMPCORE) 3312 goto out; 3313 /* The OOM killer will not help higher order allocs */ 3314 if (order > PAGE_ALLOC_COSTLY_ORDER) 3315 goto out; 3316 /* 3317 * We have already exhausted all our reclaim opportunities without any 3318 * success so it is time to admit defeat. We will skip the OOM killer 3319 * because it is very likely that the caller has a more reasonable 3320 * fallback than shooting a random task. 3321 * 3322 * The OOM killer may not free memory on a specific node. 3323 */ 3324 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3325 goto out; 3326 /* The OOM killer does not needlessly kill tasks for lowmem */ 3327 if (ac->highest_zoneidx < ZONE_NORMAL) 3328 goto out; 3329 if (pm_suspended_storage()) 3330 goto out; 3331 /* 3332 * XXX: GFP_NOFS allocations should rather fail than rely on 3333 * other request to make a forward progress. 3334 * We are in an unfortunate situation where out_of_memory cannot 3335 * do much for this context but let's try it to at least get 3336 * access to memory reserved if the current task is killed (see 3337 * out_of_memory). 
Once filesystems are ready to handle allocation 3338 * failures more gracefully we should just bail out here. 3339 */ 3340 3341 /* Exhausted what can be done so it's blame time */ 3342 if (out_of_memory(&oc) || 3343 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 3344 *did_some_progress = 1; 3345 3346 /* 3347 * Help non-failing allocations by giving them access to memory 3348 * reserves 3349 */ 3350 if (gfp_mask & __GFP_NOFAIL) 3351 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3352 ALLOC_NO_WATERMARKS, ac); 3353 } 3354 out: 3355 mutex_unlock(&oom_lock); 3356 return page; 3357 } 3358 3359 /* 3360 * Maximum number of compaction retries with a progress before OOM 3361 * killer is consider as the only way to move forward. 3362 */ 3363 #define MAX_COMPACT_RETRIES 16 3364 3365 #ifdef CONFIG_COMPACTION 3366 /* Try memory compaction for high-order allocations before reclaim */ 3367 static struct page * 3368 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3369 unsigned int alloc_flags, const struct alloc_context *ac, 3370 enum compact_priority prio, enum compact_result *compact_result) 3371 { 3372 struct page *page = NULL; 3373 unsigned long pflags; 3374 unsigned int noreclaim_flag; 3375 3376 if (!order) 3377 return NULL; 3378 3379 psi_memstall_enter(&pflags); 3380 delayacct_compact_start(); 3381 noreclaim_flag = memalloc_noreclaim_save(); 3382 3383 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3384 prio, &page); 3385 3386 memalloc_noreclaim_restore(noreclaim_flag); 3387 psi_memstall_leave(&pflags); 3388 delayacct_compact_end(); 3389 3390 if (*compact_result == COMPACT_SKIPPED) 3391 return NULL; 3392 /* 3393 * At least in one zone compaction wasn't deferred or skipped, so let's 3394 * count a compaction stall 3395 */ 3396 count_vm_event(COMPACTSTALL); 3397 3398 /* Prep a captured page if available */ 3399 if (page) 3400 prep_new_page(page, order, gfp_mask, alloc_flags); 3401 3402 /* Try get a page from the freelist if available */ 3403 if (!page) 3404 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3405 3406 if (page) { 3407 struct zone *zone = page_zone(page); 3408 3409 zone->compact_blockskip_flush = false; 3410 compaction_defer_reset(zone, order, true); 3411 count_vm_event(COMPACTSUCCESS); 3412 return page; 3413 } 3414 3415 /* 3416 * It's bad if compaction run occurs and fails. The most likely reason 3417 * is that pages exist, but not enough to satisfy watermarks. 3418 */ 3419 count_vm_event(COMPACTFAIL); 3420 3421 cond_resched(); 3422 3423 return NULL; 3424 } 3425 3426 static inline bool 3427 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3428 enum compact_result compact_result, 3429 enum compact_priority *compact_priority, 3430 int *compaction_retries) 3431 { 3432 int max_retries = MAX_COMPACT_RETRIES; 3433 int min_priority; 3434 bool ret = false; 3435 int retries = *compaction_retries; 3436 enum compact_priority priority = *compact_priority; 3437 3438 if (!order) 3439 return false; 3440 3441 if (fatal_signal_pending(current)) 3442 return false; 3443 3444 /* 3445 * Compaction was skipped due to a lack of free order-0 3446 * migration targets. Continue if reclaim can help. 3447 */ 3448 if (compact_result == COMPACT_SKIPPED) { 3449 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3450 goto out; 3451 } 3452 3453 /* 3454 * Compaction managed to coalesce some page blocks, but the 3455 * allocation failed presumably due to a race. Retry some. 
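 *
 * Concretely: !costly orders get up to MAX_COMPACT_RETRIES (16)
 * retries here, while costly orders get 16 / 4 == 4 before the
 * compaction priority is raised instead.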
3456 */ 3457 if (compact_result == COMPACT_SUCCESS) { 3458 /* 3459 * !costly requests are much more important than 3460 * __GFP_RETRY_MAYFAIL costly ones because they are de 3461 * facto nofail and invoke OOM killer to move on while 3462 * costly can fail and users are ready to cope with 3463 * that. 1/4 retries is rather arbitrary but we would 3464 * need much more detailed feedback from compaction to 3465 * make a better decision. 3466 */ 3467 if (order > PAGE_ALLOC_COSTLY_ORDER) 3468 max_retries /= 4; 3469 3470 if (++(*compaction_retries) <= max_retries) { 3471 ret = true; 3472 goto out; 3473 } 3474 } 3475 3476 /* 3477 * Compaction failed. Retry with increasing priority. 3478 */ 3479 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3480 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3481 3482 if (*compact_priority > min_priority) { 3483 (*compact_priority)--; 3484 *compaction_retries = 0; 3485 ret = true; 3486 } 3487 out: 3488 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3489 return ret; 3490 } 3491 #else 3492 static inline struct page * 3493 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3494 unsigned int alloc_flags, const struct alloc_context *ac, 3495 enum compact_priority prio, enum compact_result *compact_result) 3496 { 3497 *compact_result = COMPACT_SKIPPED; 3498 return NULL; 3499 } 3500 3501 static inline bool 3502 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3503 enum compact_result compact_result, 3504 enum compact_priority *compact_priority, 3505 int *compaction_retries) 3506 { 3507 struct zone *zone; 3508 struct zoneref *z; 3509 3510 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3511 return false; 3512 3513 /* 3514 * There are setups with compaction disabled which would prefer to loop 3515 * inside the allocator rather than hit the oom killer prematurely. 3516 * Let's give them a good hope and keep retrying while the order-0 3517 * watermarks are OK. 
3518 */ 3519 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3520 ac->highest_zoneidx, ac->nodemask) { 3521 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3522 ac->highest_zoneidx, alloc_flags)) 3523 return true; 3524 } 3525 return false; 3526 } 3527 #endif /* CONFIG_COMPACTION */ 3528 3529 #ifdef CONFIG_LOCKDEP 3530 static struct lockdep_map __fs_reclaim_map = 3531 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 3532 3533 static bool __need_reclaim(gfp_t gfp_mask) 3534 { 3535 /* no reclaim without waiting on it */ 3536 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 3537 return false; 3538 3539 /* this guy won't enter reclaim */ 3540 if (current->flags & PF_MEMALLOC) 3541 return false; 3542 3543 if (gfp_mask & __GFP_NOLOCKDEP) 3544 return false; 3545 3546 return true; 3547 } 3548 3549 void __fs_reclaim_acquire(unsigned long ip) 3550 { 3551 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 3552 } 3553 3554 void __fs_reclaim_release(unsigned long ip) 3555 { 3556 lock_release(&__fs_reclaim_map, ip); 3557 } 3558 3559 void fs_reclaim_acquire(gfp_t gfp_mask) 3560 { 3561 gfp_mask = current_gfp_context(gfp_mask); 3562 3563 if (__need_reclaim(gfp_mask)) { 3564 if (gfp_mask & __GFP_FS) 3565 __fs_reclaim_acquire(_RET_IP_); 3566 3567 #ifdef CONFIG_MMU_NOTIFIER 3568 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 3569 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 3570 #endif 3571 3572 } 3573 } 3574 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 3575 3576 void fs_reclaim_release(gfp_t gfp_mask) 3577 { 3578 gfp_mask = current_gfp_context(gfp_mask); 3579 3580 if (__need_reclaim(gfp_mask)) { 3581 if (gfp_mask & __GFP_FS) 3582 __fs_reclaim_release(_RET_IP_); 3583 } 3584 } 3585 EXPORT_SYMBOL_GPL(fs_reclaim_release); 3586 #endif 3587 3588 /* 3589 * Zonelists may change due to hotplug during allocation. Detect when zonelists 3590 * have been rebuilt so allocation retries. Reader side does not lock and 3591 * retries the allocation if zonelist changes. Writer side is protected by the 3592 * embedded spin_lock. 
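 *
 * Reader-side pattern used by the allocator slow path (sketch):
 *
 *	cookie = zonelist_iter_begin();
 *	... attempt the allocation ...
 *	if (check_retry_zonelist(cookie))
 *		goto restart;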
3593 */ 3594 static DEFINE_SEQLOCK(zonelist_update_seq); 3595 3596 static unsigned int zonelist_iter_begin(void) 3597 { 3598 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3599 return read_seqbegin(&zonelist_update_seq); 3600 3601 return 0; 3602 } 3603 3604 static unsigned int check_retry_zonelist(unsigned int seq) 3605 { 3606 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3607 return read_seqretry(&zonelist_update_seq, seq); 3608 3609 return seq; 3610 } 3611 3612 /* Perform direct synchronous page reclaim */ 3613 static unsigned long 3614 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3615 const struct alloc_context *ac) 3616 { 3617 unsigned int noreclaim_flag; 3618 unsigned long progress; 3619 3620 cond_resched(); 3621 3622 /* We now go into synchronous reclaim */ 3623 cpuset_memory_pressure_bump(); 3624 fs_reclaim_acquire(gfp_mask); 3625 noreclaim_flag = memalloc_noreclaim_save(); 3626 3627 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3628 ac->nodemask); 3629 3630 memalloc_noreclaim_restore(noreclaim_flag); 3631 fs_reclaim_release(gfp_mask); 3632 3633 cond_resched(); 3634 3635 return progress; 3636 } 3637 3638 /* The really slow allocator path where we enter direct reclaim */ 3639 static inline struct page * 3640 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3641 unsigned int alloc_flags, const struct alloc_context *ac, 3642 unsigned long *did_some_progress) 3643 { 3644 struct page *page = NULL; 3645 unsigned long pflags; 3646 bool drained = false; 3647 3648 psi_memstall_enter(&pflags); 3649 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3650 if (unlikely(!(*did_some_progress))) 3651 goto out; 3652 3653 retry: 3654 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3655 3656 /* 3657 * If an allocation failed after direct reclaim, it could be because 3658 * pages are pinned on the per-cpu lists or in high alloc reserves. 3659 * Shrink them and try again 3660 */ 3661 if (!page && !drained) { 3662 unreserve_highatomic_pageblock(ac, false); 3663 drain_all_pages(NULL); 3664 drained = true; 3665 goto retry; 3666 } 3667 out: 3668 psi_memstall_leave(&pflags); 3669 3670 return page; 3671 } 3672 3673 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 3674 const struct alloc_context *ac) 3675 { 3676 struct zoneref *z; 3677 struct zone *zone; 3678 pg_data_t *last_pgdat = NULL; 3679 enum zone_type highest_zoneidx = ac->highest_zoneidx; 3680 3681 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 3682 ac->nodemask) { 3683 if (!managed_zone(zone)) 3684 continue; 3685 if (last_pgdat != zone->zone_pgdat) { 3686 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 3687 last_pgdat = zone->zone_pgdat; 3688 } 3689 } 3690 } 3691 3692 static inline unsigned int 3693 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 3694 { 3695 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3696 3697 /* 3698 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 3699 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3700 * to save two branches. 3701 */ 3702 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 3703 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 3704 3705 /* 3706 * The caller may dip into page reserves a bit more if the caller 3707 * cannot run direct reclaim, or if the caller has realtime scheduling 3708 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3709 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 
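 *
 * Worked example: an order-0 GFP_ATOMIC request (__GFP_HIGH |
 * __GFP_KSWAPD_RECLAIM) ends up with ALLOC_WMARK_MIN | ALLOC_KSWAPD |
 * ALLOC_MIN_RESERVE | ALLOC_NON_BLOCK, with ALLOC_CPUSET cleared
 * further down because ALLOC_MIN_RESERVE is set.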
3710 */ 3711 alloc_flags |= (__force int) 3712 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 3713 3714 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 3715 /* 3716 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 3717 * if it can't schedule. 3718 */ 3719 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 3720 alloc_flags |= ALLOC_NON_BLOCK; 3721 3722 if (order > 0) 3723 alloc_flags |= ALLOC_HIGHATOMIC; 3724 } 3725 3726 /* 3727 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 3728 * GFP_ATOMIC) rather than fail, see the comment for 3729 * cpuset_node_allowed(). 3730 */ 3731 if (alloc_flags & ALLOC_MIN_RESERVE) 3732 alloc_flags &= ~ALLOC_CPUSET; 3733 } else if (unlikely(rt_task(current)) && in_task()) 3734 alloc_flags |= ALLOC_MIN_RESERVE; 3735 3736 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 3737 3738 return alloc_flags; 3739 } 3740 3741 static bool oom_reserves_allowed(struct task_struct *tsk) 3742 { 3743 if (!tsk_is_oom_victim(tsk)) 3744 return false; 3745 3746 /* 3747 * !MMU doesn't have oom reaper so give access to memory reserves 3748 * only to the thread with TIF_MEMDIE set 3749 */ 3750 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 3751 return false; 3752 3753 return true; 3754 } 3755 3756 /* 3757 * Distinguish requests which really need access to full memory 3758 * reserves from oom victims which can live with a portion of it 3759 */ 3760 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 3761 { 3762 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 3763 return 0; 3764 if (gfp_mask & __GFP_MEMALLOC) 3765 return ALLOC_NO_WATERMARKS; 3766 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 3767 return ALLOC_NO_WATERMARKS; 3768 if (!in_interrupt()) { 3769 if (current->flags & PF_MEMALLOC) 3770 return ALLOC_NO_WATERMARKS; 3771 else if (oom_reserves_allowed(current)) 3772 return ALLOC_OOM; 3773 } 3774 3775 return 0; 3776 } 3777 3778 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 3779 { 3780 return !!__gfp_pfmemalloc_flags(gfp_mask); 3781 } 3782 3783 /* 3784 * Checks whether it makes sense to retry the reclaim to make a forward progress 3785 * for the given allocation request. 3786 * 3787 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 3788 * without success, or when we couldn't even meet the watermark if we 3789 * reclaimed all remaining pages on the LRU lists. 3790 * 3791 * Returns true if a retry is viable or false to enter the oom path. 3792 */ 3793 static inline bool 3794 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 3795 struct alloc_context *ac, int alloc_flags, 3796 bool did_some_progress, int *no_progress_loops) 3797 { 3798 struct zone *zone; 3799 struct zoneref *z; 3800 bool ret = false; 3801 3802 /* 3803 * Costly allocations might have made a progress but this doesn't mean 3804 * their order will become available due to high fragmentation so 3805 * always increment the no progress counter for them 3806 */ 3807 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 3808 *no_progress_loops = 0; 3809 else 3810 (*no_progress_loops)++; 3811 3812 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 3813 goto out; 3814 3815 3816 /* 3817 * Keep reclaiming pages while there is a chance this will lead 3818 * somewhere. If none of the target zones can satisfy our allocation 3819 * request even if all reclaimable pages are considered then we are 3820 * screwed and have to go OOM. 
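 *
 * Put differently, a retry is considered useful as long as at least one
 * target zone would still pass its min watermark for this order if every
 * reclaimable page in it were freed, i.e. roughly:
 *
 *	NR_FREE_PAGES + zone_reclaimable_pages(zone) >= min_wmark_pages(zone)
 *
 * which is what the __zone_watermark_ok(..., available) check below
 * evaluates (including lowmem reserves and free blocks of this order).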
3821 */ 3822 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3823 ac->highest_zoneidx, ac->nodemask) { 3824 unsigned long available; 3825 unsigned long reclaimable; 3826 unsigned long min_wmark = min_wmark_pages(zone); 3827 bool wmark; 3828 3829 available = reclaimable = zone_reclaimable_pages(zone); 3830 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 3831 3832 /* 3833 * Would the allocation succeed if we reclaimed all 3834 * reclaimable pages? 3835 */ 3836 wmark = __zone_watermark_ok(zone, order, min_wmark, 3837 ac->highest_zoneidx, alloc_flags, available); 3838 trace_reclaim_retry_zone(z, order, reclaimable, 3839 available, min_wmark, *no_progress_loops, wmark); 3840 if (wmark) { 3841 ret = true; 3842 break; 3843 } 3844 } 3845 3846 /* 3847 * Memory allocation/reclaim might be called from a WQ context and the 3848 * current implementation of the WQ concurrency control doesn't 3849 * recognize that a particular WQ is congested if the worker thread is 3850 * looping without ever sleeping. Therefore we have to do a short sleep 3851 * here rather than calling cond_resched(). 3852 */ 3853 if (current->flags & PF_WQ_WORKER) 3854 schedule_timeout_uninterruptible(1); 3855 else 3856 cond_resched(); 3857 out: 3858 /* Before OOM, exhaust highatomic_reserve */ 3859 if (!ret) 3860 return unreserve_highatomic_pageblock(ac, true); 3861 3862 return ret; 3863 } 3864 3865 static inline bool 3866 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 3867 { 3868 /* 3869 * It's possible that cpuset's mems_allowed and the nodemask from 3870 * mempolicy don't intersect. This should be normally dealt with by 3871 * policy_nodemask(), but it's possible to race with cpuset update in 3872 * such a way the check therein was true, and then it became false 3873 * before we got our cpuset_mems_cookie here. 3874 * This assumes that for all allocations, ac->nodemask can come only 3875 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 3876 * when it does not intersect with the cpuset restrictions) or the 3877 * caller can deal with a violated nodemask. 3878 */ 3879 if (cpusets_enabled() && ac->nodemask && 3880 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 3881 ac->nodemask = NULL; 3882 return true; 3883 } 3884 3885 /* 3886 * When updating a task's mems_allowed or mempolicy nodemask, it is 3887 * possible to race with parallel threads in such a way that our 3888 * allocation can fail while the mask is being updated. If we are about 3889 * to fail, check if the cpuset changed during allocation and if so, 3890 * retry. 
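 *
 * The cookie checked here is taken with read_mems_allowed_begin() at the
 * restart label of __alloc_pages_slowpath(), so the overall usage is
 * roughly (sketch, names illustrative):
 *
 *	cookie = read_mems_allowed_begin();
 *	... attempt the allocation ...
 *	if (allocation_failed && check_retry_cpuset(cookie, ac))
 *		goto restart;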
3891 */
3892 if (read_mems_allowed_retry(cpuset_mems_cookie))
3893 return true;
3894
3895 return false;
3896 }
3897
3898 static inline struct page *
3899 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3900 struct alloc_context *ac)
3901 {
3902 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3903 bool can_compact = gfp_compaction_allowed(gfp_mask);
3904 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
3905 struct page *page = NULL;
3906 unsigned int alloc_flags;
3907 unsigned long did_some_progress;
3908 enum compact_priority compact_priority;
3909 enum compact_result compact_result;
3910 int compaction_retries;
3911 int no_progress_loops;
3912 unsigned int cpuset_mems_cookie;
3913 unsigned int zonelist_iter_cookie;
3914 int reserve_flags;
3915
3916 restart:
3917 compaction_retries = 0;
3918 no_progress_loops = 0;
3919 compact_priority = DEF_COMPACT_PRIORITY;
3920 cpuset_mems_cookie = read_mems_allowed_begin();
3921 zonelist_iter_cookie = zonelist_iter_begin();
3922
3923 /*
3924 * The fast path uses conservative alloc_flags to succeed only until
3925 * kswapd needs to be woken up, and to avoid the cost of setting up
3926 * alloc_flags precisely. So we do that now.
3927 */
3928 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
3929
3930 /*
3931 * We need to recalculate the starting point for the zonelist iterator
3932 * because we might have used a different nodemask in the fast path, or
3933 * there was a cpuset modification and we are retrying - otherwise we
3934 * could end up iterating over non-eligible zones endlessly.
3935 */
3936 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
3937 ac->highest_zoneidx, ac->nodemask);
3938 if (!ac->preferred_zoneref->zone)
3939 goto nopage;
3940
3941 /*
3942 * Check for insane configurations where the cpuset doesn't contain
3943 * any suitable zone to satisfy the request - e.g. non-movable
3944 * GFP_HIGHUSER allocations from MOVABLE nodes only.
3945 */
3946 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
3947 struct zoneref *z = first_zones_zonelist(ac->zonelist,
3948 ac->highest_zoneidx,
3949 &cpuset_current_mems_allowed);
3950 if (!z->zone)
3951 goto nopage;
3952 }
3953
3954 if (alloc_flags & ALLOC_KSWAPD)
3955 wake_all_kswapds(order, gfp_mask, ac);
3956
3957 /*
3958 * The adjusted alloc_flags might result in immediate success, so try
3959 * that first
3960 */
3961 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3962 if (page)
3963 goto got_pg;
3964
3965 /*
3966 * For costly allocations, try direct compaction first, as it's likely
3967 * that we have enough base pages and don't need to reclaim. For non-
3968 * movable high-order allocations, do that as well, as compaction will
3969 * try to prevent permanent fragmentation by migrating from blocks of the
3970 * same migratetype.
3971 * Don't try this for allocations that are allowed to ignore
3972 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
3973 */ 3974 if (can_direct_reclaim && can_compact && 3975 (costly_order || 3976 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 3977 && !gfp_pfmemalloc_allowed(gfp_mask)) { 3978 page = __alloc_pages_direct_compact(gfp_mask, order, 3979 alloc_flags, ac, 3980 INIT_COMPACT_PRIORITY, 3981 &compact_result); 3982 if (page) 3983 goto got_pg; 3984 3985 /* 3986 * Checks for costly allocations with __GFP_NORETRY, which 3987 * includes some THP page fault allocations 3988 */ 3989 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 3990 /* 3991 * If allocating entire pageblock(s) and compaction 3992 * failed because all zones are below low watermarks 3993 * or is prohibited because it recently failed at this 3994 * order, fail immediately unless the allocator has 3995 * requested compaction and reclaim retry. 3996 * 3997 * Reclaim is 3998 * - potentially very expensive because zones are far 3999 * below their low watermarks or this is part of very 4000 * bursty high order allocations, 4001 * - not guaranteed to help because isolate_freepages() 4002 * may not iterate over freed pages as part of its 4003 * linear scan, and 4004 * - unlikely to make entire pageblocks free on its 4005 * own. 4006 */ 4007 if (compact_result == COMPACT_SKIPPED || 4008 compact_result == COMPACT_DEFERRED) 4009 goto nopage; 4010 4011 /* 4012 * Looks like reclaim/compaction is worth trying, but 4013 * sync compaction could be very expensive, so keep 4014 * using async compaction. 4015 */ 4016 compact_priority = INIT_COMPACT_PRIORITY; 4017 } 4018 } 4019 4020 retry: 4021 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4022 if (alloc_flags & ALLOC_KSWAPD) 4023 wake_all_kswapds(order, gfp_mask, ac); 4024 4025 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4026 if (reserve_flags) 4027 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4028 (alloc_flags & ALLOC_KSWAPD); 4029 4030 /* 4031 * Reset the nodemask and zonelist iterators if memory policies can be 4032 * ignored. These allocations are high priority and system rather than 4033 * user oriented. 
4034 */
4035 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
4036 ac->nodemask = NULL;
4037 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
4038 ac->highest_zoneidx, ac->nodemask);
4039 }
4040
4041 /* Attempt with potentially adjusted zonelist and alloc_flags */
4042 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4043 if (page)
4044 goto got_pg;
4045
4046 /* Caller is not willing to reclaim, we can't balance anything */
4047 if (!can_direct_reclaim)
4048 goto nopage;
4049
4050 /* Avoid recursion of direct reclaim */
4051 if (current->flags & PF_MEMALLOC)
4052 goto nopage;
4053
4054 /* Try direct reclaim and then allocating */
4055 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4056 &did_some_progress);
4057 if (page)
4058 goto got_pg;
4059
4060 /* Try direct compaction and then allocating */
4061 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4062 compact_priority, &compact_result);
4063 if (page)
4064 goto got_pg;
4065
4066 /* Do not loop if specifically requested */
4067 if (gfp_mask & __GFP_NORETRY)
4068 goto nopage;
4069
4070 /*
4071 * Do not retry costly high order allocations unless they are
4072 * __GFP_RETRY_MAYFAIL and we can compact
4073 */
4074 if (costly_order && (!can_compact ||
4075 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4076 goto nopage;
4077
4078 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4079 did_some_progress > 0, &no_progress_loops))
4080 goto retry;
4081
4082 /*
4083 * It doesn't make any sense to retry compaction if order-0 reclaim is
4084 * not able to make any progress because the current implementation of
4085 * compaction depends on a sufficient amount of free memory
4086 * (see __compaction_suitable)
4087 */
4088 if (did_some_progress > 0 && can_compact &&
4089 should_compact_retry(ac, order, alloc_flags,
4090 compact_result, &compact_priority,
4091 &compaction_retries))
4092 goto retry;
4093
4094
4095 /*
4096 * Deal with possible cpuset update races or zonelist updates to avoid
4097 * an unnecessary OOM kill.
4098 */
4099 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4100 check_retry_zonelist(zonelist_iter_cookie))
4101 goto restart;
4102
4103 /* Reclaim has failed us, start killing things */
4104 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4105 if (page)
4106 goto got_pg;
4107
4108 /* Avoid allocations with no watermarks from looping endlessly */
4109 if (tsk_is_oom_victim(current) &&
4110 (alloc_flags & ALLOC_OOM ||
4111 (gfp_mask & __GFP_NOMEMALLOC)))
4112 goto nopage;
4113
4114 /* Retry as long as the OOM killer is making progress */
4115 if (did_some_progress) {
4116 no_progress_loops = 0;
4117 goto retry;
4118 }
4119
4120 nopage:
4121 /*
4122 * Deal with possible cpuset update races or zonelist updates to avoid
4123 * an unnecessary OOM kill.
4124 */
4125 if (check_retry_cpuset(cpuset_mems_cookie, ac) ||
4126 check_retry_zonelist(zonelist_iter_cookie))
4127 goto restart;
4128
4129 /*
4130 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
4131 * we always retry
4132 */
4133 if (gfp_mask & __GFP_NOFAIL) {
4134 /*
4135 * All existing users of __GFP_NOFAIL are blockable, so warn
4136 * of any new users that actually require GFP_NOWAIT
4137 */
4138 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4139 goto fail;
4140
4141 /*
4142 * A PF_MEMALLOC request from this context is rather bizarre
4143 * because we cannot reclaim anything and can only loop waiting
4144 * for somebody to do the work for us
4145 */
4146 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4147
4148 /*
4149 * Non-failing costly orders are a hard requirement which we
4150 * are not well prepared for, so let's warn about these users
4151 * so that we can identify them and convert them to something
4152 * else.
4153 */
4154 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4155
4156 /*
4157 * Help non-failing allocations by giving some access to memory
4158 * reserves normally used for high priority non-blocking
4159 * allocations but do not use ALLOC_NO_WATERMARKS because this
4160 * could deplete whole memory reserves which would just make
4161 * the situation worse.
4162 */
4163 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4164 if (page)
4165 goto got_pg;
4166
4167 cond_resched();
4168 goto retry;
4169 }
4170 fail:
4171 warn_alloc(gfp_mask, ac->nodemask,
4172 "page allocation failure: order:%u", order);
4173 got_pg:
4174 return page;
4175 }
4176
4177 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4178 int preferred_nid, nodemask_t *nodemask,
4179 struct alloc_context *ac, gfp_t *alloc_gfp,
4180 unsigned int *alloc_flags)
4181 {
4182 ac->highest_zoneidx = gfp_zone(gfp_mask);
4183 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4184 ac->nodemask = nodemask;
4185 ac->migratetype = gfp_migratetype(gfp_mask);
4186
4187 if (cpusets_enabled()) {
4188 *alloc_gfp |= __GFP_HARDWALL;
4189 /*
4190 * When we are in the interrupt context, it is irrelevant
4191 * to the current task context. It means that any node is OK.
4192 */
4193 if (in_task() && !ac->nodemask)
4194 ac->nodemask = &cpuset_current_mems_allowed;
4195 else
4196 *alloc_flags |= ALLOC_CPUSET;
4197 }
4198
4199 might_alloc(gfp_mask);
4200
4201 if (should_fail_alloc_page(gfp_mask, order))
4202 return false;
4203
4204 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4205
4206 /* Dirty zone balancing only done in the fast path */
4207 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
4208
4209 /*
4210 * The preferred zone is used for statistics but crucially it is
4211 * also used as the starting point for the zonelist iterator. It
4212 * may get reset for allocations that ignore memory policies.
4213 */ 4214 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4215 ac->highest_zoneidx, ac->nodemask); 4216 4217 return true; 4218 } 4219 4220 /* 4221 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 4222 * @gfp: GFP flags for the allocation 4223 * @preferred_nid: The preferred NUMA node ID to allocate from 4224 * @nodemask: Set of nodes to allocate from, may be NULL 4225 * @nr_pages: The number of pages desired on the list or array 4226 * @page_list: Optional list to store the allocated pages 4227 * @page_array: Optional array to store the pages 4228 * 4229 * This is a batched version of the page allocator that attempts to 4230 * allocate nr_pages quickly. Pages are added to page_list if page_list 4231 * is not NULL, otherwise it is assumed that the page_array is valid. 4232 * 4233 * For lists, nr_pages is the number of pages that should be allocated. 4234 * 4235 * For arrays, only NULL elements are populated with pages and nr_pages 4236 * is the maximum number of pages that will be stored in the array. 4237 * 4238 * Returns the number of pages on the list or array. 4239 */ 4240 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, 4241 nodemask_t *nodemask, int nr_pages, 4242 struct list_head *page_list, 4243 struct page **page_array) 4244 { 4245 struct page *page; 4246 unsigned long __maybe_unused UP_flags; 4247 struct zone *zone; 4248 struct zoneref *z; 4249 struct per_cpu_pages *pcp; 4250 struct list_head *pcp_list; 4251 struct alloc_context ac; 4252 gfp_t alloc_gfp; 4253 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4254 int nr_populated = 0, nr_account = 0; 4255 4256 /* 4257 * Skip populated array elements to determine if any pages need 4258 * to be allocated before disabling IRQs. 4259 */ 4260 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 4261 nr_populated++; 4262 4263 /* No pages requested? */ 4264 if (unlikely(nr_pages <= 0)) 4265 goto out; 4266 4267 /* Already populated array? */ 4268 if (unlikely(page_array && nr_pages - nr_populated == 0)) 4269 goto out; 4270 4271 /* Bulk allocator does not support memcg accounting. */ 4272 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4273 goto failed; 4274 4275 /* Use the single page allocator for one page. */ 4276 if (nr_pages - nr_populated == 1) 4277 goto failed; 4278 4279 #ifdef CONFIG_PAGE_OWNER 4280 /* 4281 * PAGE_OWNER may recurse into the allocator to allocate space to 4282 * save the stack with pagesets.lock held. Releasing/reacquiring 4283 * removes much of the performance benefit of bulk allocation so 4284 * force the caller to allocate one page at a time as it'll have 4285 * similar performance to added complexity to the bulk allocator. 4286 */ 4287 if (static_branch_unlikely(&page_owner_inited)) 4288 goto failed; 4289 #endif 4290 4291 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 4292 gfp &= gfp_allowed_mask; 4293 alloc_gfp = gfp; 4294 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 4295 goto out; 4296 gfp = alloc_gfp; 4297 4298 /* Find an allowed local zone that meets the low watermark. 
*/ 4299 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 4300 unsigned long mark; 4301 4302 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 4303 !__cpuset_zone_allowed(zone, gfp)) { 4304 continue; 4305 } 4306 4307 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && 4308 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { 4309 goto failed; 4310 } 4311 4312 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 4313 if (zone_watermark_fast(zone, 0, mark, 4314 zonelist_zone_idx(ac.preferred_zoneref), 4315 alloc_flags, gfp)) { 4316 break; 4317 } 4318 } 4319 4320 /* 4321 * If there are no allowed local zones that meets the watermarks then 4322 * try to allocate a single page and reclaim if necessary. 4323 */ 4324 if (unlikely(!zone)) 4325 goto failed; 4326 4327 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 4328 pcp_trylock_prepare(UP_flags); 4329 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4330 if (!pcp) 4331 goto failed_irq; 4332 4333 /* Attempt the batch allocation */ 4334 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4335 while (nr_populated < nr_pages) { 4336 4337 /* Skip existing pages */ 4338 if (page_array && page_array[nr_populated]) { 4339 nr_populated++; 4340 continue; 4341 } 4342 4343 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4344 pcp, pcp_list); 4345 if (unlikely(!page)) { 4346 /* Try and allocate at least one page */ 4347 if (!nr_account) { 4348 pcp_spin_unlock(pcp); 4349 goto failed_irq; 4350 } 4351 break; 4352 } 4353 nr_account++; 4354 4355 prep_new_page(page, 0, gfp, 0); 4356 if (page_list) 4357 list_add(&page->lru, page_list); 4358 else 4359 page_array[nr_populated] = page; 4360 nr_populated++; 4361 } 4362 4363 pcp_spin_unlock(pcp); 4364 pcp_trylock_finish(UP_flags); 4365 4366 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4367 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); 4368 4369 out: 4370 return nr_populated; 4371 4372 failed_irq: 4373 pcp_trylock_finish(UP_flags); 4374 4375 failed: 4376 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); 4377 if (page) { 4378 if (page_list) 4379 list_add(&page->lru, page_list); 4380 else 4381 page_array[nr_populated] = page; 4382 nr_populated++; 4383 } 4384 4385 goto out; 4386 } 4387 EXPORT_SYMBOL_GPL(__alloc_pages_bulk); 4388 4389 /* 4390 * This is the 'heart' of the zoned buddy allocator. 4391 */ 4392 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, 4393 nodemask_t *nodemask) 4394 { 4395 struct page *page; 4396 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4397 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4398 struct alloc_context ac = { }; 4399 4400 /* 4401 * There are several places where we assume that the order value is sane 4402 * so bail out early if the request is out of bound. 4403 */ 4404 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp)) 4405 return NULL; 4406 4407 gfp &= gfp_allowed_mask; 4408 /* 4409 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4410 * resp. GFP_NOIO which has to be inherited for all allocation requests 4411 * from a particular context which has been marked by 4412 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4413 * movable zones are not used during allocation. 
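 *
 * As an illustrative sketch (not part of this file), a filesystem that
 * must not recurse into itself during reclaim brackets its allocations
 * with the scoped API from <linux/sched/mm.h>, and the masking below is
 * what makes a plain GFP_KERNEL behave like GFP_NOFS inside the scope:
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	page = alloc_page(GFP_KERNEL);		(effectively GFP_NOFS here)
 *	memalloc_nofs_restore(nofs_flags);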
4414 */ 4415 gfp = current_gfp_context(gfp); 4416 alloc_gfp = gfp; 4417 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4418 &alloc_gfp, &alloc_flags)) 4419 return NULL; 4420 4421 /* 4422 * Forbid the first pass from falling back to types that fragment 4423 * memory until all local zones are considered. 4424 */ 4425 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 4426 4427 /* First allocation attempt */ 4428 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4429 if (likely(page)) 4430 goto out; 4431 4432 alloc_gfp = gfp; 4433 ac.spread_dirty_pages = false; 4434 4435 /* 4436 * Restore the original nodemask if it was potentially replaced with 4437 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 4438 */ 4439 ac.nodemask = nodemask; 4440 4441 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4442 4443 out: 4444 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4445 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4446 __free_pages(page, order); 4447 page = NULL; 4448 } 4449 4450 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4451 kmsan_alloc_page(page, order, alloc_gfp); 4452 4453 return page; 4454 } 4455 EXPORT_SYMBOL(__alloc_pages); 4456 4457 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 4458 nodemask_t *nodemask) 4459 { 4460 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 4461 preferred_nid, nodemask); 4462 struct folio *folio = (struct folio *)page; 4463 4464 if (folio && order > 1) 4465 folio_prep_large_rmappable(folio); 4466 return folio; 4467 } 4468 EXPORT_SYMBOL(__folio_alloc); 4469 4470 /* 4471 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4472 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4473 * you need to access high mem. 4474 */ 4475 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 4476 { 4477 struct page *page; 4478 4479 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 4480 if (!page) 4481 return 0; 4482 return (unsigned long) page_address(page); 4483 } 4484 EXPORT_SYMBOL(__get_free_pages); 4485 4486 unsigned long get_zeroed_page(gfp_t gfp_mask) 4487 { 4488 return __get_free_page(gfp_mask | __GFP_ZERO); 4489 } 4490 EXPORT_SYMBOL(get_zeroed_page); 4491 4492 /** 4493 * __free_pages - Free pages allocated with alloc_pages(). 4494 * @page: The page pointer returned from alloc_pages(). 4495 * @order: The order of the allocation. 4496 * 4497 * This function can free multi-page allocations that are not compound 4498 * pages. It does not check that the @order passed in matches that of 4499 * the allocation, so it is easy to leak memory. Freeing more memory 4500 * than was allocated will probably emit a warning. 4501 * 4502 * If the last reference to this page is speculative, it will be released 4503 * by put_page() which only frees the first page of a non-compound 4504 * allocation. To prevent the remaining pages from being leaked, we free 4505 * the subsequent pages here. If you want to use the page's reference 4506 * count to decide when to free the allocation, you should allocate a 4507 * compound page, and use put_page() instead of __free_pages(). 4508 * 4509 * Context: May be called in interrupt context or while holding a normal 4510 * spinlock, but not in NMI context or while holding a raw spinlock. 
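 *
 * Example (illustrative only) of the intended pairing with alloc_pages():
 *
 *	page = alloc_pages(GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);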
4511 */ 4512 void __free_pages(struct page *page, unsigned int order) 4513 { 4514 /* get PageHead before we drop reference */ 4515 int head = PageHead(page); 4516 4517 if (put_page_testzero(page)) 4518 free_the_page(page, order); 4519 else if (!head) 4520 while (order-- > 0) 4521 free_the_page(page + (1 << order), order); 4522 } 4523 EXPORT_SYMBOL(__free_pages); 4524 4525 void free_pages(unsigned long addr, unsigned int order) 4526 { 4527 if (addr != 0) { 4528 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4529 __free_pages(virt_to_page((void *)addr), order); 4530 } 4531 } 4532 4533 EXPORT_SYMBOL(free_pages); 4534 4535 /* 4536 * Page Fragment: 4537 * An arbitrary-length arbitrary-offset area of memory which resides 4538 * within a 0 or higher order page. Multiple fragments within that page 4539 * are individually refcounted, in the page's reference counter. 4540 * 4541 * The page_frag functions below provide a simple allocation framework for 4542 * page fragments. This is used by the network stack and network device 4543 * drivers to provide a backing region of memory for use as either an 4544 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 4545 */ 4546 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4547 gfp_t gfp_mask) 4548 { 4549 struct page *page = NULL; 4550 gfp_t gfp = gfp_mask; 4551 4552 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4553 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 4554 __GFP_NOMEMALLOC; 4555 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4556 PAGE_FRAG_CACHE_MAX_ORDER); 4557 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4558 #endif 4559 if (unlikely(!page)) 4560 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4561 4562 nc->va = page ? page_address(page) : NULL; 4563 4564 return page; 4565 } 4566 4567 void __page_frag_cache_drain(struct page *page, unsigned int count) 4568 { 4569 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4570 4571 if (page_ref_sub_and_test(page, count)) 4572 free_the_page(page, compound_order(page)); 4573 } 4574 EXPORT_SYMBOL(__page_frag_cache_drain); 4575 4576 void *page_frag_alloc_align(struct page_frag_cache *nc, 4577 unsigned int fragsz, gfp_t gfp_mask, 4578 unsigned int align_mask) 4579 { 4580 unsigned int size = PAGE_SIZE; 4581 struct page *page; 4582 int offset; 4583 4584 if (unlikely(!nc->va)) { 4585 refill: 4586 page = __page_frag_cache_refill(nc, gfp_mask); 4587 if (!page) 4588 return NULL; 4589 4590 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4591 /* if size can vary use size else just use PAGE_SIZE */ 4592 size = nc->size; 4593 #endif 4594 /* Even if we own the page, we do not use atomic_set(). 4595 * This would break get_page_unless_zero() users. 
4596 */ 4597 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 4598 4599 /* reset page count bias and offset to start of new frag */ 4600 nc->pfmemalloc = page_is_pfmemalloc(page); 4601 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4602 nc->offset = size; 4603 } 4604 4605 offset = nc->offset - fragsz; 4606 if (unlikely(offset < 0)) { 4607 page = virt_to_page(nc->va); 4608 4609 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 4610 goto refill; 4611 4612 if (unlikely(nc->pfmemalloc)) { 4613 free_the_page(page, compound_order(page)); 4614 goto refill; 4615 } 4616 4617 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4618 /* if size can vary use size else just use PAGE_SIZE */ 4619 size = nc->size; 4620 #endif 4621 /* OK, page count is 0, we can safely set it */ 4622 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 4623 4624 /* reset page count bias and offset to start of new frag */ 4625 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4626 offset = size - fragsz; 4627 if (unlikely(offset < 0)) { 4628 /* 4629 * The caller is trying to allocate a fragment 4630 * with fragsz > PAGE_SIZE but the cache isn't big 4631 * enough to satisfy the request, this may 4632 * happen in low memory conditions. 4633 * We don't release the cache page because 4634 * it could make memory pressure worse 4635 * so we simply return NULL here. 4636 */ 4637 return NULL; 4638 } 4639 } 4640 4641 nc->pagecnt_bias--; 4642 offset &= align_mask; 4643 nc->offset = offset; 4644 4645 return nc->va + offset; 4646 } 4647 EXPORT_SYMBOL(page_frag_alloc_align); 4648 4649 /* 4650 * Frees a page fragment allocated out of either a compound or order 0 page. 4651 */ 4652 void page_frag_free(void *addr) 4653 { 4654 struct page *page = virt_to_head_page(addr); 4655 4656 if (unlikely(put_page_testzero(page))) 4657 free_the_page(page, compound_order(page)); 4658 } 4659 EXPORT_SYMBOL(page_frag_free); 4660 4661 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4662 size_t size) 4663 { 4664 if (addr) { 4665 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 4666 struct page *page = virt_to_page((void *)addr); 4667 struct page *last = page + nr; 4668 4669 split_page_owner(page, 1 << order); 4670 split_page_memcg(page, 1 << order); 4671 while (page < --last) 4672 set_page_refcounted(last); 4673 4674 last = page + (1UL << order); 4675 for (page += nr; page < last; page++) 4676 __free_pages_ok(page, 0, FPI_TO_TAIL); 4677 } 4678 return (void *)addr; 4679 } 4680 4681 /** 4682 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 4683 * @size: the number of bytes to allocate 4684 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4685 * 4686 * This function is similar to alloc_pages(), except that it allocates the 4687 * minimum number of pages to satisfy the request. alloc_pages() can only 4688 * allocate memory in power-of-two pages. 4689 * 4690 * This function is also limited by MAX_ORDER. 4691 * 4692 * Memory allocated by this function must be released by free_pages_exact(). 4693 * 4694 * Return: pointer to the allocated area or %NULL in case of error. 
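 *
 * Example (illustrative only): a request for 5 * PAGE_SIZE bytes is
 * satisfied from an order-3 (8 page) block and the trailing 3 pages are
 * handed straight back to the page allocator, so only 5 pages stay
 * allocated:
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 5 * PAGE_SIZE);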
4695 */ 4696 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 4697 { 4698 unsigned int order = get_order(size); 4699 unsigned long addr; 4700 4701 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4702 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4703 4704 addr = __get_free_pages(gfp_mask, order); 4705 return make_alloc_exact(addr, order, size); 4706 } 4707 EXPORT_SYMBOL(alloc_pages_exact); 4708 4709 /** 4710 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4711 * pages on a node. 4712 * @nid: the preferred node ID where memory should be allocated 4713 * @size: the number of bytes to allocate 4714 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4715 * 4716 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 4717 * back. 4718 * 4719 * Return: pointer to the allocated area or %NULL in case of error. 4720 */ 4721 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 4722 { 4723 unsigned int order = get_order(size); 4724 struct page *p; 4725 4726 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4727 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4728 4729 p = alloc_pages_node(nid, gfp_mask, order); 4730 if (!p) 4731 return NULL; 4732 return make_alloc_exact((unsigned long)page_address(p), order, size); 4733 } 4734 4735 /** 4736 * free_pages_exact - release memory allocated via alloc_pages_exact() 4737 * @virt: the value returned by alloc_pages_exact. 4738 * @size: size of allocation, same value as passed to alloc_pages_exact(). 4739 * 4740 * Release the memory allocated by a previous call to alloc_pages_exact. 4741 */ 4742 void free_pages_exact(void *virt, size_t size) 4743 { 4744 unsigned long addr = (unsigned long)virt; 4745 unsigned long end = addr + PAGE_ALIGN(size); 4746 4747 while (addr < end) { 4748 free_page(addr); 4749 addr += PAGE_SIZE; 4750 } 4751 } 4752 EXPORT_SYMBOL(free_pages_exact); 4753 4754 /** 4755 * nr_free_zone_pages - count number of pages beyond high watermark 4756 * @offset: The zone index of the highest zone 4757 * 4758 * nr_free_zone_pages() counts the number of pages which are beyond the 4759 * high watermark within all zones at or below a given zone index. For each 4760 * zone, the number of pages is calculated as: 4761 * 4762 * nr_free_zone_pages = managed_pages - high_pages 4763 * 4764 * Return: number of pages beyond high watermark. 4765 */ 4766 static unsigned long nr_free_zone_pages(int offset) 4767 { 4768 struct zoneref *z; 4769 struct zone *zone; 4770 4771 /* Just pick one node, since fallback list is circular */ 4772 unsigned long sum = 0; 4773 4774 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 4775 4776 for_each_zone_zonelist(zone, z, zonelist, offset) { 4777 unsigned long size = zone_managed_pages(zone); 4778 unsigned long high = high_wmark_pages(zone); 4779 if (size > high) 4780 sum += size - high; 4781 } 4782 4783 return sum; 4784 } 4785 4786 /** 4787 * nr_free_buffer_pages - count number of pages beyond high watermark 4788 * 4789 * nr_free_buffer_pages() counts the number of pages which are beyond the high 4790 * watermark within ZONE_DMA and ZONE_NORMAL. 4791 * 4792 * Return: number of pages beyond high watermark within ZONE_DMA and 4793 * ZONE_NORMAL. 
4794 */ 4795 unsigned long nr_free_buffer_pages(void) 4796 { 4797 return nr_free_zone_pages(gfp_zone(GFP_USER)); 4798 } 4799 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 4800 4801 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 4802 { 4803 zoneref->zone = zone; 4804 zoneref->zone_idx = zone_idx(zone); 4805 } 4806 4807 /* 4808 * Builds allocation fallback zone lists. 4809 * 4810 * Add all populated zones of a node to the zonelist. 4811 */ 4812 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 4813 { 4814 struct zone *zone; 4815 enum zone_type zone_type = MAX_NR_ZONES; 4816 int nr_zones = 0; 4817 4818 do { 4819 zone_type--; 4820 zone = pgdat->node_zones + zone_type; 4821 if (populated_zone(zone)) { 4822 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 4823 check_highest_zone(zone_type); 4824 } 4825 } while (zone_type); 4826 4827 return nr_zones; 4828 } 4829 4830 #ifdef CONFIG_NUMA 4831 4832 static int __parse_numa_zonelist_order(char *s) 4833 { 4834 /* 4835 * We used to support different zonelists modes but they turned 4836 * out to be just not useful. Let's keep the warning in place 4837 * if somebody still use the cmd line parameter so that we do 4838 * not fail it silently 4839 */ 4840 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 4841 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 4842 return -EINVAL; 4843 } 4844 return 0; 4845 } 4846 4847 static char numa_zonelist_order[] = "Node"; 4848 #define NUMA_ZONELIST_ORDER_LEN 16 4849 /* 4850 * sysctl handler for numa_zonelist_order 4851 */ 4852 static int numa_zonelist_order_handler(struct ctl_table *table, int write, 4853 void *buffer, size_t *length, loff_t *ppos) 4854 { 4855 if (write) 4856 return __parse_numa_zonelist_order(buffer); 4857 return proc_dostring(table, write, buffer, length, ppos); 4858 } 4859 4860 static int node_load[MAX_NUMNODES]; 4861 4862 /** 4863 * find_next_best_node - find the next node that should appear in a given node's fallback list 4864 * @node: node whose fallback list we're appending 4865 * @used_node_mask: nodemask_t of already used nodes 4866 * 4867 * We use a number of factors to determine which is the next node that should 4868 * appear on a given node's fallback list. The node should not have appeared 4869 * already in @node's fallback list, and it should be the next closest node 4870 * according to the distance array (which contains arbitrary distance values 4871 * from each node to each node in the system), and should also prefer nodes 4872 * with no CPUs, since presumably they'll have very little allocation pressure 4873 * on them otherwise. 4874 * 4875 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 
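 *
 * For example, a candidate node n > node at distance 20 that has CPUs and
 * no accumulated node_load scores (20 + PENALTY_FOR_NODE_WITH_CPUS) *
 * MAX_NUMNODES, while a CPU-less node at the same distance scores only
 * 20 * MAX_NUMNODES and is therefore preferred; the lowest score wins.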
4876 */ 4877 int find_next_best_node(int node, nodemask_t *used_node_mask) 4878 { 4879 int n, val; 4880 int min_val = INT_MAX; 4881 int best_node = NUMA_NO_NODE; 4882 4883 /* Use the local node if we haven't already */ 4884 if (!node_isset(node, *used_node_mask)) { 4885 node_set(node, *used_node_mask); 4886 return node; 4887 } 4888 4889 for_each_node_state(n, N_MEMORY) { 4890 4891 /* Don't want a node to appear more than once */ 4892 if (node_isset(n, *used_node_mask)) 4893 continue; 4894 4895 /* Use the distance array to find the distance */ 4896 val = node_distance(node, n); 4897 4898 /* Penalize nodes under us ("prefer the next node") */ 4899 val += (n < node); 4900 4901 /* Give preference to headless and unused nodes */ 4902 if (!cpumask_empty(cpumask_of_node(n))) 4903 val += PENALTY_FOR_NODE_WITH_CPUS; 4904 4905 /* Slight preference for less loaded node */ 4906 val *= MAX_NUMNODES; 4907 val += node_load[n]; 4908 4909 if (val < min_val) { 4910 min_val = val; 4911 best_node = n; 4912 } 4913 } 4914 4915 if (best_node >= 0) 4916 node_set(best_node, *used_node_mask); 4917 4918 return best_node; 4919 } 4920 4921 4922 /* 4923 * Build zonelists ordered by node and zones within node. 4924 * This results in maximum locality--normal zone overflows into local 4925 * DMA zone, if any--but risks exhausting DMA zone. 4926 */ 4927 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 4928 unsigned nr_nodes) 4929 { 4930 struct zoneref *zonerefs; 4931 int i; 4932 4933 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 4934 4935 for (i = 0; i < nr_nodes; i++) { 4936 int nr_zones; 4937 4938 pg_data_t *node = NODE_DATA(node_order[i]); 4939 4940 nr_zones = build_zonerefs_node(node, zonerefs); 4941 zonerefs += nr_zones; 4942 } 4943 zonerefs->zone = NULL; 4944 zonerefs->zone_idx = 0; 4945 } 4946 4947 /* 4948 * Build gfp_thisnode zonelists 4949 */ 4950 static void build_thisnode_zonelists(pg_data_t *pgdat) 4951 { 4952 struct zoneref *zonerefs; 4953 int nr_zones; 4954 4955 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 4956 nr_zones = build_zonerefs_node(pgdat, zonerefs); 4957 zonerefs += nr_zones; 4958 zonerefs->zone = NULL; 4959 zonerefs->zone_idx = 0; 4960 } 4961 4962 /* 4963 * Build zonelists ordered by zone and nodes within zones. 4964 * This results in conserving DMA zone[s] until all Normal memory is 4965 * exhausted, but results in overflowing to remote node while memory 4966 * may still exist in local DMA zone. 4967 */ 4968 4969 static void build_zonelists(pg_data_t *pgdat) 4970 { 4971 static int node_order[MAX_NUMNODES]; 4972 int node, nr_nodes = 0; 4973 nodemask_t used_mask = NODE_MASK_NONE; 4974 int local_node, prev_node; 4975 4976 /* NUMA-aware ordering of nodes */ 4977 local_node = pgdat->node_id; 4978 prev_node = local_node; 4979 4980 memset(node_order, 0, sizeof(node_order)); 4981 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 4982 /* 4983 * We don't want to pressure a particular node. 4984 * So adding penalty to the first node in same 4985 * distance group to make it round-robin. 
4986 */ 4987 if (node_distance(local_node, node) != 4988 node_distance(local_node, prev_node)) 4989 node_load[node] += 1; 4990 4991 node_order[nr_nodes++] = node; 4992 prev_node = node; 4993 } 4994 4995 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 4996 build_thisnode_zonelists(pgdat); 4997 pr_info("Fallback order for Node %d: ", local_node); 4998 for (node = 0; node < nr_nodes; node++) 4999 pr_cont("%d ", node_order[node]); 5000 pr_cont("\n"); 5001 } 5002 5003 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5004 /* 5005 * Return node id of node used for "local" allocations. 5006 * I.e., first node id of first zone in arg node's generic zonelist. 5007 * Used for initializing percpu 'numa_mem', which is used primarily 5008 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5009 */ 5010 int local_memory_node(int node) 5011 { 5012 struct zoneref *z; 5013 5014 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5015 gfp_zone(GFP_KERNEL), 5016 NULL); 5017 return zone_to_nid(z->zone); 5018 } 5019 #endif 5020 5021 static void setup_min_unmapped_ratio(void); 5022 static void setup_min_slab_ratio(void); 5023 #else /* CONFIG_NUMA */ 5024 5025 static void build_zonelists(pg_data_t *pgdat) 5026 { 5027 int node, local_node; 5028 struct zoneref *zonerefs; 5029 int nr_zones; 5030 5031 local_node = pgdat->node_id; 5032 5033 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5034 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5035 zonerefs += nr_zones; 5036 5037 /* 5038 * Now we build the zonelist so that it contains the zones 5039 * of all the other nodes. 5040 * We don't want to pressure a particular node, so when 5041 * building the zones for node N, we make sure that the 5042 * zones coming right after the local ones are those from 5043 * node N+1 (modulo N) 5044 */ 5045 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 5046 if (!node_online(node)) 5047 continue; 5048 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5049 zonerefs += nr_zones; 5050 } 5051 for (node = 0; node < local_node; node++) { 5052 if (!node_online(node)) 5053 continue; 5054 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5055 zonerefs += nr_zones; 5056 } 5057 5058 zonerefs->zone = NULL; 5059 zonerefs->zone_idx = 0; 5060 } 5061 5062 #endif /* CONFIG_NUMA */ 5063 5064 /* 5065 * Boot pageset table. One per cpu which is going to be used for all 5066 * zones and all nodes. The parameters will be set in such a way 5067 * that an item put on a list will immediately be handed over to 5068 * the buddy list. This is safe since pageset manipulation is done 5069 * with interrupts disabled. 5070 * 5071 * The boot_pagesets must be kept even after bootup is complete for 5072 * unused processors and/or zones. They do play a role for bootstrapping 5073 * hotplugged processors. 5074 * 5075 * zoneinfo_show() and maybe other functions do 5076 * not check if the processor is online before following the pageset pointer. 5077 * Other parts of the kernel may not check if the zone is available. 
5078 */ 5079 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5080 /* These effectively disable the pcplists in the boot pageset completely */ 5081 #define BOOT_PAGESET_HIGH 0 5082 #define BOOT_PAGESET_BATCH 1 5083 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5084 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5085 5086 static void __build_all_zonelists(void *data) 5087 { 5088 int nid; 5089 int __maybe_unused cpu; 5090 pg_data_t *self = data; 5091 unsigned long flags; 5092 5093 /* 5094 * The zonelist_update_seq must be acquired with irqsave because the 5095 * reader can be invoked from IRQ with GFP_ATOMIC. 5096 */ 5097 write_seqlock_irqsave(&zonelist_update_seq, flags); 5098 /* 5099 * Also disable synchronous printk() to prevent any printk() from 5100 * trying to hold port->lock, for 5101 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5102 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5103 */ 5104 printk_deferred_enter(); 5105 5106 #ifdef CONFIG_NUMA 5107 memset(node_load, 0, sizeof(node_load)); 5108 #endif 5109 5110 /* 5111 * This node is hotadded and no memory is yet present. So just 5112 * building zonelists is fine - no need to touch other nodes. 5113 */ 5114 if (self && !node_online(self->node_id)) { 5115 build_zonelists(self); 5116 } else { 5117 /* 5118 * All possible nodes have pgdat preallocated 5119 * in free_area_init 5120 */ 5121 for_each_node(nid) { 5122 pg_data_t *pgdat = NODE_DATA(nid); 5123 5124 build_zonelists(pgdat); 5125 } 5126 5127 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5128 /* 5129 * We now know the "local memory node" for each node-- 5130 * i.e., the node of the first zone in the generic zonelist. 5131 * Set up numa_mem percpu variable for on-line cpus. During 5132 * boot, only the boot cpu should be on-line; we'll init the 5133 * secondary cpus' numa_mem as they come on-line. During 5134 * node/memory hotplug, we'll fixup all on-line cpus. 5135 */ 5136 for_each_online_cpu(cpu) 5137 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5138 #endif 5139 } 5140 5141 printk_deferred_exit(); 5142 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5143 } 5144 5145 static noinline void __init 5146 build_all_zonelists_init(void) 5147 { 5148 int cpu; 5149 5150 __build_all_zonelists(NULL); 5151 5152 /* 5153 * Initialize the boot_pagesets that are going to be used 5154 * for bootstrapping processors. The real pagesets for 5155 * each zone will be allocated later when the per cpu 5156 * allocator is available. 5157 * 5158 * boot_pagesets are used also for bootstrapping offline 5159 * cpus if the system is already booted because the pagesets 5160 * are needed to initialize allocators on a specific cpu too. 5161 * F.e. the percpu allocator needs the page allocator which 5162 * needs the percpu allocator in order to allocate its pagesets 5163 * (a chicken-egg dilemma). 5164 */ 5165 for_each_possible_cpu(cpu) 5166 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5167 5168 mminit_verify_zonelist(); 5169 cpuset_init_current_mems_allowed(); 5170 } 5171 5172 /* 5173 * unless system_state == SYSTEM_BOOTING. 5174 * 5175 * __ref due to call of __init annotated helper build_all_zonelists_init 5176 * [protected by SYSTEM_BOOTING]. 
5177 */ 5178 void __ref build_all_zonelists(pg_data_t *pgdat) 5179 { 5180 unsigned long vm_total_pages; 5181 5182 if (system_state == SYSTEM_BOOTING) { 5183 build_all_zonelists_init(); 5184 } else { 5185 __build_all_zonelists(pgdat); 5186 /* cpuset refresh routine should be here */ 5187 } 5188 /* Get the number of free pages beyond high watermark in all zones. */ 5189 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5190 /* 5191 * Disable grouping by mobility if the number of pages in the 5192 * system is too low to allow the mechanism to work. It would be 5193 * more accurate, but expensive to check per-zone. This check is 5194 * made on memory-hotadd so a system can start with mobility 5195 * disabled and enable it later 5196 */ 5197 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5198 page_group_by_mobility_disabled = 1; 5199 else 5200 page_group_by_mobility_disabled = 0; 5201 5202 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5203 nr_online_nodes, 5204 page_group_by_mobility_disabled ? "off" : "on", 5205 vm_total_pages); 5206 #ifdef CONFIG_NUMA 5207 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5208 #endif 5209 } 5210 5211 static int zone_batchsize(struct zone *zone) 5212 { 5213 #ifdef CONFIG_MMU 5214 int batch; 5215 5216 /* 5217 * The number of pages to batch allocate is either ~0.1% 5218 * of the zone or 1MB, whichever is smaller. The batch 5219 * size is striking a balance between allocation latency 5220 * and zone lock contention. 5221 */ 5222 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5223 batch /= 4; /* We effectively *= 4 below */ 5224 if (batch < 1) 5225 batch = 1; 5226 5227 /* 5228 * Clamp the batch to a 2^n - 1 value. Having a power 5229 * of 2 value was found to be more likely to have 5230 * suboptimal cache aliasing properties in some cases. 5231 * 5232 * For example if 2 tasks are alternately allocating 5233 * batches of pages, one task can end up with a lot 5234 * of pages of one half of the possible page colors 5235 * and the other with pages of the other colors. 5236 */ 5237 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5238 5239 return batch; 5240 5241 #else 5242 /* The deferral and batching of frees should be suppressed under NOMMU 5243 * conditions. 5244 * 5245 * The problem is that NOMMU needs to be able to allocate large chunks 5246 * of contiguous memory as there's no hardware page translation to 5247 * assemble apparent contiguous memory from discontiguous pages. 5248 * 5249 * Queueing large contiguous runs of pages for batching, however, 5250 * causes the pages to actually be freed in smaller chunks. As there 5251 * can be a significant delay between the individual batches being 5252 * recycled, this leads to the once large chunks of space being 5253 * fragmented and becoming unavailable for high-order allocations. 5254 */ 5255 return 0; 5256 #endif 5257 } 5258 5259 static int percpu_pagelist_high_fraction; 5260 static int zone_highsize(struct zone *zone, int batch, int cpu_online) 5261 { 5262 #ifdef CONFIG_MMU 5263 int high; 5264 int nr_split_cpus; 5265 unsigned long total_pages; 5266 5267 if (!percpu_pagelist_high_fraction) { 5268 /* 5269 * By default, the high value of the pcp is based on the zone 5270 * low watermark so that if they are full then background 5271 * reclaim will not be started prematurely. 
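 *
 * As a rough, illustrative example: with 4K pages, a zone whose low
 * watermark is 16384 pages (64MB) and which has 16 local CPUs ends up
 * with a per-cpu high of about 1024 pages (4MB), subject to the
 * batch-based floor applied at the end of this function.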
5272 */ 5273 total_pages = low_wmark_pages(zone); 5274 } else { 5275 /* 5276 * If percpu_pagelist_high_fraction is configured, the high 5277 * value is based on a fraction of the managed pages in the 5278 * zone. 5279 */ 5280 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; 5281 } 5282 5283 /* 5284 * Split the high value across all online CPUs local to the zone. Note 5285 * that early in boot that CPUs may not be online yet and that during 5286 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5287 * onlined. For memory nodes that have no CPUs, split pcp->high across 5288 * all online CPUs to mitigate the risk that reclaim is triggered 5289 * prematurely due to pages stored on pcp lists. 5290 */ 5291 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5292 if (!nr_split_cpus) 5293 nr_split_cpus = num_online_cpus(); 5294 high = total_pages / nr_split_cpus; 5295 5296 /* 5297 * Ensure high is at least batch*4. The multiple is based on the 5298 * historical relationship between high and batch. 5299 */ 5300 high = max(high, batch << 2); 5301 5302 return high; 5303 #else 5304 return 0; 5305 #endif 5306 } 5307 5308 /* 5309 * pcp->high and pcp->batch values are related and generally batch is lower 5310 * than high. They are also related to pcp->count such that count is lower 5311 * than high, and as soon as it reaches high, the pcplist is flushed. 5312 * 5313 * However, guaranteeing these relations at all times would require e.g. write 5314 * barriers here but also careful usage of read barriers at the read side, and 5315 * thus be prone to error and bad for performance. Thus the update only prevents 5316 * store tearing. Any new users of pcp->batch and pcp->high should ensure they 5317 * can cope with those fields changing asynchronously, and fully trust only the 5318 * pcp->count field on the local CPU with interrupts disabled. 5319 * 5320 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5321 * outside of boot time (or some other assurance that no concurrent updaters 5322 * exist). 5323 */ 5324 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 5325 unsigned long batch) 5326 { 5327 WRITE_ONCE(pcp->batch, batch); 5328 WRITE_ONCE(pcp->high, high); 5329 } 5330 5331 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5332 { 5333 int pindex; 5334 5335 memset(pcp, 0, sizeof(*pcp)); 5336 memset(pzstats, 0, sizeof(*pzstats)); 5337 5338 spin_lock_init(&pcp->lock); 5339 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5340 INIT_LIST_HEAD(&pcp->lists[pindex]); 5341 5342 /* 5343 * Set batch and high values safe for a boot pageset. A true percpu 5344 * pageset's initialization will update them subsequently. Here we don't 5345 * need to be as careful as pageset_update() as nobody can access the 5346 * pageset yet. 5347 */ 5348 pcp->high = BOOT_PAGESET_HIGH; 5349 pcp->batch = BOOT_PAGESET_BATCH; 5350 pcp->free_factor = 0; 5351 } 5352 5353 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 5354 unsigned long batch) 5355 { 5356 struct per_cpu_pages *pcp; 5357 int cpu; 5358 5359 for_each_possible_cpu(cpu) { 5360 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5361 pageset_update(pcp, high, batch); 5362 } 5363 } 5364 5365 /* 5366 * Calculate and set new high and batch values for all per-cpu pagesets of a 5367 * zone based on the zone's size. 
5368 */ 5369 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5370 { 5371 int new_high, new_batch; 5372 5373 new_batch = max(1, zone_batchsize(zone)); 5374 new_high = zone_highsize(zone, new_batch, cpu_online); 5375 5376 if (zone->pageset_high == new_high && 5377 zone->pageset_batch == new_batch) 5378 return; 5379 5380 zone->pageset_high = new_high; 5381 zone->pageset_batch = new_batch; 5382 5383 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 5384 } 5385 5386 void __meminit setup_zone_pageset(struct zone *zone) 5387 { 5388 int cpu; 5389 5390 /* Size may be 0 on !SMP && !NUMA */ 5391 if (sizeof(struct per_cpu_zonestat) > 0) 5392 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5393 5394 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5395 for_each_possible_cpu(cpu) { 5396 struct per_cpu_pages *pcp; 5397 struct per_cpu_zonestat *pzstats; 5398 5399 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5400 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5401 per_cpu_pages_init(pcp, pzstats); 5402 } 5403 5404 zone_set_pageset_high_and_batch(zone, 0); 5405 } 5406 5407 /* 5408 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5409 * page high values need to be recalculated. 5410 */ 5411 static void zone_pcp_update(struct zone *zone, int cpu_online) 5412 { 5413 mutex_lock(&pcp_batch_high_lock); 5414 zone_set_pageset_high_and_batch(zone, cpu_online); 5415 mutex_unlock(&pcp_batch_high_lock); 5416 } 5417 5418 /* 5419 * Allocate per cpu pagesets and initialize them. 5420 * Before this call only boot pagesets were available. 5421 */ 5422 void __init setup_per_cpu_pageset(void) 5423 { 5424 struct pglist_data *pgdat; 5425 struct zone *zone; 5426 int __maybe_unused cpu; 5427 5428 for_each_populated_zone(zone) 5429 setup_zone_pageset(zone); 5430 5431 #ifdef CONFIG_NUMA 5432 /* 5433 * Unpopulated zones continue using the boot pagesets. 5434 * The numa stats for these pagesets need to be reset. 5435 * Otherwise, they will end up skewing the stats of 5436 * the nodes these zones are associated with. 5437 */ 5438 for_each_possible_cpu(cpu) { 5439 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5440 memset(pzstats->vm_numa_event, 0, 5441 sizeof(pzstats->vm_numa_event)); 5442 } 5443 #endif 5444 5445 for_each_online_pgdat(pgdat) 5446 pgdat->per_cpu_nodestats = 5447 alloc_percpu(struct per_cpu_nodestat); 5448 } 5449 5450 __meminit void zone_pcp_init(struct zone *zone) 5451 { 5452 /* 5453 * per cpu subsystem is not up at this point. The following code 5454 * relies on the ability of the linker to provide the 5455 * offset of a (static) per cpu variable into the per cpu area. 
5456 */ 5457 zone->per_cpu_pageset = &boot_pageset; 5458 zone->per_cpu_zonestats = &boot_zonestats; 5459 zone->pageset_high = BOOT_PAGESET_HIGH; 5460 zone->pageset_batch = BOOT_PAGESET_BATCH; 5461 5462 if (populated_zone(zone)) 5463 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 5464 zone->present_pages, zone_batchsize(zone)); 5465 } 5466 5467 void adjust_managed_page_count(struct page *page, long count) 5468 { 5469 atomic_long_add(count, &page_zone(page)->managed_pages); 5470 totalram_pages_add(count); 5471 #ifdef CONFIG_HIGHMEM 5472 if (PageHighMem(page)) 5473 totalhigh_pages_add(count); 5474 #endif 5475 } 5476 EXPORT_SYMBOL(adjust_managed_page_count); 5477 5478 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 5479 { 5480 void *pos; 5481 unsigned long pages = 0; 5482 5483 start = (void *)PAGE_ALIGN((unsigned long)start); 5484 end = (void *)((unsigned long)end & PAGE_MASK); 5485 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5486 struct page *page = virt_to_page(pos); 5487 void *direct_map_addr; 5488 5489 /* 5490 * 'direct_map_addr' might be different from 'pos' 5491 * because some architectures' virt_to_page() 5492 * work with aliases. Getting the direct map 5493 * address ensures that we get a _writeable_ 5494 * alias for the memset(). 5495 */ 5496 direct_map_addr = page_address(page); 5497 /* 5498 * Perform a kasan-unchecked memset() since this memory 5499 * has not been initialized. 5500 */ 5501 direct_map_addr = kasan_reset_tag(direct_map_addr); 5502 if ((unsigned int)poison <= 0xFF) 5503 memset(direct_map_addr, poison, PAGE_SIZE); 5504 5505 free_reserved_page(page); 5506 } 5507 5508 if (pages && s) 5509 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 5510 5511 return pages; 5512 } 5513 5514 static int page_alloc_cpu_dead(unsigned int cpu) 5515 { 5516 struct zone *zone; 5517 5518 lru_add_drain_cpu(cpu); 5519 mlock_drain_remote(cpu); 5520 drain_pages(cpu); 5521 5522 /* 5523 * Spill the event counters of the dead processor 5524 * into the current processors event counters. 5525 * This artificially elevates the count of the current 5526 * processor. 5527 */ 5528 vm_events_fold_cpu(cpu); 5529 5530 /* 5531 * Zero the differential counters of the dead processor 5532 * so that the vm statistics are consistent. 5533 * 5534 * This is only okay since the processor is dead and cannot 5535 * race with what we are doing. 5536 */ 5537 cpu_vm_stats_fold(cpu); 5538 5539 for_each_populated_zone(zone) 5540 zone_pcp_update(zone, 0); 5541 5542 return 0; 5543 } 5544 5545 static int page_alloc_cpu_online(unsigned int cpu) 5546 { 5547 struct zone *zone; 5548 5549 for_each_populated_zone(zone) 5550 zone_pcp_update(zone, 1); 5551 return 0; 5552 } 5553 5554 void __init page_alloc_init_cpuhp(void) 5555 { 5556 int ret; 5557 5558 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 5559 "mm/page_alloc:pcp", 5560 page_alloc_cpu_online, 5561 page_alloc_cpu_dead); 5562 WARN_ON(ret < 0); 5563 } 5564 5565 /* 5566 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 5567 * or min_free_kbytes changes. 
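 *
 * Roughly: for every zone, the largest lowmem_reserve[] entry plus the
 * high watermark is treated as unavailable, clamped to the zone's
 * managed pages, and the per-node and global totals are rebuilt from
 * that.  For example (made-up figures), a zone with a high watermark of
 * 2048 pages and a largest lowmem_reserve[] entry of 1024 pages
 * contributes 3072 pages to totalreserve_pages.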
5568 */ 5569 static void calculate_totalreserve_pages(void) 5570 { 5571 struct pglist_data *pgdat; 5572 unsigned long reserve_pages = 0; 5573 enum zone_type i, j; 5574 5575 for_each_online_pgdat(pgdat) { 5576 5577 pgdat->totalreserve_pages = 0; 5578 5579 for (i = 0; i < MAX_NR_ZONES; i++) { 5580 struct zone *zone = pgdat->node_zones + i; 5581 long max = 0; 5582 unsigned long managed_pages = zone_managed_pages(zone); 5583 5584 /* Find valid and maximum lowmem_reserve in the zone */ 5585 for (j = i; j < MAX_NR_ZONES; j++) { 5586 if (zone->lowmem_reserve[j] > max) 5587 max = zone->lowmem_reserve[j]; 5588 } 5589 5590 /* we treat the high watermark as reserved pages. */ 5591 max += high_wmark_pages(zone); 5592 5593 if (max > managed_pages) 5594 max = managed_pages; 5595 5596 pgdat->totalreserve_pages += max; 5597 5598 reserve_pages += max; 5599 } 5600 } 5601 totalreserve_pages = reserve_pages; 5602 } 5603 5604 /* 5605 * setup_per_zone_lowmem_reserve - called whenever 5606 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 5607 * has a correct pages reserved value, so an adequate number of 5608 * pages are left in the zone after a successful __alloc_pages(). 5609 */ 5610 static void setup_per_zone_lowmem_reserve(void) 5611 { 5612 struct pglist_data *pgdat; 5613 enum zone_type i, j; 5614 5615 for_each_online_pgdat(pgdat) { 5616 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 5617 struct zone *zone = &pgdat->node_zones[i]; 5618 int ratio = sysctl_lowmem_reserve_ratio[i]; 5619 bool clear = !ratio || !zone_managed_pages(zone); 5620 unsigned long managed_pages = 0; 5621 5622 for (j = i + 1; j < MAX_NR_ZONES; j++) { 5623 struct zone *upper_zone = &pgdat->node_zones[j]; 5624 5625 managed_pages += zone_managed_pages(upper_zone); 5626 5627 if (clear) 5628 zone->lowmem_reserve[j] = 0; 5629 else 5630 zone->lowmem_reserve[j] = managed_pages / ratio; 5631 } 5632 } 5633 } 5634 5635 /* update totalreserve_pages */ 5636 calculate_totalreserve_pages(); 5637 } 5638 5639 static void __setup_per_zone_wmarks(void) 5640 { 5641 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5642 unsigned long lowmem_pages = 0; 5643 struct zone *zone; 5644 unsigned long flags; 5645 5646 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 5647 for_each_zone(zone) { 5648 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 5649 lowmem_pages += zone_managed_pages(zone); 5650 } 5651 5652 for_each_zone(zone) { 5653 u64 tmp; 5654 5655 spin_lock_irqsave(&zone->lock, flags); 5656 tmp = (u64)pages_min * zone_managed_pages(zone); 5657 do_div(tmp, lowmem_pages); 5658 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 5659 /* 5660 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 5661 * need highmem and movable zones pages, so cap pages_min 5662 * to a small value here. 5663 * 5664 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 5665 * deltas control async page reclaim, and so should 5666 * not be capped for highmem and movable zones. 5667 */ 5668 unsigned long min_pages; 5669 5670 min_pages = zone_managed_pages(zone) / 1024; 5671 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 5672 zone->_watermark[WMARK_MIN] = min_pages; 5673 } else { 5674 /* 5675 * If it's a lowmem zone, reserve a number of pages 5676 * proportionate to the zone's size. 5677 */ 5678 zone->_watermark[WMARK_MIN] = tmp; 5679 } 5680 5681 /* 5682 * Set the kswapd watermarks distance according to the 5683 * scale factor in proportion to available memory, but 5684 * ensure a minimum size on small systems. 
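 *
 * For example (figures for illustration only): with
 * watermark_scale_factor = 10, a zone managing 1048576 pages (4GB of
 * 4KiB pages) gets a gap of max(tmp >> 2, 1048576 * 10 / 10000), i.e.
 * at least 1048 pages (roughly 0.1% of the zone), between consecutive
 * watermarks.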
5685 */ 5686 tmp = max_t(u64, tmp >> 2, 5687 mult_frac(zone_managed_pages(zone), 5688 watermark_scale_factor, 10000)); 5689 5690 zone->watermark_boost = 0; 5691 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 5692 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 5693 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 5694 5695 spin_unlock_irqrestore(&zone->lock, flags); 5696 } 5697 5698 /* update totalreserve_pages */ 5699 calculate_totalreserve_pages(); 5700 } 5701 5702 /** 5703 * setup_per_zone_wmarks - called when min_free_kbytes changes 5704 * or when memory is hot-{added|removed} 5705 * 5706 * Ensures that the watermark[min,low,high] values for each zone are set 5707 * correctly with respect to min_free_kbytes. 5708 */ 5709 void setup_per_zone_wmarks(void) 5710 { 5711 struct zone *zone; 5712 static DEFINE_SPINLOCK(lock); 5713 5714 spin_lock(&lock); 5715 __setup_per_zone_wmarks(); 5716 spin_unlock(&lock); 5717 5718 /* 5719 * The watermark size have changed so update the pcpu batch 5720 * and high limits or the limits may be inappropriate. 5721 */ 5722 for_each_zone(zone) 5723 zone_pcp_update(zone, 0); 5724 } 5725 5726 /* 5727 * Initialise min_free_kbytes. 5728 * 5729 * For small machines we want it small (128k min). For large machines 5730 * we want it large (256MB max). But it is not linear, because network 5731 * bandwidth does not increase linearly with machine size. We use 5732 * 5733 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 5734 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 5735 * 5736 * which yields 5737 * 5738 * 16MB: 512k 5739 * 32MB: 724k 5740 * 64MB: 1024k 5741 * 128MB: 1448k 5742 * 256MB: 2048k 5743 * 512MB: 2896k 5744 * 1024MB: 4096k 5745 * 2048MB: 5792k 5746 * 4096MB: 8192k 5747 * 8192MB: 11584k 5748 * 16384MB: 16384k 5749 */ 5750 void calculate_min_free_kbytes(void) 5751 { 5752 unsigned long lowmem_kbytes; 5753 int new_min_free_kbytes; 5754 5755 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 5756 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 5757 5758 if (new_min_free_kbytes > user_min_free_kbytes) 5759 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 5760 else 5761 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 5762 new_min_free_kbytes, user_min_free_kbytes); 5763 5764 } 5765 5766 int __meminit init_per_zone_wmark_min(void) 5767 { 5768 calculate_min_free_kbytes(); 5769 setup_per_zone_wmarks(); 5770 refresh_zone_stat_thresholds(); 5771 setup_per_zone_lowmem_reserve(); 5772 5773 #ifdef CONFIG_NUMA 5774 setup_min_unmapped_ratio(); 5775 setup_min_slab_ratio(); 5776 #endif 5777 5778 khugepaged_min_free_kbytes_update(); 5779 5780 return 0; 5781 } 5782 postcore_initcall(init_per_zone_wmark_min) 5783 5784 /* 5785 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 5786 * that we can call two helper functions whenever min_free_kbytes 5787 * changes. 
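 *
 * From userspace the knob is /proc/sys/vm/min_free_kbytes (sysctl
 * vm.min_free_kbytes); for instance
 *
 *	echo 65536 > /proc/sys/vm/min_free_kbytes
 *
 * re-runs setup_per_zone_wmarks() with the new value and records it in
 * user_min_free_kbytes, so the automatic recalculation done by
 * calculate_min_free_kbytes() will not override it with a smaller value.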
5788 */ 5789 static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 5790 void *buffer, size_t *length, loff_t *ppos) 5791 { 5792 int rc; 5793 5794 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5795 if (rc) 5796 return rc; 5797 5798 if (write) { 5799 user_min_free_kbytes = min_free_kbytes; 5800 setup_per_zone_wmarks(); 5801 } 5802 return 0; 5803 } 5804 5805 static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 5806 void *buffer, size_t *length, loff_t *ppos) 5807 { 5808 int rc; 5809 5810 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5811 if (rc) 5812 return rc; 5813 5814 if (write) 5815 setup_per_zone_wmarks(); 5816 5817 return 0; 5818 } 5819 5820 #ifdef CONFIG_NUMA 5821 static void setup_min_unmapped_ratio(void) 5822 { 5823 pg_data_t *pgdat; 5824 struct zone *zone; 5825 5826 for_each_online_pgdat(pgdat) 5827 pgdat->min_unmapped_pages = 0; 5828 5829 for_each_zone(zone) 5830 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 5831 sysctl_min_unmapped_ratio) / 100; 5832 } 5833 5834 5835 static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 5836 void *buffer, size_t *length, loff_t *ppos) 5837 { 5838 int rc; 5839 5840 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5841 if (rc) 5842 return rc; 5843 5844 setup_min_unmapped_ratio(); 5845 5846 return 0; 5847 } 5848 5849 static void setup_min_slab_ratio(void) 5850 { 5851 pg_data_t *pgdat; 5852 struct zone *zone; 5853 5854 for_each_online_pgdat(pgdat) 5855 pgdat->min_slab_pages = 0; 5856 5857 for_each_zone(zone) 5858 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 5859 sysctl_min_slab_ratio) / 100; 5860 } 5861 5862 static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 5863 void *buffer, size_t *length, loff_t *ppos) 5864 { 5865 int rc; 5866 5867 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5868 if (rc) 5869 return rc; 5870 5871 setup_min_slab_ratio(); 5872 5873 return 0; 5874 } 5875 #endif 5876 5877 /* 5878 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 5879 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 5880 * whenever sysctl_lowmem_reserve_ratio changes. 5881 * 5882 * The reserve ratio obviously has absolutely no relation with the 5883 * minimum watermarks. The lowmem reserve ratio can only make sense 5884 * if in function of the boot time zone sizes. 5885 */ 5886 static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, 5887 int write, void *buffer, size_t *length, loff_t *ppos) 5888 { 5889 int i; 5890 5891 proc_dointvec_minmax(table, write, buffer, length, ppos); 5892 5893 for (i = 0; i < MAX_NR_ZONES; i++) { 5894 if (sysctl_lowmem_reserve_ratio[i] < 1) 5895 sysctl_lowmem_reserve_ratio[i] = 0; 5896 } 5897 5898 setup_per_zone_lowmem_reserve(); 5899 return 0; 5900 } 5901 5902 /* 5903 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 5904 * cpu. It is the fraction of total pages in each zone that a hot per cpu 5905 * pagelist can have before it gets flushed back to buddy allocator. 
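 *
 * For example (illustrative numbers): writing 8 to
 * /proc/sys/vm/percpu_pagelist_high_fraction for a zone managing
 * 1048576 pages with 4 CPUs local to its node gives each pcplist a
 * high mark of about 1048576 / 8 / 4 = 32768 pages, subject to the
 * batch * 4 floor in zone_highsize().  Writing 0 returns to the
 * default heuristic based on the zone's low watermark.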
5906 */ 5907 static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, 5908 int write, void *buffer, size_t *length, loff_t *ppos) 5909 { 5910 struct zone *zone; 5911 int old_percpu_pagelist_high_fraction; 5912 int ret; 5913 5914 mutex_lock(&pcp_batch_high_lock); 5915 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 5916 5917 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5918 if (!write || ret < 0) 5919 goto out; 5920 5921 /* Sanity checking to avoid pcp imbalance */ 5922 if (percpu_pagelist_high_fraction && 5923 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 5924 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 5925 ret = -EINVAL; 5926 goto out; 5927 } 5928 5929 /* No change? */ 5930 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 5931 goto out; 5932 5933 for_each_populated_zone(zone) 5934 zone_set_pageset_high_and_batch(zone, 0); 5935 out: 5936 mutex_unlock(&pcp_batch_high_lock); 5937 return ret; 5938 } 5939 5940 static struct ctl_table page_alloc_sysctl_table[] = { 5941 { 5942 .procname = "min_free_kbytes", 5943 .data = &min_free_kbytes, 5944 .maxlen = sizeof(min_free_kbytes), 5945 .mode = 0644, 5946 .proc_handler = min_free_kbytes_sysctl_handler, 5947 .extra1 = SYSCTL_ZERO, 5948 }, 5949 { 5950 .procname = "watermark_boost_factor", 5951 .data = &watermark_boost_factor, 5952 .maxlen = sizeof(watermark_boost_factor), 5953 .mode = 0644, 5954 .proc_handler = proc_dointvec_minmax, 5955 .extra1 = SYSCTL_ZERO, 5956 }, 5957 { 5958 .procname = "watermark_scale_factor", 5959 .data = &watermark_scale_factor, 5960 .maxlen = sizeof(watermark_scale_factor), 5961 .mode = 0644, 5962 .proc_handler = watermark_scale_factor_sysctl_handler, 5963 .extra1 = SYSCTL_ONE, 5964 .extra2 = SYSCTL_THREE_THOUSAND, 5965 }, 5966 { 5967 .procname = "percpu_pagelist_high_fraction", 5968 .data = &percpu_pagelist_high_fraction, 5969 .maxlen = sizeof(percpu_pagelist_high_fraction), 5970 .mode = 0644, 5971 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 5972 .extra1 = SYSCTL_ZERO, 5973 }, 5974 { 5975 .procname = "lowmem_reserve_ratio", 5976 .data = &sysctl_lowmem_reserve_ratio, 5977 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 5978 .mode = 0644, 5979 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 5980 }, 5981 #ifdef CONFIG_NUMA 5982 { 5983 .procname = "numa_zonelist_order", 5984 .data = &numa_zonelist_order, 5985 .maxlen = NUMA_ZONELIST_ORDER_LEN, 5986 .mode = 0644, 5987 .proc_handler = numa_zonelist_order_handler, 5988 }, 5989 { 5990 .procname = "min_unmapped_ratio", 5991 .data = &sysctl_min_unmapped_ratio, 5992 .maxlen = sizeof(sysctl_min_unmapped_ratio), 5993 .mode = 0644, 5994 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 5995 .extra1 = SYSCTL_ZERO, 5996 .extra2 = SYSCTL_ONE_HUNDRED, 5997 }, 5998 { 5999 .procname = "min_slab_ratio", 6000 .data = &sysctl_min_slab_ratio, 6001 .maxlen = sizeof(sysctl_min_slab_ratio), 6002 .mode = 0644, 6003 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6004 .extra1 = SYSCTL_ZERO, 6005 .extra2 = SYSCTL_ONE_HUNDRED, 6006 }, 6007 #endif 6008 {} 6009 }; 6010 6011 void __init page_alloc_sysctl_init(void) 6012 { 6013 register_sysctl_init("vm", page_alloc_sysctl_table); 6014 } 6015 6016 #ifdef CONFIG_CONTIG_ALLOC 6017 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6018 static void alloc_contig_dump_pages(struct list_head *page_list) 6019 { 6020 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6021 6022 if 
(DYNAMIC_DEBUG_BRANCH(descriptor)) { 6023 struct page *page; 6024 6025 dump_stack(); 6026 list_for_each_entry(page, page_list, lru) 6027 dump_page(page, "migration failure"); 6028 } 6029 } 6030 6031 /* [start, end) must belong to a single zone. */ 6032 int __alloc_contig_migrate_range(struct compact_control *cc, 6033 unsigned long start, unsigned long end) 6034 { 6035 /* This function is based on compact_zone() from compaction.c. */ 6036 unsigned int nr_reclaimed; 6037 unsigned long pfn = start; 6038 unsigned int tries = 0; 6039 int ret = 0; 6040 struct migration_target_control mtc = { 6041 .nid = zone_to_nid(cc->zone), 6042 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 6043 }; 6044 6045 lru_cache_disable(); 6046 6047 while (pfn < end || !list_empty(&cc->migratepages)) { 6048 if (fatal_signal_pending(current)) { 6049 ret = -EINTR; 6050 break; 6051 } 6052 6053 if (list_empty(&cc->migratepages)) { 6054 cc->nr_migratepages = 0; 6055 ret = isolate_migratepages_range(cc, pfn, end); 6056 if (ret && ret != -EAGAIN) 6057 break; 6058 pfn = cc->migrate_pfn; 6059 tries = 0; 6060 } else if (++tries == 5) { 6061 ret = -EBUSY; 6062 break; 6063 } 6064 6065 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6066 &cc->migratepages); 6067 cc->nr_migratepages -= nr_reclaimed; 6068 6069 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6070 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6071 6072 /* 6073 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6074 * to retry again over this error, so do the same here. 6075 */ 6076 if (ret == -ENOMEM) 6077 break; 6078 } 6079 6080 lru_cache_enable(); 6081 if (ret < 0) { 6082 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6083 alloc_contig_dump_pages(&cc->migratepages); 6084 putback_movable_pages(&cc->migratepages); 6085 return ret; 6086 } 6087 return 0; 6088 } 6089 6090 /** 6091 * alloc_contig_range() -- tries to allocate given range of pages 6092 * @start: start PFN to allocate 6093 * @end: one-past-the-last PFN to allocate 6094 * @migratetype: migratetype of the underlying pageblocks (either 6095 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6096 * in range must have the same migratetype and it must 6097 * be either of the two. 6098 * @gfp_mask: GFP mask to use during compaction 6099 * 6100 * The PFN range does not have to be pageblock aligned. The PFN range must 6101 * belong to a single zone. 6102 * 6103 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6104 * pageblocks in the range. Once isolated, the pageblocks should not 6105 * be modified by others. 6106 * 6107 * Return: zero on success or negative error code. On success all 6108 * pages which PFN is in [start, end) are allocated for the caller and 6109 * need to be freed with free_contig_range(). 6110 */ 6111 int alloc_contig_range(unsigned long start, unsigned long end, 6112 unsigned migratetype, gfp_t gfp_mask) 6113 { 6114 unsigned long outer_start, outer_end; 6115 int order; 6116 int ret = 0; 6117 6118 struct compact_control cc = { 6119 .nr_migratepages = 0, 6120 .order = -1, 6121 .zone = page_zone(pfn_to_page(start)), 6122 .mode = MIGRATE_SYNC, 6123 .ignore_skip_hint = true, 6124 .no_set_skip_hint = true, 6125 .gfp_mask = current_gfp_context(gfp_mask), 6126 .alloc_contig = true, 6127 }; 6128 INIT_LIST_HEAD(&cc.migratepages); 6129 6130 /* 6131 * What we do here is we mark all pageblocks in range as 6132 * MIGRATE_ISOLATE. 
Because pageblock and max order pages may 6133 * have different sizes, and due to the way page allocator 6134 * work, start_isolate_page_range() has special handlings for this. 6135 * 6136 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6137 * migrate the pages from an unaligned range (ie. pages that 6138 * we are interested in). This will put all the pages in 6139 * range back to page allocator as MIGRATE_ISOLATE. 6140 * 6141 * When this is done, we take the pages in range from page 6142 * allocator removing them from the buddy system. This way 6143 * page allocator will never consider using them. 6144 * 6145 * This lets us mark the pageblocks back as 6146 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6147 * aligned range but not in the unaligned, original range are 6148 * put back to page allocator so that buddy can use them. 6149 */ 6150 6151 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); 6152 if (ret) 6153 goto done; 6154 6155 drain_all_pages(cc.zone); 6156 6157 /* 6158 * In case of -EBUSY, we'd like to know which page causes problem. 6159 * So, just fall through. test_pages_isolated() has a tracepoint 6160 * which will report the busy page. 6161 * 6162 * It is possible that busy pages could become available before 6163 * the call to test_pages_isolated, and the range will actually be 6164 * allocated. So, if we fall through be sure to clear ret so that 6165 * -EBUSY is not accidentally used or returned to caller. 6166 */ 6167 ret = __alloc_contig_migrate_range(&cc, start, end); 6168 if (ret && ret != -EBUSY) 6169 goto done; 6170 ret = 0; 6171 6172 /* 6173 * Pages from [start, end) are within a pageblock_nr_pages 6174 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6175 * more, all pages in [start, end) are free in page allocator. 6176 * What we are going to do is to allocate all pages from 6177 * [start, end) (that is remove them from page allocator). 6178 * 6179 * The only problem is that pages at the beginning and at the 6180 * end of interesting range may be not aligned with pages that 6181 * page allocator holds, ie. they can be part of higher order 6182 * pages. Because of this, we reserve the bigger range and 6183 * once this is done free the pages we are not interested in. 6184 * 6185 * We don't have to hold zone->lock here because the pages are 6186 * isolated thus they won't get removed from buddy. 6187 */ 6188 6189 order = 0; 6190 outer_start = start; 6191 while (!PageBuddy(pfn_to_page(outer_start))) { 6192 if (++order > MAX_ORDER) { 6193 outer_start = start; 6194 break; 6195 } 6196 outer_start &= ~0UL << order; 6197 } 6198 6199 if (outer_start != start) { 6200 order = buddy_order(pfn_to_page(outer_start)); 6201 6202 /* 6203 * outer_start page could be small order buddy page and 6204 * it doesn't include start page. Adjust outer_start 6205 * in this case to report failed page properly 6206 * on tracepoint in test_pages_isolated() 6207 */ 6208 if (outer_start + (1UL << order) <= start) 6209 outer_start = start; 6210 } 6211 6212 /* Make sure the range is really isolated. */ 6213 if (test_pages_isolated(outer_start, end, 0)) { 6214 ret = -EBUSY; 6215 goto done; 6216 } 6217 6218 /* Grab isolated pages from freelists. 
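 * The grabbed range [outer_start, outer_end) may be wider than the
 * requested [start, end) because of the buddy-order rounding above; the
 * excess head and tail pages are simply freed again below.  For example
 * (hypothetical PFNs), if start = 515 lands inside a free order-2 buddy
 * whose head is at pfn 512, outer_start is rounded down to 512 and the
 * pages [512, 515) are handed back via free_contig_range().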
*/ 6219 outer_end = isolate_freepages_range(&cc, outer_start, end); 6220 if (!outer_end) { 6221 ret = -EBUSY; 6222 goto done; 6223 } 6224 6225 /* Free head and tail (if any) */ 6226 if (start != outer_start) 6227 free_contig_range(outer_start, start - outer_start); 6228 if (end != outer_end) 6229 free_contig_range(end, outer_end - end); 6230 6231 done: 6232 undo_isolate_page_range(start, end, migratetype); 6233 return ret; 6234 } 6235 EXPORT_SYMBOL(alloc_contig_range); 6236 6237 static int __alloc_contig_pages(unsigned long start_pfn, 6238 unsigned long nr_pages, gfp_t gfp_mask) 6239 { 6240 unsigned long end_pfn = start_pfn + nr_pages; 6241 6242 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, 6243 gfp_mask); 6244 } 6245 6246 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6247 unsigned long nr_pages) 6248 { 6249 unsigned long i, end_pfn = start_pfn + nr_pages; 6250 struct page *page; 6251 6252 for (i = start_pfn; i < end_pfn; i++) { 6253 page = pfn_to_online_page(i); 6254 if (!page) 6255 return false; 6256 6257 if (page_zone(page) != z) 6258 return false; 6259 6260 if (PageReserved(page)) 6261 return false; 6262 6263 if (PageHuge(page)) 6264 return false; 6265 } 6266 return true; 6267 } 6268 6269 static bool zone_spans_last_pfn(const struct zone *zone, 6270 unsigned long start_pfn, unsigned long nr_pages) 6271 { 6272 unsigned long last_pfn = start_pfn + nr_pages - 1; 6273 6274 return zone_spans_pfn(zone, last_pfn); 6275 } 6276 6277 /** 6278 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 6279 * @nr_pages: Number of contiguous pages to allocate 6280 * @gfp_mask: GFP mask to limit search and used during compaction 6281 * @nid: Target node 6282 * @nodemask: Mask for other possible nodes 6283 * 6284 * This routine is a wrapper around alloc_contig_range(). It scans over zones 6285 * on an applicable zonelist to find a contiguous pfn range which can then be 6286 * tried for allocation with alloc_contig_range(). This routine is intended 6287 * for allocation requests which can not be fulfilled with the buddy allocator. 6288 * 6289 * The allocated memory is always aligned to a page boundary. If nr_pages is a 6290 * power of two, then allocated range is also guaranteed to be aligned to same 6291 * nr_pages (e.g. 1GB request would be aligned to 1GB). 6292 * 6293 * Allocated pages can be freed with free_contig_range() or by manually calling 6294 * __free_page() on each allocated page. 6295 * 6296 * Return: pointer to contiguous pages on success, or NULL if not successful. 6297 */ 6298 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, 6299 int nid, nodemask_t *nodemask) 6300 { 6301 unsigned long ret, pfn, flags; 6302 struct zonelist *zonelist; 6303 struct zone *zone; 6304 struct zoneref *z; 6305 6306 zonelist = node_zonelist(nid, gfp_mask); 6307 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6308 gfp_zone(gfp_mask), nodemask) { 6309 spin_lock_irqsave(&zone->lock, flags); 6310 6311 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 6312 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 6313 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 6314 /* 6315 * We release the zone lock here because 6316 * alloc_contig_range() will also lock the zone 6317 * at some point. If there's an allocation 6318 * spinning on this lock, it may win the race 6319 * and cause alloc_contig_range() to fail... 
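 * If that happens, we simply re-take the zone lock below and move on to
 * the next nr_pages-aligned candidate range instead of giving up on the
 * whole zone.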
6320 */ 6321 spin_unlock_irqrestore(&zone->lock, flags); 6322 ret = __alloc_contig_pages(pfn, nr_pages, 6323 gfp_mask); 6324 if (!ret) 6325 return pfn_to_page(pfn); 6326 spin_lock_irqsave(&zone->lock, flags); 6327 } 6328 pfn += nr_pages; 6329 } 6330 spin_unlock_irqrestore(&zone->lock, flags); 6331 } 6332 return NULL; 6333 } 6334 #endif /* CONFIG_CONTIG_ALLOC */ 6335 6336 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 6337 { 6338 unsigned long count = 0; 6339 6340 for (; nr_pages--; pfn++) { 6341 struct page *page = pfn_to_page(pfn); 6342 6343 count += page_count(page) != 1; 6344 __free_page(page); 6345 } 6346 WARN(count != 0, "%lu pages are still in use!\n", count); 6347 } 6348 EXPORT_SYMBOL(free_contig_range); 6349 6350 /* 6351 * Effectively disable pcplists for the zone by setting the high limit to 0 6352 * and draining all cpus. A concurrent page freeing on another CPU that's about 6353 * to put the page on pcplist will either finish before the drain and the page 6354 * will be drained, or observe the new high limit and skip the pcplist. 6355 * 6356 * Must be paired with a call to zone_pcp_enable(). 6357 */ 6358 void zone_pcp_disable(struct zone *zone) 6359 { 6360 mutex_lock(&pcp_batch_high_lock); 6361 __zone_set_pageset_high_and_batch(zone, 0, 1); 6362 __drain_all_pages(zone, true); 6363 } 6364 6365 void zone_pcp_enable(struct zone *zone) 6366 { 6367 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); 6368 mutex_unlock(&pcp_batch_high_lock); 6369 } 6370 6371 void zone_pcp_reset(struct zone *zone) 6372 { 6373 int cpu; 6374 struct per_cpu_zonestat *pzstats; 6375 6376 if (zone->per_cpu_pageset != &boot_pageset) { 6377 for_each_online_cpu(cpu) { 6378 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6379 drain_zonestat(zone, pzstats); 6380 } 6381 free_percpu(zone->per_cpu_pageset); 6382 zone->per_cpu_pageset = &boot_pageset; 6383 if (zone->per_cpu_zonestats != &boot_zonestats) { 6384 free_percpu(zone->per_cpu_zonestats); 6385 zone->per_cpu_zonestats = &boot_zonestats; 6386 } 6387 } 6388 } 6389 6390 #ifdef CONFIG_MEMORY_HOTREMOVE 6391 /* 6392 * All pages in the range must be in a single zone, must not contain holes, 6393 * must span full sections, and must be isolated before calling this function. 6394 */ 6395 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 6396 { 6397 unsigned long pfn = start_pfn; 6398 struct page *page; 6399 struct zone *zone; 6400 unsigned int order; 6401 unsigned long flags; 6402 6403 offline_mem_sections(pfn, end_pfn); 6404 zone = page_zone(pfn_to_page(pfn)); 6405 spin_lock_irqsave(&zone->lock, flags); 6406 while (pfn < end_pfn) { 6407 page = pfn_to_page(pfn); 6408 /* 6409 * The HWPoisoned page may be not in buddy system, and 6410 * page_count() is not 0. 6411 */ 6412 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6413 pfn++; 6414 continue; 6415 } 6416 /* 6417 * At this point all remaining PageOffline() pages have a 6418 * reference count of 0 and can simply be skipped. 6419 */ 6420 if (PageOffline(page)) { 6421 BUG_ON(page_count(page)); 6422 BUG_ON(PageBuddy(page)); 6423 pfn++; 6424 continue; 6425 } 6426 6427 BUG_ON(page_count(page)); 6428 BUG_ON(!PageBuddy(page)); 6429 order = buddy_order(page); 6430 del_page_from_free_list(page, zone, order); 6431 pfn += (1 << order); 6432 } 6433 spin_unlock_irqrestore(&zone->lock, flags); 6434 } 6435 #endif 6436 6437 /* 6438 * This function returns a stable result only if called under zone lock. 
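 *
 * A caller that needs a reliable answer is expected to hold the zone
 * lock around the call, along the lines of (a sketch, not copied from
 * any in-tree caller):
 *
 *	spin_lock_irqsave(&zone->lock, flags);
 *	free = is_free_buddy_page(page);
 *	spin_unlock_irqrestore(&zone->lock, flags);
 *
 * Without the lock the result is only a hint, as the page can be
 * merged, split or allocated concurrently.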
6439 */ 6440 bool is_free_buddy_page(struct page *page) 6441 { 6442 unsigned long pfn = page_to_pfn(page); 6443 unsigned int order; 6444 6445 for (order = 0; order <= MAX_ORDER; order++) { 6446 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6447 6448 if (PageBuddy(page_head) && 6449 buddy_order_unsafe(page_head) >= order) 6450 break; 6451 } 6452 6453 return order <= MAX_ORDER; 6454 } 6455 EXPORT_SYMBOL(is_free_buddy_page); 6456 6457 #ifdef CONFIG_MEMORY_FAILURE 6458 /* 6459 * Break down a higher-order page in sub-pages, and keep our target out of 6460 * buddy allocator. 6461 */ 6462 static void break_down_buddy_pages(struct zone *zone, struct page *page, 6463 struct page *target, int low, int high, 6464 int migratetype) 6465 { 6466 unsigned long size = 1 << high; 6467 struct page *current_buddy, *next_page; 6468 6469 while (high > low) { 6470 high--; 6471 size >>= 1; 6472 6473 if (target >= &page[size]) { 6474 next_page = page + size; 6475 current_buddy = page; 6476 } else { 6477 next_page = page; 6478 current_buddy = page + size; 6479 } 6480 page = next_page; 6481 6482 if (set_page_guard(zone, current_buddy, high, migratetype)) 6483 continue; 6484 6485 if (current_buddy != target) { 6486 add_to_free_list(current_buddy, zone, high, migratetype); 6487 set_buddy_order(current_buddy, high); 6488 } 6489 } 6490 } 6491 6492 /* 6493 * Take a page that will be marked as poisoned off the buddy allocator. 6494 */ 6495 bool take_page_off_buddy(struct page *page) 6496 { 6497 struct zone *zone = page_zone(page); 6498 unsigned long pfn = page_to_pfn(page); 6499 unsigned long flags; 6500 unsigned int order; 6501 bool ret = false; 6502 6503 spin_lock_irqsave(&zone->lock, flags); 6504 for (order = 0; order <= MAX_ORDER; order++) { 6505 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6506 int page_order = buddy_order(page_head); 6507 6508 if (PageBuddy(page_head) && page_order >= order) { 6509 unsigned long pfn_head = page_to_pfn(page_head); 6510 int migratetype = get_pfnblock_migratetype(page_head, 6511 pfn_head); 6512 6513 del_page_from_free_list(page_head, zone, page_order); 6514 break_down_buddy_pages(zone, page_head, page, 0, 6515 page_order, migratetype); 6516 SetPageHWPoisonTakenOff(page); 6517 if (!is_migrate_isolate(migratetype)) 6518 __mod_zone_freepage_state(zone, -1, migratetype); 6519 ret = true; 6520 break; 6521 } 6522 if (page_count(page_head) > 0) 6523 break; 6524 } 6525 spin_unlock_irqrestore(&zone->lock, flags); 6526 return ret; 6527 } 6528 6529 /* 6530 * Cancel takeoff done by take_page_off_buddy(). 
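 *
 * Pairs with take_page_off_buddy() above: the page goes back to the
 * buddy allocator at order 0 and its HWPoison/HWPoisonTakenOff state is
 * cleared, e.g. when the memory-failure code decides to unpoison a page
 * it had previously isolated.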
6531 */ 6532 bool put_page_back_buddy(struct page *page) 6533 { 6534 struct zone *zone = page_zone(page); 6535 unsigned long pfn = page_to_pfn(page); 6536 unsigned long flags; 6537 int migratetype = get_pfnblock_migratetype(page, pfn); 6538 bool ret = false; 6539 6540 spin_lock_irqsave(&zone->lock, flags); 6541 if (put_page_testzero(page)) { 6542 ClearPageHWPoisonTakenOff(page); 6543 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 6544 if (TestClearPageHWPoison(page)) { 6545 ret = true; 6546 } 6547 } 6548 spin_unlock_irqrestore(&zone->lock, flags); 6549 6550 return ret; 6551 } 6552 #endif 6553 6554 #ifdef CONFIG_ZONE_DMA 6555 bool has_managed_dma(void) 6556 { 6557 struct pglist_data *pgdat; 6558 6559 for_each_online_pgdat(pgdat) { 6560 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 6561 6562 if (managed_zone(zone)) 6563 return true; 6564 } 6565 return false; 6566 } 6567 #endif /* CONFIG_ZONE_DMA */ 6568 6569 #ifdef CONFIG_UNACCEPTED_MEMORY 6570 6571 /* Counts number of zones with unaccepted pages. */ 6572 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); 6573 6574 static bool lazy_accept = true; 6575 6576 static int __init accept_memory_parse(char *p) 6577 { 6578 if (!strcmp(p, "lazy")) { 6579 lazy_accept = true; 6580 return 0; 6581 } else if (!strcmp(p, "eager")) { 6582 lazy_accept = false; 6583 return 0; 6584 } else { 6585 return -EINVAL; 6586 } 6587 } 6588 early_param("accept_memory", accept_memory_parse); 6589 6590 static bool page_contains_unaccepted(struct page *page, unsigned int order) 6591 { 6592 phys_addr_t start = page_to_phys(page); 6593 phys_addr_t end = start + (PAGE_SIZE << order); 6594 6595 return range_contains_unaccepted_memory(start, end); 6596 } 6597 6598 static void accept_page(struct page *page, unsigned int order) 6599 { 6600 phys_addr_t start = page_to_phys(page); 6601 6602 accept_memory(start, start + (PAGE_SIZE << order)); 6603 } 6604 6605 static bool try_to_accept_memory_one(struct zone *zone) 6606 { 6607 unsigned long flags; 6608 struct page *page; 6609 bool last; 6610 6611 if (list_empty(&zone->unaccepted_pages)) 6612 return false; 6613 6614 spin_lock_irqsave(&zone->lock, flags); 6615 page = list_first_entry_or_null(&zone->unaccepted_pages, 6616 struct page, lru); 6617 if (!page) { 6618 spin_unlock_irqrestore(&zone->lock, flags); 6619 return false; 6620 } 6621 6622 list_del(&page->lru); 6623 last = list_empty(&zone->unaccepted_pages); 6624 6625 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6626 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 6627 spin_unlock_irqrestore(&zone->lock, flags); 6628 6629 accept_page(page, MAX_ORDER); 6630 6631 __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL); 6632 6633 if (last) 6634 static_branch_dec(&zones_with_unaccepted_pages); 6635 6636 return true; 6637 } 6638 6639 static bool try_to_accept_memory(struct zone *zone, unsigned int order) 6640 { 6641 long to_accept; 6642 int ret = false; 6643 6644 /* How much to accept to get to high watermark? 
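 * Roughly: the shortfall is the high watermark minus the free pages that
 * actually count towards the watermark check, i.e. NR_FREE_PAGES with
 * the watermark-unusable portion (as computed by
 * __zone_watermark_unusable_free()) subtracted first.  The loop below
 * then accepts one MAX_ORDER chunk at a time until that shortfall is
 * covered (attempting at least one chunk).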
*/ 6645 to_accept = high_wmark_pages(zone) - 6646 (zone_page_state(zone, NR_FREE_PAGES) - 6647 __zone_watermark_unusable_free(zone, order, 0)); 6648 6649 /* Accept at least one page */ 6650 do { 6651 if (!try_to_accept_memory_one(zone)) 6652 break; 6653 ret = true; 6654 to_accept -= MAX_ORDER_NR_PAGES; 6655 } while (to_accept > 0); 6656 6657 return ret; 6658 } 6659 6660 static inline bool has_unaccepted_memory(void) 6661 { 6662 return static_branch_unlikely(&zones_with_unaccepted_pages); 6663 } 6664 6665 static bool __free_unaccepted(struct page *page) 6666 { 6667 struct zone *zone = page_zone(page); 6668 unsigned long flags; 6669 bool first = false; 6670 6671 if (!lazy_accept) 6672 return false; 6673 6674 spin_lock_irqsave(&zone->lock, flags); 6675 first = list_empty(&zone->unaccepted_pages); 6676 list_add_tail(&page->lru, &zone->unaccepted_pages); 6677 __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6678 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 6679 spin_unlock_irqrestore(&zone->lock, flags); 6680 6681 if (first) 6682 static_branch_inc(&zones_with_unaccepted_pages); 6683 6684 return true; 6685 } 6686 6687 #else 6688 6689 static bool page_contains_unaccepted(struct page *page, unsigned int order) 6690 { 6691 return false; 6692 } 6693 6694 static void accept_page(struct page *page, unsigned int order) 6695 { 6696 } 6697 6698 static bool try_to_accept_memory(struct zone *zone, unsigned int order) 6699 { 6700 return false; 6701 } 6702 6703 static inline bool has_unaccepted_memory(void) 6704 { 6705 return false; 6706 } 6707 6708 static bool __free_unaccepted(struct page *page) 6709 { 6710 BUILD_BUG(); 6711 return false; 6712 } 6713 6714 #endif /* CONFIG_UNACCEPTED_MEMORY */ 6715