1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/mm/page_alloc.c 4 * 5 * Manages the free list, the system allocates free pages here. 6 * Note that kmalloc() lives in slab.c 7 * 8 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 9 * Swap reorganised 29.12.95, Stephen Tweedie 10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 11 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999 12 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999 13 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000 14 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002 15 * (lots of bits borrowed from Ingo Molnar & Andrew Morton) 16 */ 17 18 #include <linux/stddef.h> 19 #include <linux/mm.h> 20 #include <linux/highmem.h> 21 #include <linux/interrupt.h> 22 #include <linux/jiffies.h> 23 #include <linux/compiler.h> 24 #include <linux/kernel.h> 25 #include <linux/kasan.h> 26 #include <linux/kmsan.h> 27 #include <linux/module.h> 28 #include <linux/suspend.h> 29 #include <linux/ratelimit.h> 30 #include <linux/oom.h> 31 #include <linux/topology.h> 32 #include <linux/sysctl.h> 33 #include <linux/cpu.h> 34 #include <linux/cpuset.h> 35 #include <linux/memory_hotplug.h> 36 #include <linux/nodemask.h> 37 #include <linux/vmstat.h> 38 #include <linux/fault-inject.h> 39 #include <linux/compaction.h> 40 #include <trace/events/kmem.h> 41 #include <trace/events/oom.h> 42 #include <linux/prefetch.h> 43 #include <linux/mm_inline.h> 44 #include <linux/mmu_notifier.h> 45 #include <linux/migrate.h> 46 #include <linux/sched/mm.h> 47 #include <linux/page_owner.h> 48 #include <linux/page_table_check.h> 49 #include <linux/memcontrol.h> 50 #include <linux/ftrace.h> 51 #include <linux/lockdep.h> 52 #include <linux/psi.h> 53 #include <linux/khugepaged.h> 54 #include <linux/delayacct.h> 55 #include <asm/div64.h> 56 #include "internal.h" 57 #include "shuffle.h" 58 #include "page_reporting.h" 59 60 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ 61 typedef int __bitwise fpi_t; 62 63 /* No special request */ 64 #define FPI_NONE ((__force fpi_t)0) 65 66 /* 67 * Skip free page reporting notification for the (possibly merged) page. 68 * This does not hinder free page reporting from grabbing the page, 69 * reporting it and marking it "reported" - it only skips notifying 70 * the free page reporting infrastructure about a newly freed page. For 71 * example, used when temporarily pulling a page from a freelist and 72 * putting it back unmodified. 73 */ 74 #define FPI_SKIP_REPORT_NOTIFY ((__force fpi_t)BIT(0)) 75 76 /* 77 * Place the (possibly merged) page to the tail of the freelist. Will ignore 78 * page shuffling (relevant code - e.g., memory onlining - is expected to 79 * shuffle the whole zone). 80 * 81 * Note: No code should rely on this flag for correctness - it's purely 82 * to allow for optimizations when handing back either fresh pages 83 * (memory onlining) or untouched pages (page isolation, free page 84 * reporting). 85 */ 86 #define FPI_TO_TAIL ((__force fpi_t)BIT(1)) 87 88 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 89 static DEFINE_MUTEX(pcp_batch_high_lock); 90 #define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8) 91 92 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT) 93 /* 94 * On SMP, spin_trylock is sufficient protection. 95 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP. 
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * The return value should be used with the equivalent unlock helper.
 */
#define pcpu_spin_lock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	spin_lock(&_ret->member);					\
	_ret;								\
})

#define pcpu_spin_trylock(type, member, ptr)				\
({									\
	type *_ret;							\
	pcpu_task_pin();						\
	_ret = this_cpu_ptr(ptr);					\
	if (!spin_trylock(&_ret->member)) {				\
		pcpu_task_unpin();					\
		_ret = NULL;						\
	}								\
	_ret;								\
})

#define pcpu_spin_unlock(member, ptr)					\
({									\
	spin_unlock(&ptr->member);					\
	pcpu_task_unpin();						\
})

/* struct per_cpu_pages specific helpers. */
#define pcp_spin_lock(ptr)						\
	pcpu_spin_lock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_trylock(ptr)						\
	pcpu_spin_trylock(struct per_cpu_pages, lock, ptr)

#define pcp_spin_unlock(ptr)						\
	pcpu_spin_unlock(lock, ptr)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

static DEFINE_MUTEX(pcpu_drain_mutex);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist.
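 * (A usage sketch for the pcp locking helpers above follows; this note
 *  continues after it.)
 */

/*
 * Illustrative sketch only, not part of page_alloc.c (the function name is
 * made up): how pcp_trylock_prepare()/pcp_trylock_finish() and
 * pcp_spin_trylock()/pcp_spin_unlock() are meant to be combined, mirroring
 * the pattern used by the allocator's pcplist fast paths.
 */
static inline void pcp_locking_sketch(struct zone *zone)
{
	unsigned long __maybe_unused UP_flags;
	struct per_cpu_pages *pcp;

	/* No-op on SMP; disables IRQs on UP, where spin_trylock cannot fail. */
	pcp_trylock_prepare(UP_flags);

	/* Pin the task to this CPU, then trylock its per-cpu pageset. */
	pcp = pcp_spin_trylock(zone->per_cpu_pageset);
	if (pcp) {
		/* ... operate on pcp->count / pcp->lists here ... */
		pcp_spin_unlock(pcp);	/* unlocks, then unpins the task */
	}

	pcp_trylock_finish(UP_flags);
}

/*
 * (pcplist migratetype cache, continued:)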
Used to avoid the pageblock migratetype lookup when 210 * freeing from pcplists in most cases, at the cost of possibly becoming stale. 211 * Also the migratetype set in the page does not necessarily match the pcplist 212 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any 213 * other index - this ensures that it will be put on the correct CMA freelist. 214 */ 215 static inline int get_pcppage_migratetype(struct page *page) 216 { 217 return page->index; 218 } 219 220 static inline void set_pcppage_migratetype(struct page *page, int migratetype) 221 { 222 page->index = migratetype; 223 } 224 225 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 226 unsigned int pageblock_order __read_mostly; 227 #endif 228 229 static void __free_pages_ok(struct page *page, unsigned int order, 230 fpi_t fpi_flags); 231 232 /* 233 * results with 256, 32 in the lowmem_reserve sysctl: 234 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 235 * 1G machine -> (16M dma, 784M normal, 224M high) 236 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 237 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 238 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 239 * 240 * TBD: should special case ZONE_DMA32 machines here - in those we normally 241 * don't need any ZONE_NORMAL reservation 242 */ 243 static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 244 #ifdef CONFIG_ZONE_DMA 245 [ZONE_DMA] = 256, 246 #endif 247 #ifdef CONFIG_ZONE_DMA32 248 [ZONE_DMA32] = 256, 249 #endif 250 [ZONE_NORMAL] = 32, 251 #ifdef CONFIG_HIGHMEM 252 [ZONE_HIGHMEM] = 0, 253 #endif 254 [ZONE_MOVABLE] = 0, 255 }; 256 257 char * const zone_names[MAX_NR_ZONES] = { 258 #ifdef CONFIG_ZONE_DMA 259 "DMA", 260 #endif 261 #ifdef CONFIG_ZONE_DMA32 262 "DMA32", 263 #endif 264 "Normal", 265 #ifdef CONFIG_HIGHMEM 266 "HighMem", 267 #endif 268 "Movable", 269 #ifdef CONFIG_ZONE_DEVICE 270 "Device", 271 #endif 272 }; 273 274 const char * const migratetype_names[MIGRATE_TYPES] = { 275 "Unmovable", 276 "Movable", 277 "Reclaimable", 278 "HighAtomic", 279 #ifdef CONFIG_CMA 280 "CMA", 281 #endif 282 #ifdef CONFIG_MEMORY_ISOLATION 283 "Isolate", 284 #endif 285 }; 286 287 int min_free_kbytes = 1024; 288 int user_min_free_kbytes = -1; 289 static int watermark_boost_factor __read_mostly = 15000; 290 static int watermark_scale_factor = 10; 291 292 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 293 int movable_zone; 294 EXPORT_SYMBOL(movable_zone); 295 296 #if MAX_NUMNODES > 1 297 unsigned int nr_node_ids __read_mostly = MAX_NUMNODES; 298 unsigned int nr_online_nodes __read_mostly = 1; 299 EXPORT_SYMBOL(nr_node_ids); 300 EXPORT_SYMBOL(nr_online_nodes); 301 #endif 302 303 static bool page_contains_unaccepted(struct page *page, unsigned int order); 304 static void accept_page(struct page *page, unsigned int order); 305 static bool cond_accept_memory(struct zone *zone, unsigned int order); 306 static inline bool has_unaccepted_memory(void); 307 static bool __free_unaccepted(struct page *page); 308 309 int page_group_by_mobility_disabled __read_mostly; 310 311 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 312 /* 313 * During boot we initialize deferred pages on-demand, as needed, but once 314 * page_alloc_init_late() has finished, the deferred pages are all initialized, 315 * and we can permanently disable that path. 
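 */

/*
 * Worked example (illustrative numbers only) for the
 * sysctl_lowmem_reserve_ratio table further above: on the 1G machine from
 * that comment, an allocation aimed at ZONE_NORMAL that falls back to
 * ZONE_DMA must leave about 784M/256 ~= 3M of ZONE_DMA free, while a
 * ZONE_HIGHMEM allocation falling back to ZONE_NORMAL must leave
 * 224M/32 = 7M of ZONE_NORMAL free, and falling back further to ZONE_DMA
 * must leave (224M+784M)/256 ~= 4M there. These protections are recomputed
 * from the ratios by setup_per_zone_lowmem_reserve() whenever the sysctl is
 * written.
 */

/*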
316 */ 317 DEFINE_STATIC_KEY_TRUE(deferred_pages); 318 319 static inline bool deferred_pages_enabled(void) 320 { 321 return static_branch_unlikely(&deferred_pages); 322 } 323 324 /* 325 * deferred_grow_zone() is __init, but it is called from 326 * get_page_from_freelist() during early boot until deferred_pages permanently 327 * disables this call. This is why we have refdata wrapper to avoid warning, 328 * and to ensure that the function body gets unloaded. 329 */ 330 static bool __ref 331 _deferred_grow_zone(struct zone *zone, unsigned int order) 332 { 333 return deferred_grow_zone(zone, order); 334 } 335 #else 336 static inline bool deferred_pages_enabled(void) 337 { 338 return false; 339 } 340 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 341 342 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 343 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 344 unsigned long pfn) 345 { 346 #ifdef CONFIG_SPARSEMEM 347 return section_to_usemap(__pfn_to_section(pfn)); 348 #else 349 return page_zone(page)->pageblock_flags; 350 #endif /* CONFIG_SPARSEMEM */ 351 } 352 353 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 354 { 355 #ifdef CONFIG_SPARSEMEM 356 pfn &= (PAGES_PER_SECTION-1); 357 #else 358 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 359 #endif /* CONFIG_SPARSEMEM */ 360 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 361 } 362 363 /** 364 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages 365 * @page: The page within the block of interest 366 * @pfn: The target page frame number 367 * @mask: mask of bits that the caller is interested in 368 * 369 * Return: pageblock_bits flags 370 */ 371 unsigned long get_pfnblock_flags_mask(const struct page *page, 372 unsigned long pfn, unsigned long mask) 373 { 374 unsigned long *bitmap; 375 unsigned long bitidx, word_bitidx; 376 unsigned long word; 377 378 bitmap = get_pageblock_bitmap(page, pfn); 379 bitidx = pfn_to_bitidx(page, pfn); 380 word_bitidx = bitidx / BITS_PER_LONG; 381 bitidx &= (BITS_PER_LONG-1); 382 /* 383 * This races, without locks, with set_pfnblock_flags_mask(). Ensure 384 * a consistent read of the memory array, so that results, even though 385 * racy, are not corrupted. 
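	 *
	 * Worked index example (illustrative, assuming a SPARSEMEM x86-64
	 * style configuration with PAGES_PER_SECTION == 32768,
	 * pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4): pfn 0x12345 maps
	 * to section offset 0x2345, pageblock number 0x2345 >> 9 == 17, so
	 * bitidx == 17 * 4 == 68; the flags therefore sit in bitmap word
	 * 68 / 64 == 1 starting at bit 68 % 64 == 4, and the shift-and-mask
	 * below extracts them.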
386 */ 387 word = READ_ONCE(bitmap[word_bitidx]); 388 return (word >> bitidx) & mask; 389 } 390 391 static __always_inline int get_pfnblock_migratetype(const struct page *page, 392 unsigned long pfn) 393 { 394 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 395 } 396 397 /** 398 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 399 * @page: The page within the block of interest 400 * @flags: The flags to set 401 * @pfn: The target page frame number 402 * @mask: mask of bits that the caller is interested in 403 */ 404 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 405 unsigned long pfn, 406 unsigned long mask) 407 { 408 unsigned long *bitmap; 409 unsigned long bitidx, word_bitidx; 410 unsigned long word; 411 412 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 413 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 414 415 bitmap = get_pageblock_bitmap(page, pfn); 416 bitidx = pfn_to_bitidx(page, pfn); 417 word_bitidx = bitidx / BITS_PER_LONG; 418 bitidx &= (BITS_PER_LONG-1); 419 420 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 421 422 mask <<= bitidx; 423 flags <<= bitidx; 424 425 word = READ_ONCE(bitmap[word_bitidx]); 426 do { 427 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); 428 } 429 430 void set_pageblock_migratetype(struct page *page, int migratetype) 431 { 432 if (unlikely(page_group_by_mobility_disabled && 433 migratetype < MIGRATE_PCPTYPES)) 434 migratetype = MIGRATE_UNMOVABLE; 435 436 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 437 page_to_pfn(page), MIGRATETYPE_MASK); 438 } 439 440 #ifdef CONFIG_DEBUG_VM 441 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 442 { 443 int ret; 444 unsigned seq; 445 unsigned long pfn = page_to_pfn(page); 446 unsigned long sp, start_pfn; 447 448 do { 449 seq = zone_span_seqbegin(zone); 450 start_pfn = zone->zone_start_pfn; 451 sp = zone->spanned_pages; 452 ret = !zone_spans_pfn(zone, pfn); 453 } while (zone_span_seqretry(zone, seq)); 454 455 if (ret) 456 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 457 pfn, zone_to_nid(zone), zone->name, 458 start_pfn, start_pfn + sp); 459 460 return ret; 461 } 462 463 /* 464 * Temporary debugging check for pages not lying within a given zone. 465 */ 466 static int __maybe_unused bad_range(struct zone *zone, struct page *page) 467 { 468 if (page_outside_zone_boundaries(zone, page)) 469 return 1; 470 if (zone != page_zone(page)) 471 return 1; 472 473 return 0; 474 } 475 #else 476 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) 477 { 478 return 0; 479 } 480 #endif 481 482 static void bad_page(struct page *page, const char *reason) 483 { 484 static unsigned long resume; 485 static unsigned long nr_shown; 486 static unsigned long nr_unshown; 487 488 /* 489 * Allow a burst of 60 reports, then keep quiet for that minute; 490 * or allow a steady drip of one report per second. 
491 */ 492 if (nr_shown == 60) { 493 if (time_before(jiffies, resume)) { 494 nr_unshown++; 495 goto out; 496 } 497 if (nr_unshown) { 498 pr_alert( 499 "BUG: Bad page state: %lu messages suppressed\n", 500 nr_unshown); 501 nr_unshown = 0; 502 } 503 nr_shown = 0; 504 } 505 if (nr_shown++ == 0) 506 resume = jiffies + 60 * HZ; 507 508 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", 509 current->comm, page_to_pfn(page)); 510 dump_page(page, reason); 511 512 print_modules(); 513 dump_stack(); 514 out: 515 /* Leave bad fields for debug, except PageBuddy could make trouble */ 516 page_mapcount_reset(page); /* remove PageBuddy */ 517 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); 518 } 519 520 static inline unsigned int order_to_pindex(int migratetype, int order) 521 { 522 bool __maybe_unused movable; 523 524 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 525 if (order > PAGE_ALLOC_COSTLY_ORDER) { 526 VM_BUG_ON(order != pageblock_order); 527 528 movable = migratetype == MIGRATE_MOVABLE; 529 530 return NR_LOWORDER_PCP_LISTS + movable; 531 } 532 #else 533 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 534 #endif 535 536 return (MIGRATE_PCPTYPES * order) + migratetype; 537 } 538 539 static inline int pindex_to_order(unsigned int pindex) 540 { 541 int order = pindex / MIGRATE_PCPTYPES; 542 543 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 544 if (pindex >= NR_LOWORDER_PCP_LISTS) 545 order = pageblock_order; 546 #else 547 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); 548 #endif 549 550 return order; 551 } 552 553 static inline bool pcp_allowed_order(unsigned int order) 554 { 555 if (order <= PAGE_ALLOC_COSTLY_ORDER) 556 return true; 557 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 558 if (order == pageblock_order) 559 return true; 560 #endif 561 return false; 562 } 563 564 static inline void free_the_page(struct page *page, unsigned int order) 565 { 566 if (pcp_allowed_order(order)) /* Via pcp? */ 567 free_unref_page(page, order); 568 else 569 __free_pages_ok(page, order, FPI_NONE); 570 } 571 572 /* 573 * Higher-order pages are called "compound pages". They are structured thusly: 574 * 575 * The first PAGE_SIZE page is called the "head page" and have PG_head set. 576 * 577 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded 578 * in bit 0 of page->compound_head. The rest of bits is pointer to head page. 579 * 580 * The first tail page's ->compound_order holds the order of allocation. 581 * This usage means that zero-order pages may not be compound. 582 */ 583 584 void prep_compound_page(struct page *page, unsigned int order) 585 { 586 int i; 587 int nr_pages = 1 << order; 588 589 __SetPageHead(page); 590 for (i = 1; i < nr_pages; i++) 591 prep_compound_tail(page, i); 592 593 prep_compound_head(page, order); 594 } 595 596 void destroy_large_folio(struct folio *folio) 597 { 598 if (folio_test_hugetlb(folio)) { 599 free_huge_folio(folio); 600 return; 601 } 602 603 folio_unqueue_deferred_split(folio); 604 mem_cgroup_uncharge(folio); 605 free_the_page(&folio->page, folio_order(folio)); 606 } 607 608 static inline void set_buddy_order(struct page *page, unsigned int order) 609 { 610 set_page_private(page, order); 611 __SetPageBuddy(page); 612 } 613 614 #ifdef CONFIG_COMPACTION 615 static inline struct capture_control *task_capc(struct zone *zone) 616 { 617 struct capture_control *capc = current->capture_control; 618 619 return unlikely(capc) && 620 !(current->flags & PF_KTHREAD) && 621 !capc->page && 622 capc->cc->zone == zone ? 
capc : NULL; 623 } 624 625 static inline bool 626 compaction_capture(struct capture_control *capc, struct page *page, 627 int order, int migratetype) 628 { 629 if (!capc || order != capc->cc->order) 630 return false; 631 632 /* Do not accidentally pollute CMA or isolated regions*/ 633 if (is_migrate_cma(migratetype) || 634 is_migrate_isolate(migratetype)) 635 return false; 636 637 /* 638 * Do not let lower order allocations pollute a movable pageblock. 639 * This might let an unmovable request use a reclaimable pageblock 640 * and vice-versa but no more than normal fallback logic which can 641 * have trouble finding a high-order free page. 642 */ 643 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) 644 return false; 645 646 capc->page = page; 647 return true; 648 } 649 650 #else 651 static inline struct capture_control *task_capc(struct zone *zone) 652 { 653 return NULL; 654 } 655 656 static inline bool 657 compaction_capture(struct capture_control *capc, struct page *page, 658 int order, int migratetype) 659 { 660 return false; 661 } 662 #endif /* CONFIG_COMPACTION */ 663 664 /* Used for pages not on another list */ 665 static inline void add_to_free_list(struct page *page, struct zone *zone, 666 unsigned int order, int migratetype) 667 { 668 struct free_area *area = &zone->free_area[order]; 669 670 list_add(&page->buddy_list, &area->free_list[migratetype]); 671 area->nr_free++; 672 } 673 674 /* Used for pages not on another list */ 675 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, 676 unsigned int order, int migratetype) 677 { 678 struct free_area *area = &zone->free_area[order]; 679 680 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); 681 area->nr_free++; 682 } 683 684 /* 685 * Used for pages which are on another list. Move the pages to the tail 686 * of the list - so the moved pages won't immediately be considered for 687 * allocation again (e.g., optimization for memory onlining). 688 */ 689 static inline void move_to_free_list(struct page *page, struct zone *zone, 690 unsigned int order, int migratetype) 691 { 692 struct free_area *area = &zone->free_area[order]; 693 694 list_move_tail(&page->buddy_list, &area->free_list[migratetype]); 695 } 696 697 static inline void del_page_from_free_list(struct page *page, struct zone *zone, 698 unsigned int order) 699 { 700 /* clear reported state and update reported page count */ 701 if (page_reported(page)) 702 __ClearPageReported(page); 703 704 list_del(&page->buddy_list); 705 __ClearPageBuddy(page); 706 set_page_private(page, 0); 707 zone->free_area[order].nr_free--; 708 } 709 710 static inline struct page *get_page_from_free_area(struct free_area *area, 711 int migratetype) 712 { 713 return list_first_entry_or_null(&area->free_list[migratetype], 714 struct page, buddy_list); 715 } 716 717 /* 718 * If this is not the largest possible page, check if the buddy 719 * of the next-highest order is free. If it is, it's possible 720 * that pages are being freed that will coalesce soon. 
In case, 721 * that is happening, add the free page to the tail of the list 722 * so it's less likely to be used soon and more likely to be merged 723 * as a higher order page 724 */ 725 static inline bool 726 buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn, 727 struct page *page, unsigned int order) 728 { 729 unsigned long higher_page_pfn; 730 struct page *higher_page; 731 732 if (order >= MAX_ORDER - 1) 733 return false; 734 735 higher_page_pfn = buddy_pfn & pfn; 736 higher_page = page + (higher_page_pfn - pfn); 737 738 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, 739 NULL) != NULL; 740 } 741 742 /* 743 * Freeing function for a buddy system allocator. 744 * 745 * The concept of a buddy system is to maintain direct-mapped table 746 * (containing bit values) for memory blocks of various "orders". 747 * The bottom level table contains the map for the smallest allocatable 748 * units of memory (here, pages), and each level above it describes 749 * pairs of units from the levels below, hence, "buddies". 750 * At a high level, all that happens here is marking the table entry 751 * at the bottom level available, and propagating the changes upward 752 * as necessary, plus some accounting needed to play nicely with other 753 * parts of the VM system. 754 * At each level, we keep a list of pages, which are heads of continuous 755 * free pages of length of (1 << order) and marked with PageBuddy. 756 * Page's order is recorded in page_private(page) field. 757 * So when we are allocating or freeing one, we can derive the state of the 758 * other. That is, if we allocate a small block, and both were 759 * free, the remainder of the region must be split into blocks. 760 * If a block is freed, and its buddy is also free, then this 761 * triggers coalescing into a block of larger size. 762 * 763 * -- nyc 764 */ 765 766 static inline void __free_one_page(struct page *page, 767 unsigned long pfn, 768 struct zone *zone, unsigned int order, 769 int migratetype, fpi_t fpi_flags) 770 { 771 struct capture_control *capc = task_capc(zone); 772 unsigned long buddy_pfn = 0; 773 unsigned long combined_pfn; 774 struct page *buddy; 775 bool to_tail; 776 777 VM_BUG_ON(!zone_is_initialized(zone)); 778 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 779 780 VM_BUG_ON(migratetype == -1); 781 if (likely(!is_migrate_isolate(migratetype))) 782 __mod_zone_freepage_state(zone, 1 << order, migratetype); 783 784 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 785 VM_BUG_ON_PAGE(bad_range(zone, page), page); 786 787 while (order < MAX_ORDER) { 788 if (compaction_capture(capc, page, order, migratetype)) { 789 __mod_zone_freepage_state(zone, -(1 << order), 790 migratetype); 791 return; 792 } 793 794 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 795 if (!buddy) 796 goto done_merging; 797 798 if (unlikely(order >= pageblock_order)) { 799 /* 800 * We want to prevent merge between freepages on pageblock 801 * without fallbacks and normal pageblock. Without this, 802 * pageblock isolation could cause incorrect freepage or CMA 803 * accounting or HIGHATOMIC accounting. 804 */ 805 int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); 806 807 if (migratetype != buddy_mt 808 && (!migratetype_is_mergeable(migratetype) || 809 !migratetype_is_mergeable(buddy_mt))) 810 goto done_merging; 811 } 812 813 /* 814 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 815 * merge with it and move up one order. 
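		 *
		 * Worked example of the merge arithmetic (illustrative only):
		 * freeing pfn 0x1000 at order 0, find_buddy_page_pfn() looks
		 * at buddy_pfn == pfn ^ (1 << 0) == 0x1001; if that page is a
		 * free order-0 buddy, the merged head is
		 * combined_pfn == buddy_pfn & pfn == 0x1000 and the loop
		 * retries at order 1 (buddy 0x1002), then order 2 (buddy
		 * 0x1004), and so on until a buddy is missing or the
		 * pageblock rules above stop the merge.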
816 */ 817 if (page_is_guard(buddy)) 818 clear_page_guard(zone, buddy, order, migratetype); 819 else 820 del_page_from_free_list(buddy, zone, order); 821 combined_pfn = buddy_pfn & pfn; 822 page = page + (combined_pfn - pfn); 823 pfn = combined_pfn; 824 order++; 825 } 826 827 done_merging: 828 set_buddy_order(page, order); 829 830 if (fpi_flags & FPI_TO_TAIL) 831 to_tail = true; 832 else if (is_shuffle_order(order)) 833 to_tail = shuffle_pick_tail(); 834 else 835 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 836 837 if (to_tail) 838 add_to_free_list_tail(page, zone, order, migratetype); 839 else 840 add_to_free_list(page, zone, order, migratetype); 841 842 /* Notify page reporting subsystem of freed page */ 843 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 844 page_reporting_notify_free(order); 845 } 846 847 /** 848 * split_free_page() -- split a free page at split_pfn_offset 849 * @free_page: the original free page 850 * @order: the order of the page 851 * @split_pfn_offset: split offset within the page 852 * 853 * Return -ENOENT if the free page is changed, otherwise 0 854 * 855 * It is used when the free page crosses two pageblocks with different migratetypes 856 * at split_pfn_offset within the page. The split free page will be put into 857 * separate migratetype lists afterwards. Otherwise, the function achieves 858 * nothing. 859 */ 860 int split_free_page(struct page *free_page, 861 unsigned int order, unsigned long split_pfn_offset) 862 { 863 struct zone *zone = page_zone(free_page); 864 unsigned long free_page_pfn = page_to_pfn(free_page); 865 unsigned long pfn; 866 unsigned long flags; 867 int free_page_order; 868 int mt; 869 int ret = 0; 870 871 if (split_pfn_offset == 0) 872 return ret; 873 874 spin_lock_irqsave(&zone->lock, flags); 875 876 if (!PageBuddy(free_page) || buddy_order(free_page) != order) { 877 ret = -ENOENT; 878 goto out; 879 } 880 881 mt = get_pfnblock_migratetype(free_page, free_page_pfn); 882 if (likely(!is_migrate_isolate(mt))) 883 __mod_zone_freepage_state(zone, -(1UL << order), mt); 884 885 del_page_from_free_list(free_page, zone, order); 886 for (pfn = free_page_pfn; 887 pfn < free_page_pfn + (1UL << order);) { 888 int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn); 889 890 free_page_order = min_t(unsigned int, 891 pfn ? __ffs(pfn) : order, 892 __fls(split_pfn_offset)); 893 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order, 894 mt, FPI_NONE); 895 pfn += 1UL << free_page_order; 896 split_pfn_offset -= (1UL << free_page_order); 897 /* we have done the first part, now switch to second part */ 898 if (split_pfn_offset == 0) 899 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); 900 } 901 out: 902 spin_unlock_irqrestore(&zone->lock, flags); 903 return ret; 904 } 905 /* 906 * A bad page could be due to a number of fields. Instead of multiple branches, 907 * try and check multiple fields with one check. The caller must do a detailed 908 * check if necessary. 
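 * (An illustrative sketch of that trick follows.)
 */

/*
 * Sketch only, not part of the kernel (the helper name is made up): the same
 * "fold several must-be-zero words into one branch" idea that
 * page_expected_state() below applies to ->mapping, the refcount,
 * ->memcg_data and the flags under test.
 */
static inline bool all_clear_sketch(unsigned long a, unsigned long b,
				    unsigned long c)
{
	/* A single OR and a single branch cover the common all-zero case. */
	return (a | b | c) == 0;
}

/*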
909 */ 910 static inline bool page_expected_state(struct page *page, 911 unsigned long check_flags) 912 { 913 if (unlikely(atomic_read(&page->_mapcount) != -1)) 914 return false; 915 916 if (unlikely((unsigned long)page->mapping | 917 page_ref_count(page) | 918 #ifdef CONFIG_MEMCG 919 page->memcg_data | 920 #endif 921 (page->flags & check_flags))) 922 return false; 923 924 return true; 925 } 926 927 static const char *page_bad_reason(struct page *page, unsigned long flags) 928 { 929 const char *bad_reason = NULL; 930 931 if (unlikely(atomic_read(&page->_mapcount) != -1)) 932 bad_reason = "nonzero mapcount"; 933 if (unlikely(page->mapping != NULL)) 934 bad_reason = "non-NULL mapping"; 935 if (unlikely(page_ref_count(page) != 0)) 936 bad_reason = "nonzero _refcount"; 937 if (unlikely(page->flags & flags)) { 938 if (flags == PAGE_FLAGS_CHECK_AT_PREP) 939 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set"; 940 else 941 bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; 942 } 943 #ifdef CONFIG_MEMCG 944 if (unlikely(page->memcg_data)) 945 bad_reason = "page still charged to cgroup"; 946 #endif 947 return bad_reason; 948 } 949 950 static void free_page_is_bad_report(struct page *page) 951 { 952 bad_page(page, 953 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); 954 } 955 956 static inline bool free_page_is_bad(struct page *page) 957 { 958 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) 959 return false; 960 961 /* Something has gone sideways, find it */ 962 free_page_is_bad_report(page); 963 return true; 964 } 965 966 static inline bool is_check_pages_enabled(void) 967 { 968 return static_branch_unlikely(&check_pages_enabled); 969 } 970 971 static int free_tail_page_prepare(struct page *head_page, struct page *page) 972 { 973 struct folio *folio = (struct folio *)head_page; 974 int ret = 1; 975 976 /* 977 * We rely page->lru.next never has bit 0 set, unless the page 978 * is PageTail(). Let's make sure that's true even for poisoned ->lru. 979 */ 980 BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); 981 982 if (!is_check_pages_enabled()) { 983 ret = 0; 984 goto out; 985 } 986 switch (page - head_page) { 987 case 1: 988 /* the first tail page: these may be in place of ->mapping */ 989 if (unlikely(folio_entire_mapcount(folio))) { 990 bad_page(page, "nonzero entire_mapcount"); 991 goto out; 992 } 993 if (unlikely(atomic_read(&folio->_nr_pages_mapped))) { 994 bad_page(page, "nonzero nr_pages_mapped"); 995 goto out; 996 } 997 if (unlikely(atomic_read(&folio->_pincount))) { 998 bad_page(page, "nonzero pincount"); 999 goto out; 1000 } 1001 break; 1002 case 2: 1003 /* the second tail page: deferred_list overlaps ->mapping */ 1004 if (unlikely(!list_empty(&folio->_deferred_list))) { 1005 bad_page(page, "on deferred list"); 1006 goto out; 1007 } 1008 break; 1009 default: 1010 if (page->mapping != TAIL_MAPPING) { 1011 bad_page(page, "corrupted mapping in tail page"); 1012 goto out; 1013 } 1014 break; 1015 } 1016 if (unlikely(!PageTail(page))) { 1017 bad_page(page, "PageTail not set"); 1018 goto out; 1019 } 1020 if (unlikely(compound_head(page) != head_page)) { 1021 bad_page(page, "compound_head not consistent"); 1022 goto out; 1023 } 1024 ret = 0; 1025 out: 1026 page->mapping = NULL; 1027 clear_compound_head(page); 1028 return ret; 1029 } 1030 1031 /* 1032 * Skip KASAN memory poisoning when either: 1033 * 1034 * 1. For generic KASAN: deferred memory initialization has not yet completed. 
1035 * Tag-based KASAN modes skip pages freed via deferred memory initialization 1036 * using page tags instead (see below). 1037 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating 1038 * that error detection is disabled for accesses via the page address. 1039 * 1040 * Pages will have match-all tags in the following circumstances: 1041 * 1042 * 1. Pages are being initialized for the first time, including during deferred 1043 * memory init; see the call to page_kasan_tag_reset in __init_single_page. 1044 * 2. The allocation was not unpoisoned due to __GFP_SKIP_KASAN, with the 1045 * exception of pages unpoisoned by kasan_unpoison_vmalloc. 1046 * 3. The allocation was excluded from being checked due to sampling, 1047 * see the call to kasan_unpoison_pages. 1048 * 1049 * Poisoning pages during deferred memory init will greatly lengthen the 1050 * process and cause problem in large memory systems as the deferred pages 1051 * initialization is done with interrupt disabled. 1052 * 1053 * Assuming that there will be no reference to those newly initialized 1054 * pages before they are ever allocated, this should have no effect on 1055 * KASAN memory tracking as the poison will be properly inserted at page 1056 * allocation time. The only corner case is when pages are allocated by 1057 * on-demand allocation and then freed again before the deferred pages 1058 * initialization is done, but this is not likely to happen. 1059 */ 1060 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) 1061 { 1062 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1063 return deferred_pages_enabled(); 1064 1065 return page_kasan_tag(page) == 0xff; 1066 } 1067 1068 static void kernel_init_pages(struct page *page, int numpages) 1069 { 1070 int i; 1071 1072 /* s390's use of memset() could override KASAN redzones. */ 1073 kasan_disable_current(); 1074 for (i = 0; i < numpages; i++) 1075 clear_highpage_kasan_tagged(page + i); 1076 kasan_enable_current(); 1077 } 1078 1079 static __always_inline bool free_pages_prepare(struct page *page, 1080 unsigned int order, fpi_t fpi_flags) 1081 { 1082 int bad = 0; 1083 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); 1084 bool init = want_init_on_free(); 1085 struct folio *folio = page_folio(page); 1086 1087 VM_BUG_ON_PAGE(PageTail(page), page); 1088 1089 trace_mm_page_free(page, order); 1090 kmsan_free_page(page, order); 1091 1092 /* 1093 * In rare cases, when truncation or holepunching raced with 1094 * munlock after VM_LOCKED was cleared, Mlocked may still be 1095 * found set here. This does not indicate a problem, unless 1096 * "unevictable_pgs_cleared" appears worryingly large. 1097 */ 1098 if (unlikely(folio_test_mlocked(folio))) { 1099 long nr_pages = folio_nr_pages(folio); 1100 1101 __folio_clear_mlocked(folio); 1102 zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); 1103 count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 1104 } 1105 1106 if (unlikely(PageHWPoison(page)) && !order) { 1107 /* 1108 * Do not let hwpoison pages hit pcplists/buddy 1109 * Untie memcg state and reset page's owner 1110 */ 1111 if (memcg_kmem_online() && PageMemcgKmem(page)) 1112 __memcg_kmem_uncharge_page(page, order); 1113 reset_page_owner(page, order); 1114 page_table_check_free(page, order); 1115 return false; 1116 } 1117 1118 /* 1119 * Check tail pages before head page information is cleared to 1120 * avoid checking PageCompound for order-0 pages. 
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			page[1].flags &= ~PAGE_FLAGS_SECOND;
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_page_prepare(page, page + i);
			if (is_check_pages_enabled()) {
				if (free_page_is_bad(page + i)) {
					bad++;
					continue;
				}
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_online() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (is_check_pages_enabled()) {
		if (free_page_is_bad(page))
			bad++;
		if (bad)
			return false;
	}

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);
	page_table_check_free(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}

	kernel_poison_pages(page, 1 << order);

	/*
	 * As memory initialization might be integrated into KASAN,
	 * KASAN poisoning and memory initialization code must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * With hardware tag-based KASAN, memory tags must be set before the
	 * page becomes unavailable via debug_pagealloc or arch_free_page.
	 */
	if (!skip_kasan_poison) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp,
					int pindex)
{
	unsigned long flags;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure a proper count is passed; otherwise the
	 * while (list_empty(list)) loop below would get stuck.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	spin_lock_irqsave(&zone->lock, flags);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion.
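		 *
		 * For example (illustrative only): a request to free 32 pages
		 * starting at pindex 5 drains list 5 first, then 6, 7, ...
		 * wrapping back to 0 and skipping empty lists; each page from
		 * an order-0 list drops 'count' by 1, while one page from a
		 * higher-order list drops it by 1 << pindex_to_order(pindex),
		 * so the loop may slightly overshoot 'count' rather than
		 * split a buddy.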
*/ 1228 do { 1229 if (++pindex > NR_PCP_LISTS - 1) 1230 pindex = 0; 1231 list = &pcp->lists[pindex]; 1232 } while (list_empty(list)); 1233 1234 order = pindex_to_order(pindex); 1235 nr_pages = 1 << order; 1236 do { 1237 int mt; 1238 1239 page = list_last_entry(list, struct page, pcp_list); 1240 mt = get_pcppage_migratetype(page); 1241 1242 /* must delete to avoid corrupting pcp list */ 1243 list_del(&page->pcp_list); 1244 count -= nr_pages; 1245 pcp->count -= nr_pages; 1246 1247 /* MIGRATE_ISOLATE page should not go to pcplists */ 1248 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); 1249 /* Pageblock could have been isolated meanwhile */ 1250 if (unlikely(isolated_pageblocks)) 1251 mt = get_pageblock_migratetype(page); 1252 1253 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); 1254 trace_mm_page_pcpu_drain(page, order, mt); 1255 } while (count > 0 && !list_empty(list)); 1256 } 1257 1258 spin_unlock_irqrestore(&zone->lock, flags); 1259 } 1260 1261 static void free_one_page(struct zone *zone, 1262 struct page *page, unsigned long pfn, 1263 unsigned int order, 1264 int migratetype, fpi_t fpi_flags) 1265 { 1266 unsigned long flags; 1267 1268 spin_lock_irqsave(&zone->lock, flags); 1269 if (unlikely(has_isolate_pageblock(zone) || 1270 is_migrate_isolate(migratetype))) { 1271 migratetype = get_pfnblock_migratetype(page, pfn); 1272 } 1273 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1274 spin_unlock_irqrestore(&zone->lock, flags); 1275 } 1276 1277 static void __free_pages_ok(struct page *page, unsigned int order, 1278 fpi_t fpi_flags) 1279 { 1280 unsigned long flags; 1281 int migratetype; 1282 unsigned long pfn = page_to_pfn(page); 1283 struct zone *zone = page_zone(page); 1284 1285 if (!free_pages_prepare(page, order, fpi_flags)) 1286 return; 1287 1288 /* 1289 * Calling get_pfnblock_migratetype() without spin_lock_irqsave() here 1290 * is used to avoid calling get_pfnblock_migratetype() under the lock. 1291 * This will reduce the lock holding time. 1292 */ 1293 migratetype = get_pfnblock_migratetype(page, pfn); 1294 1295 spin_lock_irqsave(&zone->lock, flags); 1296 if (unlikely(has_isolate_pageblock(zone) || 1297 is_migrate_isolate(migratetype))) { 1298 migratetype = get_pfnblock_migratetype(page, pfn); 1299 } 1300 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1301 spin_unlock_irqrestore(&zone->lock, flags); 1302 1303 __count_vm_events(PGFREE, 1 << order); 1304 } 1305 1306 void __free_pages_core(struct page *page, unsigned int order) 1307 { 1308 unsigned int nr_pages = 1 << order; 1309 struct page *p = page; 1310 unsigned int loop; 1311 1312 /* 1313 * When initializing the memmap, __init_single_page() sets the refcount 1314 * of all pages to 1 ("allocated"/"not free"). We have to set the 1315 * refcount of all involved pages to 0. 1316 */ 1317 prefetchw(p); 1318 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { 1319 prefetchw(p + 1); 1320 __ClearPageReserved(p); 1321 set_page_count(p, 0); 1322 } 1323 __ClearPageReserved(p); 1324 set_page_count(p, 0); 1325 1326 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1327 1328 if (page_contains_unaccepted(page, order)) { 1329 if (order == MAX_ORDER && __free_unaccepted(page)) 1330 return; 1331 1332 accept_page(page, order); 1333 } 1334 1335 /* 1336 * Bypass PCP and place fresh pages right to the tail, primarily 1337 * relevant for memory onlining. 
1338 */ 1339 __free_pages_ok(page, order, FPI_TO_TAIL); 1340 } 1341 1342 /* 1343 * Check that the whole (or subset of) a pageblock given by the interval of 1344 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it 1345 * with the migration of free compaction scanner. 1346 * 1347 * Return struct page pointer of start_pfn, or NULL if checks were not passed. 1348 * 1349 * It's possible on some configurations to have a setup like node0 node1 node0 1350 * i.e. it's possible that all pages within a zones range of pages do not 1351 * belong to a single zone. We assume that a border between node0 and node1 1352 * can occur within a single pageblock, but not a node0 node1 node0 1353 * interleaving within a single pageblock. It is therefore sufficient to check 1354 * the first and last page of a pageblock and avoid checking each individual 1355 * page in a pageblock. 1356 * 1357 * Note: the function may return non-NULL struct page even for a page block 1358 * which contains a memory hole (i.e. there is no physical memory for a subset 1359 * of the pfn range). For example, if the pageblock order is MAX_ORDER, which 1360 * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole 1361 * even though the start pfn is online and valid. This should be safe most of 1362 * the time because struct pages are still initialized via init_unavailable_range() 1363 * and pfn walkers shouldn't touch any physical memory range for which they do 1364 * not recognize any specific metadata in struct pages. 1365 */ 1366 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1367 unsigned long end_pfn, struct zone *zone) 1368 { 1369 struct page *start_page; 1370 struct page *end_page; 1371 1372 /* end_pfn is one past the range we are checking */ 1373 end_pfn--; 1374 1375 if (!pfn_valid(end_pfn)) 1376 return NULL; 1377 1378 start_page = pfn_to_online_page(start_pfn); 1379 if (!start_page) 1380 return NULL; 1381 1382 if (page_zone(start_page) != zone) 1383 return NULL; 1384 1385 end_page = pfn_to_page(end_pfn); 1386 1387 /* This gives a shorter code than deriving page_zone(end_page) */ 1388 if (page_zone_id(start_page) != page_zone_id(end_page)) 1389 return NULL; 1390 1391 return start_page; 1392 } 1393 1394 /* 1395 * The order of subdivision here is critical for the IO subsystem. 1396 * Please do not alter this order without good reasons and regression 1397 * testing. Specifically, as large blocks of memory are subdivided, 1398 * the order in which smaller blocks are delivered depends on the order 1399 * they're subdivided in this function. This is the primary factor 1400 * influencing the order in which pages are delivered to the IO 1401 * subsystem according to empirical testing, and this is also justified 1402 * by considering the behavior of a buddy system containing a single 1403 * large block of memory acted on by a series of small allocations. 1404 * This behavior is a critical factor in sglist merging's success. 1405 * 1406 * -- nyc 1407 */ 1408 static inline void expand(struct zone *zone, struct page *page, 1409 int low, int high, int migratetype) 1410 { 1411 unsigned long size = 1 << high; 1412 1413 while (high > low) { 1414 high--; 1415 size >>= 1; 1416 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 1417 1418 /* 1419 * Mark as guard pages (or page), that will allow to 1420 * merge back to allocator when buddy will be freed. 
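		 *
		 * Worked example of this split loop (illustrative only): a
		 * caller needing order 2 from an order-5 free block of 32
		 * pages keeps page[0..3] and, as 'high' steps through 4, 3,
		 * 2, hands back page[16] at order 4, page[8] at order 3 and
		 * page[4] at order 2 - or turns them into guard pages when
		 * guard pages are in use.
		 *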
1421 * Corresponding page table entries will not be touched, 1422 * pages will stay not present in virtual address space 1423 */ 1424 if (set_page_guard(zone, &page[size], high, migratetype)) 1425 continue; 1426 1427 add_to_free_list(&page[size], zone, high, migratetype); 1428 set_buddy_order(&page[size], high); 1429 } 1430 } 1431 1432 static void check_new_page_bad(struct page *page) 1433 { 1434 if (unlikely(page->flags & __PG_HWPOISON)) { 1435 /* Don't complain about hwpoisoned pages */ 1436 page_mapcount_reset(page); /* remove PageBuddy */ 1437 return; 1438 } 1439 1440 bad_page(page, 1441 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 1442 } 1443 1444 /* 1445 * This page is about to be returned from the page allocator 1446 */ 1447 static int check_new_page(struct page *page) 1448 { 1449 if (likely(page_expected_state(page, 1450 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 1451 return 0; 1452 1453 check_new_page_bad(page); 1454 return 1; 1455 } 1456 1457 static inline bool check_new_pages(struct page *page, unsigned int order) 1458 { 1459 if (is_check_pages_enabled()) { 1460 for (int i = 0; i < (1 << order); i++) { 1461 struct page *p = page + i; 1462 1463 if (check_new_page(p)) 1464 return true; 1465 } 1466 } 1467 1468 return false; 1469 } 1470 1471 static inline bool should_skip_kasan_unpoison(gfp_t flags) 1472 { 1473 /* Don't skip if a software KASAN mode is enabled. */ 1474 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 1475 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 1476 return false; 1477 1478 /* Skip, if hardware tag-based KASAN is not enabled. */ 1479 if (!kasan_hw_tags_enabled()) 1480 return true; 1481 1482 /* 1483 * With hardware tag-based KASAN enabled, skip if this has been 1484 * requested via __GFP_SKIP_KASAN. 1485 */ 1486 return flags & __GFP_SKIP_KASAN; 1487 } 1488 1489 static inline bool should_skip_init(gfp_t flags) 1490 { 1491 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 1492 if (!kasan_hw_tags_enabled()) 1493 return false; 1494 1495 /* For hardware tag-based KASAN, skip if requested. */ 1496 return (flags & __GFP_SKIP_ZERO); 1497 } 1498 1499 inline void post_alloc_hook(struct page *page, unsigned int order, 1500 gfp_t gfp_flags) 1501 { 1502 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 1503 !should_skip_init(gfp_flags); 1504 bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS); 1505 int i; 1506 1507 set_page_private(page, 0); 1508 set_page_refcounted(page); 1509 1510 arch_alloc_page(page, order); 1511 debug_pagealloc_map_pages(page, 1 << order); 1512 1513 /* 1514 * Page unpoisoning must happen before memory initialization. 1515 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 1516 * allocations and the page unpoisoning code will complain. 1517 */ 1518 kernel_unpoison_pages(page, 1 << order); 1519 1520 /* 1521 * As memory initialization might be integrated into KASAN, 1522 * KASAN unpoisoning and memory initializion code must be 1523 * kept together to avoid discrepancies in behavior. 1524 */ 1525 1526 /* 1527 * If memory tags should be zeroed 1528 * (which happens only when memory should be initialized as well). 1529 */ 1530 if (zero_tags) { 1531 /* Initialize both memory and memory tags. */ 1532 for (i = 0; i != 1 << order; ++i) 1533 tag_clear_highpage(page + i); 1534 1535 /* Take note that memory was initialized by the loop above. */ 1536 init = false; 1537 } 1538 if (!should_skip_kasan_unpoison(gfp_flags) && 1539 kasan_unpoison_pages(page, order, init)) { 1540 /* Take note that memory was initialized by KASAN. 
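		 *
		 * For example (illustrative): with init_on_alloc=1 and no
		 * __GFP_SKIP_ZERO, init starts out true above; if hardware
		 * tag-based KASAN has integrated init, kasan_unpoison_pages()
		 * already zeroed the memory while setting its tags, so init
		 * is cleared here and kernel_init_pages() below is skipped.
		 * With init_on_free=1 instead, init was already false at the
		 * top of this function because the pages were cleared when
		 * they were freed.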
*/ 1541 if (kasan_has_integrated_init()) 1542 init = false; 1543 } else { 1544 /* 1545 * If memory tags have not been set by KASAN, reset the page 1546 * tags to ensure page_address() dereferencing does not fault. 1547 */ 1548 for (i = 0; i != 1 << order; ++i) 1549 page_kasan_tag_reset(page + i); 1550 } 1551 /* If memory is still not initialized, initialize it now. */ 1552 if (init) 1553 kernel_init_pages(page, 1 << order); 1554 1555 set_page_owner(page, order, gfp_flags); 1556 page_table_check_alloc(page, order); 1557 } 1558 1559 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 1560 unsigned int alloc_flags) 1561 { 1562 post_alloc_hook(page, order, gfp_flags); 1563 1564 if (order && (gfp_flags & __GFP_COMP)) 1565 prep_compound_page(page, order); 1566 1567 /* 1568 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 1569 * allocate the page. The expectation is that the caller is taking 1570 * steps that will free more memory. The caller should avoid the page 1571 * being used for !PFMEMALLOC purposes. 1572 */ 1573 if (alloc_flags & ALLOC_NO_WATERMARKS) 1574 set_page_pfmemalloc(page); 1575 else 1576 clear_page_pfmemalloc(page); 1577 } 1578 1579 /* 1580 * Go through the free lists for the given migratetype and remove 1581 * the smallest available page from the freelists 1582 */ 1583 static __always_inline 1584 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 1585 int migratetype) 1586 { 1587 unsigned int current_order; 1588 struct free_area *area; 1589 struct page *page; 1590 1591 /* Find a page of the appropriate size in the preferred list */ 1592 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { 1593 area = &(zone->free_area[current_order]); 1594 page = get_page_from_free_area(area, migratetype); 1595 if (!page) 1596 continue; 1597 del_page_from_free_list(page, zone, current_order); 1598 expand(zone, page, order, current_order, migratetype); 1599 set_pcppage_migratetype(page, migratetype); 1600 trace_mm_page_alloc_zone_locked(page, order, migratetype, 1601 pcp_allowed_order(order) && 1602 migratetype < MIGRATE_PCPTYPES); 1603 return page; 1604 } 1605 1606 return NULL; 1607 } 1608 1609 1610 /* 1611 * This array describes the order lists are fallen back to when 1612 * the free lists for the desirable migrate type are depleted 1613 * 1614 * The other migratetypes do not have fallbacks. 1615 */ 1616 static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = { 1617 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE }, 1618 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE }, 1619 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE }, 1620 }; 1621 1622 #ifdef CONFIG_CMA 1623 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1624 unsigned int order) 1625 { 1626 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 1627 } 1628 #else 1629 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 1630 unsigned int order) { return NULL; } 1631 #endif 1632 1633 /* 1634 * Move the free pages in a range to the freelist tail of the requested type. 1635 * Note that start_page and end_pages are not aligned on a pageblock 1636 * boundary. 
If alignment is required, use move_freepages_block() 1637 */ 1638 static int move_freepages(struct zone *zone, 1639 unsigned long start_pfn, unsigned long end_pfn, 1640 int migratetype, int *num_movable) 1641 { 1642 struct page *page; 1643 unsigned long pfn; 1644 unsigned int order; 1645 int pages_moved = 0; 1646 1647 for (pfn = start_pfn; pfn <= end_pfn;) { 1648 page = pfn_to_page(pfn); 1649 if (!PageBuddy(page)) { 1650 /* 1651 * We assume that pages that could be isolated for 1652 * migration are movable. But we don't actually try 1653 * isolating, as that would be expensive. 1654 */ 1655 if (num_movable && 1656 (PageLRU(page) || __PageMovable(page))) 1657 (*num_movable)++; 1658 pfn++; 1659 continue; 1660 } 1661 1662 /* Make sure we are not inadvertently changing nodes */ 1663 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 1664 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 1665 1666 order = buddy_order(page); 1667 move_to_free_list(page, zone, order, migratetype); 1668 pfn += 1 << order; 1669 pages_moved += 1 << order; 1670 } 1671 1672 return pages_moved; 1673 } 1674 1675 int move_freepages_block(struct zone *zone, struct page *page, 1676 int migratetype, int *num_movable) 1677 { 1678 unsigned long start_pfn, end_pfn, pfn; 1679 1680 if (num_movable) 1681 *num_movable = 0; 1682 1683 pfn = page_to_pfn(page); 1684 start_pfn = pageblock_start_pfn(pfn); 1685 end_pfn = pageblock_end_pfn(pfn) - 1; 1686 1687 /* Do not cross zone boundaries */ 1688 if (!zone_spans_pfn(zone, start_pfn)) 1689 start_pfn = pfn; 1690 if (!zone_spans_pfn(zone, end_pfn)) 1691 return 0; 1692 1693 return move_freepages(zone, start_pfn, end_pfn, migratetype, 1694 num_movable); 1695 } 1696 1697 static void change_pageblock_range(struct page *pageblock_page, 1698 int start_order, int migratetype) 1699 { 1700 int nr_pageblocks = 1 << (start_order - pageblock_order); 1701 1702 while (nr_pageblocks--) { 1703 set_pageblock_migratetype(pageblock_page, migratetype); 1704 pageblock_page += pageblock_nr_pages; 1705 } 1706 } 1707 1708 /* 1709 * When we are falling back to another migratetype during allocation, try to 1710 * steal extra free pages from the same pageblocks to satisfy further 1711 * allocations, instead of polluting multiple pageblocks. 1712 * 1713 * If we are stealing a relatively large buddy page, it is likely there will 1714 * be more free pages in the pageblock, so try to steal them all. For 1715 * reclaimable and unmovable allocations, we steal regardless of page size, 1716 * as fragmentation caused by those allocations polluting movable pageblocks 1717 * is worse than movable allocations stealing from unmovable and reclaimable 1718 * pageblocks. 1719 */ 1720 static bool can_steal_fallback(unsigned int order, int start_mt) 1721 { 1722 /* 1723 * Leaving this order check is intended, although there is 1724 * relaxed order check in next check. The reason is that 1725 * we can actually steal whole pageblock if this condition met, 1726 * but, below check doesn't guarantee it and that is just heuristic 1727 * so could be changed anytime. 
1728 */ 1729 if (order >= pageblock_order) 1730 return true; 1731 1732 if (order >= pageblock_order / 2 || 1733 start_mt == MIGRATE_RECLAIMABLE || 1734 start_mt == MIGRATE_UNMOVABLE || 1735 page_group_by_mobility_disabled) 1736 return true; 1737 1738 return false; 1739 } 1740 1741 static inline bool boost_watermark(struct zone *zone) 1742 { 1743 unsigned long max_boost; 1744 1745 if (!watermark_boost_factor) 1746 return false; 1747 /* 1748 * Don't bother in zones that are unlikely to produce results. 1749 * On small machines, including kdump capture kernels running 1750 * in a small area, boosting the watermark can cause an out of 1751 * memory situation immediately. 1752 */ 1753 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 1754 return false; 1755 1756 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 1757 watermark_boost_factor, 10000); 1758 1759 /* 1760 * high watermark may be uninitialised if fragmentation occurs 1761 * very early in boot so do not boost. We do not fall 1762 * through and boost by pageblock_nr_pages as failing 1763 * allocations that early means that reclaim is not going 1764 * to help and it may even be impossible to reclaim the 1765 * boosted watermark resulting in a hang. 1766 */ 1767 if (!max_boost) 1768 return false; 1769 1770 max_boost = max(pageblock_nr_pages, max_boost); 1771 1772 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 1773 max_boost); 1774 1775 return true; 1776 } 1777 1778 /* 1779 * This function implements actual steal behaviour. If order is large enough, 1780 * we can steal whole pageblock. If not, we first move freepages in this 1781 * pageblock to our migratetype and determine how many already-allocated pages 1782 * are there in the pageblock with a compatible migratetype. If at least half 1783 * of pages are free or compatible, we can change migratetype of the pageblock 1784 * itself, so pages freed in the future will be put on the correct free list. 1785 */ 1786 static void steal_suitable_fallback(struct zone *zone, struct page *page, 1787 unsigned int alloc_flags, int start_type, bool whole_block) 1788 { 1789 unsigned int current_order = buddy_order(page); 1790 int free_pages, movable_pages, alike_pages; 1791 int old_block_type; 1792 1793 old_block_type = get_pageblock_migratetype(page); 1794 1795 /* 1796 * This can happen due to races and we want to prevent broken 1797 * highatomic accounting. 1798 */ 1799 if (is_migrate_highatomic(old_block_type)) 1800 goto single_page; 1801 1802 /* Take ownership for orders >= pageblock_order */ 1803 if (current_order >= pageblock_order) { 1804 change_pageblock_range(page, current_order, start_type); 1805 goto single_page; 1806 } 1807 1808 /* 1809 * Boost watermarks to increase reclaim pressure to reduce the 1810 * likelihood of future fallbacks. Wake kswapd now as the node 1811 * may be balanced overall and kswapd will not wake naturally. 1812 */ 1813 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 1814 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 1815 1816 /* We are not allowed to try stealing from the whole block */ 1817 if (!whole_block) 1818 goto single_page; 1819 1820 free_pages = move_freepages_block(zone, page, start_type, 1821 &movable_pages); 1822 /* moving whole block can fail due to zone boundary conditions */ 1823 if (!free_pages) 1824 goto single_page; 1825 1826 /* 1827 * Determine how many pages are compatible with our allocation. 1828 * For movable allocation, it's the number of movable pages which 1829 * we just obtained. 
For other types it's a bit more tricky. 1830 */ 1831 if (start_type == MIGRATE_MOVABLE) { 1832 alike_pages = movable_pages; 1833 } else { 1834 /* 1835 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 1836 * to MOVABLE pageblock, consider all non-movable pages as 1837 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 1838 * vice versa, be conservative since we can't distinguish the 1839 * exact migratetype of non-movable pages. 1840 */ 1841 if (old_block_type == MIGRATE_MOVABLE) 1842 alike_pages = pageblock_nr_pages 1843 - (free_pages + movable_pages); 1844 else 1845 alike_pages = 0; 1846 } 1847 /* 1848 * If a sufficient number of pages in the block are either free or of 1849 * compatible migratability as our allocation, claim the whole block. 1850 */ 1851 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 1852 page_group_by_mobility_disabled) 1853 set_pageblock_migratetype(page, start_type); 1854 1855 return; 1856 1857 single_page: 1858 move_to_free_list(page, zone, current_order, start_type); 1859 } 1860 1861 /* 1862 * Check whether there is a suitable fallback freepage with requested order. 1863 * If only_stealable is true, this function returns fallback_mt only if 1864 * we can steal other freepages all together. This would help to reduce 1865 * fragmentation due to mixed migratetype pages in one pageblock. 1866 */ 1867 int find_suitable_fallback(struct free_area *area, unsigned int order, 1868 int migratetype, bool only_stealable, bool *can_steal) 1869 { 1870 int i; 1871 int fallback_mt; 1872 1873 if (area->nr_free == 0) 1874 return -1; 1875 1876 *can_steal = false; 1877 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { 1878 fallback_mt = fallbacks[migratetype][i]; 1879 if (free_area_empty(area, fallback_mt)) 1880 continue; 1881 1882 if (can_steal_fallback(order, migratetype)) 1883 *can_steal = true; 1884 1885 if (!only_stealable) 1886 return fallback_mt; 1887 1888 if (*can_steal) 1889 return fallback_mt; 1890 } 1891 1892 return -1; 1893 } 1894 1895 /* 1896 * Reserve a pageblock for exclusive use of high-order atomic allocations if 1897 * there are no empty page blocks that contain a page with a suitable order 1898 */ 1899 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) 1900 { 1901 int mt; 1902 unsigned long max_managed, flags; 1903 1904 /* 1905 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 1906 * Check is race-prone but harmless. 1907 */ 1908 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 1909 if (zone->nr_reserved_highatomic >= max_managed) 1910 return; 1911 1912 spin_lock_irqsave(&zone->lock, flags); 1913 1914 /* Recheck the nr_reserved_highatomic limit under the lock */ 1915 if (zone->nr_reserved_highatomic >= max_managed) 1916 goto out_unlock; 1917 1918 /* Yoink! */ 1919 mt = get_pageblock_migratetype(page); 1920 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 1921 if (migratetype_is_mergeable(mt)) { 1922 zone->nr_reserved_highatomic += pageblock_nr_pages; 1923 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 1924 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 1925 } 1926 1927 out_unlock: 1928 spin_unlock_irqrestore(&zone->lock, flags); 1929 } 1930 1931 /* 1932 * Used when an allocation is about to fail under memory pressure. This 1933 * potentially hurts the reliability of high-order allocations when under 1934 * intense memory pressure but failed atomic allocations should be easier 1935 * to recover from than an OOM. 
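 *
 * As a rough illustration of the scale involved: reserve_highatomic_pageblock()
 * above caps nr_reserved_highatomic at about 1% of the zone plus one
 * pageblock, so a zone with roughly one million managed 4K pages holds at
 * most ~10k pages in MIGRATE_HIGHATOMIC, and the loop below walks those
 * reserves one pageblock at a time.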
1936 * 1937 * If @force is true, try to unreserve a pageblock even though highatomic 1938 * pageblock is exhausted. 1939 */ 1940 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 1941 bool force) 1942 { 1943 struct zonelist *zonelist = ac->zonelist; 1944 unsigned long flags; 1945 struct zoneref *z; 1946 struct zone *zone; 1947 struct page *page; 1948 int order; 1949 bool ret; 1950 1951 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 1952 ac->nodemask) { 1953 /* 1954 * Preserve at least one pageblock unless memory pressure 1955 * is really high. 1956 */ 1957 if (!force && zone->nr_reserved_highatomic <= 1958 pageblock_nr_pages) 1959 continue; 1960 1961 spin_lock_irqsave(&zone->lock, flags); 1962 for (order = 0; order < NR_PAGE_ORDERS; order++) { 1963 struct free_area *area = &(zone->free_area[order]); 1964 1965 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 1966 if (!page) 1967 continue; 1968 1969 /* 1970 * In page freeing path, migratetype change is racy so 1971 * we can counter several free pages in a pageblock 1972 * in this loop although we changed the pageblock type 1973 * from highatomic to ac->migratetype. So we should 1974 * adjust the count once. 1975 */ 1976 if (is_migrate_highatomic_page(page)) { 1977 /* 1978 * It should never happen but changes to 1979 * locking could inadvertently allow a per-cpu 1980 * drain to add pages to MIGRATE_HIGHATOMIC 1981 * while unreserving so be safe and watch for 1982 * underflows. 1983 */ 1984 zone->nr_reserved_highatomic -= min( 1985 pageblock_nr_pages, 1986 zone->nr_reserved_highatomic); 1987 } 1988 1989 /* 1990 * Convert to ac->migratetype and avoid the normal 1991 * pageblock stealing heuristics. Minimally, the caller 1992 * is doing the work and needs the pages. More 1993 * importantly, if the block was always converted to 1994 * MIGRATE_UNMOVABLE or another type then the number 1995 * of pageblocks that cannot be completely freed 1996 * may increase. 1997 */ 1998 set_pageblock_migratetype(page, ac->migratetype); 1999 ret = move_freepages_block(zone, page, ac->migratetype, 2000 NULL); 2001 if (ret) { 2002 spin_unlock_irqrestore(&zone->lock, flags); 2003 return ret; 2004 } 2005 } 2006 spin_unlock_irqrestore(&zone->lock, flags); 2007 } 2008 2009 return false; 2010 } 2011 2012 /* 2013 * Try finding a free buddy page on the fallback list and put it on the free 2014 * list of requested migratetype, possibly along with other pages from the same 2015 * block, depending on fragmentation avoidance heuristics. Returns true if 2016 * fallback was found so that __rmqueue_smallest() can grab it. 2017 * 2018 * The use of signed ints for order and current_order is a deliberate 2019 * deviation from the rest of this file, to make the for loop 2020 * condition simpler. 2021 */ 2022 static __always_inline bool 2023 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 2024 unsigned int alloc_flags) 2025 { 2026 struct free_area *area; 2027 int current_order; 2028 int min_order = order; 2029 struct page *page; 2030 int fallback_mt; 2031 bool can_steal; 2032 2033 /* 2034 * Do not steal pages from freelists belonging to other pageblocks 2035 * i.e. orders < pageblock_order. If there are no local zones free, 2036 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 2037 */ 2038 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 2039 min_order = pageblock_order; 2040 2041 /* 2042 * Find the largest available free page in the other list. 
This roughly 2043 * approximates finding the pageblock with the most free pages, which 2044 * would be too costly to do exactly. 2045 */ 2046 for (current_order = MAX_ORDER; current_order >= min_order; 2047 --current_order) { 2048 area = &(zone->free_area[current_order]); 2049 fallback_mt = find_suitable_fallback(area, current_order, 2050 start_migratetype, false, &can_steal); 2051 if (fallback_mt == -1) 2052 continue; 2053 2054 /* 2055 * We cannot steal all free pages from the pageblock and the 2056 * requested migratetype is movable. In that case it's better to 2057 * steal and split the smallest available page instead of the 2058 * largest available page, because even if the next movable 2059 * allocation falls back into a different pageblock than this 2060 * one, it won't cause permanent fragmentation. 2061 */ 2062 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 2063 && current_order > order) 2064 goto find_smallest; 2065 2066 goto do_steal; 2067 } 2068 2069 return false; 2070 2071 find_smallest: 2072 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { 2073 area = &(zone->free_area[current_order]); 2074 fallback_mt = find_suitable_fallback(area, current_order, 2075 start_migratetype, false, &can_steal); 2076 if (fallback_mt != -1) 2077 break; 2078 } 2079 2080 /* 2081 * This should not happen - we already found a suitable fallback 2082 * when looking for the largest page. 2083 */ 2084 VM_BUG_ON(current_order > MAX_ORDER); 2085 2086 do_steal: 2087 page = get_page_from_free_area(area, fallback_mt); 2088 2089 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 2090 can_steal); 2091 2092 trace_mm_page_alloc_extfrag(page, order, current_order, 2093 start_migratetype, fallback_mt); 2094 2095 return true; 2096 2097 } 2098 2099 /* 2100 * Do the hard work of removing an element from the buddy allocator. 2101 * Call me with the zone->lock already held. 2102 */ 2103 static __always_inline struct page * 2104 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 2105 unsigned int alloc_flags) 2106 { 2107 struct page *page; 2108 2109 if (IS_ENABLED(CONFIG_CMA)) { 2110 /* 2111 * Balance movable allocations between regular and CMA areas by 2112 * allocating from CMA when over half of the zone's free memory 2113 * is in the CMA area. 2114 */ 2115 if (alloc_flags & ALLOC_CMA && 2116 zone_page_state(zone, NR_FREE_CMA_PAGES) > 2117 zone_page_state(zone, NR_FREE_PAGES) / 2) { 2118 page = __rmqueue_cma_fallback(zone, order); 2119 if (page) 2120 return page; 2121 } 2122 } 2123 retry: 2124 page = __rmqueue_smallest(zone, order, migratetype); 2125 if (unlikely(!page)) { 2126 if (alloc_flags & ALLOC_CMA) 2127 page = __rmqueue_cma_fallback(zone, order); 2128 2129 if (!page && __rmqueue_fallback(zone, order, migratetype, 2130 alloc_flags)) 2131 goto retry; 2132 } 2133 return page; 2134 } 2135 2136 /* 2137 * Obtain a specified number of elements from the buddy allocator, all under 2138 * a single hold of the lock, for efficiency. Add them to the supplied list. 2139 * Returns the number of new pages which were placed at *list. 
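 *
 * The typical caller is the pcp refill path: __rmqueue_pcplist() below
 * refills an empty pcp list with a single call along the lines of
 * rmqueue_bulk(zone, order, batch, &pcp->lists[pindex], migratetype,
 * alloc_flags), so zone->lock is taken once per batch rather than once
 * per page.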
2140 */ 2141 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2142 unsigned long count, struct list_head *list, 2143 int migratetype, unsigned int alloc_flags) 2144 { 2145 unsigned long flags; 2146 int i; 2147 2148 spin_lock_irqsave(&zone->lock, flags); 2149 for (i = 0; i < count; ++i) { 2150 struct page *page = __rmqueue(zone, order, migratetype, 2151 alloc_flags); 2152 if (unlikely(page == NULL)) 2153 break; 2154 2155 /* 2156 * Split buddy pages returned by expand() are received here in 2157 * physical page order. The page is added to the tail of 2158 * caller's list. From the callers perspective, the linked list 2159 * is ordered by page number under some conditions. This is 2160 * useful for IO devices that can forward direction from the 2161 * head, thus also in the physical page order. This is useful 2162 * for IO devices that can merge IO requests if the physical 2163 * pages are ordered properly. 2164 */ 2165 list_add_tail(&page->pcp_list, list); 2166 if (is_migrate_cma(get_pcppage_migratetype(page))) 2167 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2168 -(1 << order)); 2169 } 2170 2171 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2172 spin_unlock_irqrestore(&zone->lock, flags); 2173 2174 return i; 2175 } 2176 2177 #ifdef CONFIG_NUMA 2178 /* 2179 * Called from the vmstat counter updater to drain pagesets of this 2180 * currently executing processor on remote nodes after they have 2181 * expired. 2182 */ 2183 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2184 { 2185 int to_drain, batch; 2186 2187 batch = READ_ONCE(pcp->batch); 2188 to_drain = min(pcp->count, batch); 2189 if (to_drain > 0) { 2190 spin_lock(&pcp->lock); 2191 free_pcppages_bulk(zone, to_drain, pcp, 0); 2192 spin_unlock(&pcp->lock); 2193 } 2194 } 2195 #endif 2196 2197 /* 2198 * Drain pcplists of the indicated processor and zone. 2199 */ 2200 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2201 { 2202 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2203 int count; 2204 2205 do { 2206 spin_lock(&pcp->lock); 2207 count = pcp->count; 2208 if (count) { 2209 int to_drain = min(count, 2210 pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX); 2211 2212 free_pcppages_bulk(zone, to_drain, pcp, 0); 2213 count -= to_drain; 2214 } 2215 spin_unlock(&pcp->lock); 2216 } while (count); 2217 } 2218 2219 /* 2220 * Drain pcplists of all zones on the indicated processor. 2221 */ 2222 static void drain_pages(unsigned int cpu) 2223 { 2224 struct zone *zone; 2225 2226 for_each_populated_zone(zone) { 2227 drain_pages_zone(cpu, zone); 2228 } 2229 } 2230 2231 /* 2232 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2233 */ 2234 void drain_local_pages(struct zone *zone) 2235 { 2236 int cpu = smp_processor_id(); 2237 2238 if (zone) 2239 drain_pages_zone(cpu, zone); 2240 else 2241 drain_pages(cpu); 2242 } 2243 2244 /* 2245 * The implementation of drain_all_pages(), exposing an extra parameter to 2246 * drain on all cpus. 2247 * 2248 * drain_all_pages() is optimized to only execute on cpus where pcplists are 2249 * not empty. The check for non-emptiness can however race with a free to 2250 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 2251 * that need the guarantee that every CPU has drained can disable the 2252 * optimizing racy check. 
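 *
 * For example, a caller that must be certain no CPU still holds pcp pages
 * (such as memory offlining after pcplists have been disabled) would pass
 * force_all_cpus == true, while drain_all_pages() below keeps the cheap
 * racy emptiness check by passing false.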
2253 */ 2254 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 2255 { 2256 int cpu; 2257 2258 /* 2259 * Allocate in the BSS so we won't require allocation in 2260 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2261 */ 2262 static cpumask_t cpus_with_pcps; 2263 2264 /* 2265 * Do not drain if one is already in progress unless it's specific to 2266 * a zone. Such callers are primarily CMA and memory hotplug and need 2267 * the drain to be complete when the call returns. 2268 */ 2269 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2270 if (!zone) 2271 return; 2272 mutex_lock(&pcpu_drain_mutex); 2273 } 2274 2275 /* 2276 * We don't care about racing with CPU hotplug event 2277 * as offline notification will cause the notified 2278 * cpu to drain that CPU pcps and on_each_cpu_mask 2279 * disables preemption as part of its processing 2280 */ 2281 for_each_online_cpu(cpu) { 2282 struct per_cpu_pages *pcp; 2283 struct zone *z; 2284 bool has_pcps = false; 2285 2286 if (force_all_cpus) { 2287 /* 2288 * The pcp.count check is racy, some callers need a 2289 * guarantee that no cpu is missed. 2290 */ 2291 has_pcps = true; 2292 } else if (zone) { 2293 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 2294 if (pcp->count) 2295 has_pcps = true; 2296 } else { 2297 for_each_populated_zone(z) { 2298 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 2299 if (pcp->count) { 2300 has_pcps = true; 2301 break; 2302 } 2303 } 2304 } 2305 2306 if (has_pcps) 2307 cpumask_set_cpu(cpu, &cpus_with_pcps); 2308 else 2309 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2310 } 2311 2312 for_each_cpu(cpu, &cpus_with_pcps) { 2313 if (zone) 2314 drain_pages_zone(cpu, zone); 2315 else 2316 drain_pages(cpu); 2317 } 2318 2319 mutex_unlock(&pcpu_drain_mutex); 2320 } 2321 2322 /* 2323 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2324 * 2325 * When zone parameter is non-NULL, spill just the single zone's pages. 2326 */ 2327 void drain_all_pages(struct zone *zone) 2328 { 2329 __drain_all_pages(zone, false); 2330 } 2331 2332 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 2333 unsigned int order) 2334 { 2335 int migratetype; 2336 2337 if (!free_pages_prepare(page, order, FPI_NONE)) 2338 return false; 2339 2340 migratetype = get_pfnblock_migratetype(page, pfn); 2341 set_pcppage_migratetype(page, migratetype); 2342 return true; 2343 } 2344 2345 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high) 2346 { 2347 int min_nr_free, max_nr_free; 2348 int batch = READ_ONCE(pcp->batch); 2349 2350 /* Free everything if batch freeing high-order pages. */ 2351 if (unlikely(free_high)) 2352 return pcp->count; 2353 2354 /* Check for PCP disabled or boot pageset */ 2355 if (unlikely(high < batch)) 2356 return 1; 2357 2358 /* Leave at least pcp->batch pages on the list */ 2359 min_nr_free = batch; 2360 max_nr_free = high - batch; 2361 2362 /* 2363 * Double the number of pages freed each time there is subsequent 2364 * freeing of pages without any allocation. 
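 *
 * For example, with a batch of 64 and free_factor already at 2, the next
 * bulk free targets 64 << 2 = 256 pages, clamped to the
 * [batch, high - batch] range below so that at least one batch worth of
 * pages stays on the pcp list.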
2365 */ 2366 batch <<= pcp->free_factor; 2367 if (batch < max_nr_free && pcp->free_factor < CONFIG_PCP_BATCH_SCALE_MAX) 2368 pcp->free_factor++; 2369 batch = clamp(batch, min_nr_free, max_nr_free); 2370 2371 return batch; 2372 } 2373 2374 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 2375 bool free_high) 2376 { 2377 int high = READ_ONCE(pcp->high); 2378 2379 if (unlikely(!high || free_high)) 2380 return 0; 2381 2382 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 2383 return high; 2384 2385 /* 2386 * If reclaim is active, limit the number of pages that can be 2387 * stored on pcp lists 2388 */ 2389 return min(READ_ONCE(pcp->batch) << 2, high); 2390 } 2391 2392 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 2393 struct page *page, int migratetype, 2394 unsigned int order) 2395 { 2396 int high; 2397 int pindex; 2398 bool free_high; 2399 2400 __count_vm_events(PGFREE, 1 << order); 2401 pindex = order_to_pindex(migratetype, order); 2402 list_add(&page->pcp_list, &pcp->lists[pindex]); 2403 pcp->count += 1 << order; 2404 2405 /* 2406 * As high-order pages other than THP's stored on PCP can contribute 2407 * to fragmentation, limit the number stored when PCP is heavily 2408 * freeing without allocation. The remainder after bulk freeing 2409 * stops will be drained from vmstat refresh context. 2410 */ 2411 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); 2412 2413 high = nr_pcp_high(pcp, zone, free_high); 2414 if (pcp->count >= high) { 2415 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, free_high), pcp, pindex); 2416 } 2417 } 2418 2419 /* 2420 * Free a pcp page 2421 */ 2422 void free_unref_page(struct page *page, unsigned int order) 2423 { 2424 unsigned long __maybe_unused UP_flags; 2425 struct per_cpu_pages *pcp; 2426 struct zone *zone; 2427 unsigned long pfn = page_to_pfn(page); 2428 int migratetype, pcpmigratetype; 2429 2430 if (!free_unref_page_prepare(page, pfn, order)) 2431 return; 2432 2433 /* 2434 * We only track unmovable, reclaimable and movable on pcp lists. 2435 * Place ISOLATE pages on the isolated list because they are being 2436 * offlined but treat HIGHATOMIC and CMA as movable pages so we can 2437 * get those areas back if necessary. 
Otherwise, we may have to free 2438 * excessively into the page allocator 2439 */ 2440 migratetype = pcpmigratetype = get_pcppage_migratetype(page); 2441 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 2442 if (unlikely(is_migrate_isolate(migratetype))) { 2443 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 2444 return; 2445 } 2446 pcpmigratetype = MIGRATE_MOVABLE; 2447 } 2448 2449 zone = page_zone(page); 2450 pcp_trylock_prepare(UP_flags); 2451 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2452 if (pcp) { 2453 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order); 2454 pcp_spin_unlock(pcp); 2455 } else { 2456 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); 2457 } 2458 pcp_trylock_finish(UP_flags); 2459 } 2460 2461 /* 2462 * Free a list of 0-order pages 2463 */ 2464 void free_unref_page_list(struct list_head *list) 2465 { 2466 unsigned long __maybe_unused UP_flags; 2467 struct page *page, *next; 2468 struct per_cpu_pages *pcp = NULL; 2469 struct zone *locked_zone = NULL; 2470 int batch_count = 0; 2471 int migratetype; 2472 2473 /* Prepare pages for freeing */ 2474 list_for_each_entry_safe(page, next, list, lru) { 2475 unsigned long pfn = page_to_pfn(page); 2476 if (!free_unref_page_prepare(page, pfn, 0)) { 2477 list_del(&page->lru); 2478 continue; 2479 } 2480 2481 /* 2482 * Free isolated pages directly to the allocator, see 2483 * comment in free_unref_page. 2484 */ 2485 migratetype = get_pcppage_migratetype(page); 2486 if (unlikely(is_migrate_isolate(migratetype))) { 2487 list_del(&page->lru); 2488 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 2489 continue; 2490 } 2491 } 2492 2493 list_for_each_entry_safe(page, next, list, lru) { 2494 struct zone *zone = page_zone(page); 2495 2496 list_del(&page->lru); 2497 migratetype = get_pcppage_migratetype(page); 2498 2499 /* 2500 * Either different zone requiring a different pcp lock or 2501 * excessive lock hold times when freeing a large list of 2502 * pages. 2503 */ 2504 if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) { 2505 if (pcp) { 2506 pcp_spin_unlock(pcp); 2507 pcp_trylock_finish(UP_flags); 2508 } 2509 2510 batch_count = 0; 2511 2512 /* 2513 * trylock is necessary as pages may be getting freed 2514 * from IRQ or SoftIRQ context after an IO completion. 2515 */ 2516 pcp_trylock_prepare(UP_flags); 2517 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2518 if (unlikely(!pcp)) { 2519 pcp_trylock_finish(UP_flags); 2520 free_one_page(zone, page, page_to_pfn(page), 2521 0, migratetype, FPI_NONE); 2522 locked_zone = NULL; 2523 continue; 2524 } 2525 locked_zone = zone; 2526 } 2527 2528 /* 2529 * Non-isolated types over MIGRATE_PCPTYPES get added 2530 * to the MIGRATE_MOVABLE pcp list. 2531 */ 2532 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 2533 migratetype = MIGRATE_MOVABLE; 2534 2535 trace_mm_page_free_batched(page); 2536 free_unref_page_commit(zone, pcp, page, migratetype, 0); 2537 batch_count++; 2538 } 2539 2540 if (pcp) { 2541 pcp_spin_unlock(pcp); 2542 pcp_trylock_finish(UP_flags); 2543 } 2544 } 2545 2546 /* 2547 * split_page takes a non-compound higher-order page, and splits it into 2548 * n (1<<order) sub-pages: page[0..n] 2549 * Each sub-page must be freed individually. 2550 * 2551 * Note: this is probably too low level an operation for use in drivers. 2552 * Please consult with lkml before using this in your driver. 
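 *
 * A minimal usage sketch (illustrative only): allocate four contiguous
 * pages, split them, then free one sub-page on its own:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page) {
 *		split_page(page, 2);
 *		__free_page(page + 3);
 *	}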
2553 */ 2554 void split_page(struct page *page, unsigned int order) 2555 { 2556 int i; 2557 2558 VM_BUG_ON_PAGE(PageCompound(page), page); 2559 VM_BUG_ON_PAGE(!page_count(page), page); 2560 2561 for (i = 1; i < (1 << order); i++) 2562 set_page_refcounted(page + i); 2563 split_page_owner(page, 1 << order); 2564 split_page_memcg(page, 1 << order); 2565 } 2566 EXPORT_SYMBOL_GPL(split_page); 2567 2568 int __isolate_free_page(struct page *page, unsigned int order) 2569 { 2570 struct zone *zone = page_zone(page); 2571 int mt = get_pageblock_migratetype(page); 2572 2573 if (!is_migrate_isolate(mt)) { 2574 unsigned long watermark; 2575 /* 2576 * Obey watermarks as if the page was being allocated. We can 2577 * emulate a high-order watermark check with a raised order-0 2578 * watermark, because we already know our high-order page 2579 * exists. 2580 */ 2581 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 2582 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2583 return 0; 2584 2585 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2586 } 2587 2588 del_page_from_free_list(page, zone, order); 2589 2590 /* 2591 * Set the pageblock if the isolated page is at least half of a 2592 * pageblock 2593 */ 2594 if (order >= pageblock_order - 1) { 2595 struct page *endpage = page + (1 << order) - 1; 2596 for (; page < endpage; page += pageblock_nr_pages) { 2597 int mt = get_pageblock_migratetype(page); 2598 /* 2599 * Only change normal pageblocks (i.e., they can merge 2600 * with others) 2601 */ 2602 if (migratetype_is_mergeable(mt)) 2603 set_pageblock_migratetype(page, 2604 MIGRATE_MOVABLE); 2605 } 2606 } 2607 2608 return 1UL << order; 2609 } 2610 2611 /** 2612 * __putback_isolated_page - Return a now-isolated page back where we got it 2613 * @page: Page that was isolated 2614 * @order: Order of the isolated page 2615 * @mt: The page's pageblock's migratetype 2616 * 2617 * This function is meant to return a page pulled from the free lists via 2618 * __isolate_free_page back to the free lists they were pulled from. 2619 */ 2620 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 2621 { 2622 struct zone *zone = page_zone(page); 2623 2624 /* zone lock should be held when this function is called */ 2625 lockdep_assert_held(&zone->lock); 2626 2627 /* Return isolated page to tail of freelist. 
*/ 2628 __free_one_page(page, page_to_pfn(page), zone, order, mt, 2629 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 2630 } 2631 2632 /* 2633 * Update NUMA hit/miss statistics 2634 */ 2635 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 2636 long nr_account) 2637 { 2638 #ifdef CONFIG_NUMA 2639 enum numa_stat_item local_stat = NUMA_LOCAL; 2640 2641 /* skip numa counters update if numa stats is disabled */ 2642 if (!static_branch_likely(&vm_numa_stat_key)) 2643 return; 2644 2645 if (zone_to_nid(z) != numa_node_id()) 2646 local_stat = NUMA_OTHER; 2647 2648 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 2649 __count_numa_events(z, NUMA_HIT, nr_account); 2650 else { 2651 __count_numa_events(z, NUMA_MISS, nr_account); 2652 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 2653 } 2654 __count_numa_events(z, local_stat, nr_account); 2655 #endif 2656 } 2657 2658 static __always_inline 2659 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 2660 unsigned int order, unsigned int alloc_flags, 2661 int migratetype) 2662 { 2663 struct page *page; 2664 unsigned long flags; 2665 2666 do { 2667 page = NULL; 2668 spin_lock_irqsave(&zone->lock, flags); 2669 if (alloc_flags & ALLOC_HIGHATOMIC) 2670 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2671 if (!page) { 2672 page = __rmqueue(zone, order, migratetype, alloc_flags); 2673 2674 /* 2675 * If the allocation fails, allow OOM handling and 2676 * order-0 (atomic) allocs access to HIGHATOMIC 2677 * reserves as failing now is worse than failing a 2678 * high-order atomic allocation in the future. 2679 */ 2680 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) 2681 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2682 2683 if (!page) { 2684 spin_unlock_irqrestore(&zone->lock, flags); 2685 return NULL; 2686 } 2687 } 2688 __mod_zone_freepage_state(zone, -(1 << order), 2689 get_pcppage_migratetype(page)); 2690 spin_unlock_irqrestore(&zone->lock, flags); 2691 } while (check_new_pages(page, order)); 2692 2693 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2694 zone_statistics(preferred_zone, zone, 1); 2695 2696 return page; 2697 } 2698 2699 /* Remove page from the per-cpu list, caller must protect the list */ 2700 static inline 2701 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 2702 int migratetype, 2703 unsigned int alloc_flags, 2704 struct per_cpu_pages *pcp, 2705 struct list_head *list) 2706 { 2707 struct page *page; 2708 2709 do { 2710 if (list_empty(list)) { 2711 int batch = READ_ONCE(pcp->batch); 2712 int alloced; 2713 2714 /* 2715 * Scale batch relative to order if batch implies 2716 * free pages can be stored on the PCP. Batch can 2717 * be 1 for small zones or for boot pagesets which 2718 * should never store free pages as the pages may 2719 * belong to arbitrary zones. 
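 *
 * For example, with a pcp batch of 64, an order-2 request refills
 * max(64 >> 2, 2) = 16 order-2 buddies (64 base pages) with one
 * rmqueue_bulk() call, and pcp->count below grows by 16 << 2.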
2720 */ 2721 if (batch > 1) 2722 batch = max(batch >> order, 2); 2723 alloced = rmqueue_bulk(zone, order, 2724 batch, list, 2725 migratetype, alloc_flags); 2726 2727 pcp->count += alloced << order; 2728 if (unlikely(list_empty(list))) 2729 return NULL; 2730 } 2731 2732 page = list_first_entry(list, struct page, pcp_list); 2733 list_del(&page->pcp_list); 2734 pcp->count -= 1 << order; 2735 } while (check_new_pages(page, order)); 2736 2737 return page; 2738 } 2739 2740 /* Lock and remove page from the per-cpu list */ 2741 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 2742 struct zone *zone, unsigned int order, 2743 int migratetype, unsigned int alloc_flags) 2744 { 2745 struct per_cpu_pages *pcp; 2746 struct list_head *list; 2747 struct page *page; 2748 unsigned long __maybe_unused UP_flags; 2749 2750 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 2751 pcp_trylock_prepare(UP_flags); 2752 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 2753 if (!pcp) { 2754 pcp_trylock_finish(UP_flags); 2755 return NULL; 2756 } 2757 2758 /* 2759 * On allocation, reduce the number of pages that are batch freed. 2760 * See nr_pcp_free() where free_factor is increased for subsequent 2761 * frees. 2762 */ 2763 pcp->free_factor >>= 1; 2764 list = &pcp->lists[order_to_pindex(migratetype, order)]; 2765 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 2766 pcp_spin_unlock(pcp); 2767 pcp_trylock_finish(UP_flags); 2768 if (page) { 2769 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2770 zone_statistics(preferred_zone, zone, 1); 2771 } 2772 return page; 2773 } 2774 2775 /* 2776 * Allocate a page from the given zone. 2777 * Use pcplists for THP or "cheap" high-order allocations. 2778 */ 2779 2780 /* 2781 * Do not instrument rmqueue() with KMSAN. This function may call 2782 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 2783 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 2784 * may call rmqueue() again, which will result in a deadlock. 2785 */ 2786 __no_sanitize_memory 2787 static inline 2788 struct page *rmqueue(struct zone *preferred_zone, 2789 struct zone *zone, unsigned int order, 2790 gfp_t gfp_flags, unsigned int alloc_flags, 2791 int migratetype) 2792 { 2793 struct page *page; 2794 2795 /* 2796 * We most definitely don't want callers attempting to 2797 * allocate greater than order-1 page units with __GFP_NOFAIL. 
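 *
 * For example, a hypothetical alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 2)
 * call would trip the warning below.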
2798 */ 2799 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 2800 2801 if (likely(pcp_allowed_order(order))) { 2802 page = rmqueue_pcplist(preferred_zone, zone, order, 2803 migratetype, alloc_flags); 2804 if (likely(page)) 2805 goto out; 2806 } 2807 2808 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 2809 migratetype); 2810 2811 out: 2812 /* Separate test+clear to avoid unnecessary atomics */ 2813 if ((alloc_flags & ALLOC_KSWAPD) && 2814 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 2815 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2816 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 2817 } 2818 2819 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 2820 return page; 2821 } 2822 2823 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2824 { 2825 return __should_fail_alloc_page(gfp_mask, order); 2826 } 2827 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 2828 2829 static inline long __zone_watermark_unusable_free(struct zone *z, 2830 unsigned int order, unsigned int alloc_flags) 2831 { 2832 long unusable_free = (1 << order) - 1; 2833 2834 /* 2835 * If the caller does not have rights to reserves below the min 2836 * watermark then subtract the high-atomic reserves. This will 2837 * over-estimate the size of the atomic reserve but it avoids a search. 2838 */ 2839 if (likely(!(alloc_flags & ALLOC_RESERVES))) 2840 unusable_free += z->nr_reserved_highatomic; 2841 2842 #ifdef CONFIG_CMA 2843 /* If allocation can't use CMA areas don't use free CMA pages */ 2844 if (!(alloc_flags & ALLOC_CMA)) 2845 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 2846 #endif 2847 2848 return unusable_free; 2849 } 2850 2851 /* 2852 * Return true if free base pages are above 'mark'. For high-order checks it 2853 * will return true if the order-0 watermark is reached and there is at least 2854 * one free page of a suitable size. Checking now avoids taking the zone lock 2855 * to check in the allocation paths if no pages are free. 2856 */ 2857 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2858 int highest_zoneidx, unsigned int alloc_flags, 2859 long free_pages) 2860 { 2861 long min = mark; 2862 int o; 2863 2864 /* free_pages may go negative - that's OK */ 2865 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 2866 2867 if (unlikely(alloc_flags & ALLOC_RESERVES)) { 2868 /* 2869 * __GFP_HIGH allows access to 50% of the min reserve as well 2870 * as OOM. 2871 */ 2872 if (alloc_flags & ALLOC_MIN_RESERVE) { 2873 min -= min / 2; 2874 2875 /* 2876 * Non-blocking allocations (e.g. GFP_ATOMIC) can 2877 * access more reserves than just __GFP_HIGH. Other 2878 * non-blocking allocation requests such as GFP_NOWAIT 2879 * or (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) do not get 2880 * access to the min reserve. 2881 */ 2882 if (alloc_flags & ALLOC_NON_BLOCK) 2883 min -= min / 4; 2884 } 2885 2886 /* 2887 * OOM victims can try even harder than the normal reserve 2888 * users on the grounds that it's definitely going to be in 2889 * the exit path shortly and free memory. Any allocation it 2890 * makes during the free path will be small and short-lived. 2891 */ 2892 if (alloc_flags & ALLOC_OOM) 2893 min -= min / 2; 2894 } 2895 2896 /* 2897 * Check watermarks for an order-0 allocation request. If these 2898 * are not met, then a high-order request also cannot go ahead 2899 * even if a suitable page happened to be free.
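 *
 * As a worked example with illustrative numbers: for mark == 1024 pages,
 * ALLOC_MIN_RESERVE lowers min to 512 and ALLOC_NON_BLOCK lowers it
 * further to 384, so such a request may dip that much deeper into the
 * reserves before the order-0 check just below fails.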
2900 */ 2901 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 2902 return false; 2903 2904 /* If this is an order-0 request then the watermark is fine */ 2905 if (!order) 2906 return true; 2907 2908 /* For a high-order request, check at least one suitable page is free */ 2909 for (o = order; o < NR_PAGE_ORDERS; o++) { 2910 struct free_area *area = &z->free_area[o]; 2911 int mt; 2912 2913 if (!area->nr_free) 2914 continue; 2915 2916 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2917 if (!free_area_empty(area, mt)) 2918 return true; 2919 } 2920 2921 #ifdef CONFIG_CMA 2922 if ((alloc_flags & ALLOC_CMA) && 2923 !free_area_empty(area, MIGRATE_CMA)) { 2924 return true; 2925 } 2926 #endif 2927 if ((alloc_flags & (ALLOC_HIGHATOMIC|ALLOC_OOM)) && 2928 !free_area_empty(area, MIGRATE_HIGHATOMIC)) { 2929 return true; 2930 } 2931 } 2932 return false; 2933 } 2934 2935 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2936 int highest_zoneidx, unsigned int alloc_flags) 2937 { 2938 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 2939 zone_page_state(z, NR_FREE_PAGES)); 2940 } 2941 2942 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 2943 unsigned long mark, int highest_zoneidx, 2944 unsigned int alloc_flags, gfp_t gfp_mask) 2945 { 2946 long free_pages; 2947 2948 free_pages = zone_page_state(z, NR_FREE_PAGES); 2949 2950 /* 2951 * Fast check for order-0 only. If this fails then the reserves 2952 * need to be calculated. 2953 */ 2954 if (!order) { 2955 long usable_free; 2956 long reserved; 2957 2958 usable_free = free_pages; 2959 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 2960 2961 /* reserved may over estimate high-atomic reserves. */ 2962 usable_free -= min(usable_free, reserved); 2963 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 2964 return true; 2965 } 2966 2967 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 2968 free_pages)) 2969 return true; 2970 2971 /* 2972 * Ignore watermark boosting for __GFP_HIGH order-0 allocations 2973 * when checking the min watermark. The min watermark is the 2974 * point where boosting is ignored so that kswapd is woken up 2975 * when below the low watermark. 
2976 */ 2977 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost 2978 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 2979 mark = z->_watermark[WMARK_MIN]; 2980 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 2981 alloc_flags, free_pages); 2982 } 2983 2984 return false; 2985 } 2986 2987 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2988 unsigned long mark, int highest_zoneidx) 2989 { 2990 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2991 2992 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2993 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2994 2995 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 2996 free_pages); 2997 } 2998 2999 #ifdef CONFIG_NUMA 3000 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 3001 3002 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3003 { 3004 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 3005 node_reclaim_distance; 3006 } 3007 #else /* CONFIG_NUMA */ 3008 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 3009 { 3010 return true; 3011 } 3012 #endif /* CONFIG_NUMA */ 3013 3014 /* 3015 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 3016 * fragmentation is subtle. If the preferred zone was HIGHMEM then 3017 * premature use of a lower zone may cause lowmem pressure problems that 3018 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 3019 * probably too small. It only makes sense to spread allocations to avoid 3020 * fragmentation between the Normal and DMA32 zones. 3021 */ 3022 static inline unsigned int 3023 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 3024 { 3025 unsigned int alloc_flags; 3026 3027 /* 3028 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3029 * to save a branch. 3030 */ 3031 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 3032 3033 #ifdef CONFIG_ZONE_DMA32 3034 if (!zone) 3035 return alloc_flags; 3036 3037 if (zone_idx(zone) != ZONE_NORMAL) 3038 return alloc_flags; 3039 3040 /* 3041 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 3042 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 3043 * on UMA that if Normal is populated then so is DMA32. 3044 */ 3045 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 3046 if (nr_online_nodes > 1 && !populated_zone(--zone)) 3047 return alloc_flags; 3048 3049 alloc_flags |= ALLOC_NOFRAGMENT; 3050 #endif /* CONFIG_ZONE_DMA32 */ 3051 return alloc_flags; 3052 } 3053 3054 /* Must be called after current_gfp_context() which can change gfp_mask */ 3055 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 3056 unsigned int alloc_flags) 3057 { 3058 #ifdef CONFIG_CMA 3059 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3060 alloc_flags |= ALLOC_CMA; 3061 #endif 3062 return alloc_flags; 3063 } 3064 3065 /* 3066 * get_page_from_freelist goes through the zonelist trying to allocate 3067 * a page. 3068 */ 3069 static struct page * 3070 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3071 const struct alloc_context *ac) 3072 { 3073 struct zoneref *z; 3074 struct zone *zone; 3075 struct pglist_data *last_pgdat = NULL; 3076 bool last_pgdat_dirty_ok = false; 3077 bool no_fallback; 3078 3079 retry: 3080 /* 3081 * Scan zonelist, looking for a zone with enough free. 3082 * See also cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 
3083 */ 3084 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 3085 z = ac->preferred_zoneref; 3086 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 3087 ac->nodemask) { 3088 struct page *page; 3089 unsigned long mark; 3090 3091 if (cpusets_enabled() && 3092 (alloc_flags & ALLOC_CPUSET) && 3093 !__cpuset_zone_allowed(zone, gfp_mask)) 3094 continue; 3095 /* 3096 * When allocating a page cache page for writing, we 3097 * want to get it from a node that is within its dirty 3098 * limit, such that no single node holds more than its 3099 * proportional share of globally allowed dirty pages. 3100 * The dirty limits take into account the node's 3101 * lowmem reserves and high watermark so that kswapd 3102 * should be able to balance it without having to 3103 * write pages from its LRU list. 3104 * 3105 * XXX: For now, allow allocations to potentially 3106 * exceed the per-node dirty limit in the slowpath 3107 * (spread_dirty_pages unset) before going into reclaim, 3108 * which is important when on a NUMA setup the allowed 3109 * nodes are together not big enough to reach the 3110 * global limit. The proper fix for these situations 3111 * will require awareness of nodes in the 3112 * dirty-throttling and the flusher threads. 3113 */ 3114 if (ac->spread_dirty_pages) { 3115 if (last_pgdat != zone->zone_pgdat) { 3116 last_pgdat = zone->zone_pgdat; 3117 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 3118 } 3119 3120 if (!last_pgdat_dirty_ok) 3121 continue; 3122 } 3123 3124 if (no_fallback && nr_online_nodes > 1 && 3125 zone != ac->preferred_zoneref->zone) { 3126 int local_nid; 3127 3128 /* 3129 * If moving to a remote node, retry but allow 3130 * fragmenting fallbacks. Locality is more important 3131 * than fragmentation avoidance. 3132 */ 3133 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 3134 if (zone_to_nid(zone) != local_nid) { 3135 alloc_flags &= ~ALLOC_NOFRAGMENT; 3136 goto retry; 3137 } 3138 } 3139 3140 cond_accept_memory(zone, order); 3141 3142 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 3143 if (!zone_watermark_fast(zone, order, mark, 3144 ac->highest_zoneidx, alloc_flags, 3145 gfp_mask)) { 3146 int ret; 3147 3148 if (cond_accept_memory(zone, order)) 3149 goto try_this_zone; 3150 3151 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3152 /* 3153 * Watermark failed for this zone, but see if we can 3154 * grow this zone if it contains deferred pages. 
3155 */ 3156 if (deferred_pages_enabled()) { 3157 if (_deferred_grow_zone(zone, order)) 3158 goto try_this_zone; 3159 } 3160 #endif 3161 /* Checked here to keep the fast path fast */ 3162 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3163 if (alloc_flags & ALLOC_NO_WATERMARKS) 3164 goto try_this_zone; 3165 3166 if (!node_reclaim_enabled() || 3167 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 3168 continue; 3169 3170 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3171 switch (ret) { 3172 case NODE_RECLAIM_NOSCAN: 3173 /* did not scan */ 3174 continue; 3175 case NODE_RECLAIM_FULL: 3176 /* scanned but unreclaimable */ 3177 continue; 3178 default: 3179 /* did we reclaim enough */ 3180 if (zone_watermark_ok(zone, order, mark, 3181 ac->highest_zoneidx, alloc_flags)) 3182 goto try_this_zone; 3183 3184 continue; 3185 } 3186 } 3187 3188 try_this_zone: 3189 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 3190 gfp_mask, alloc_flags, ac->migratetype); 3191 if (page) { 3192 prep_new_page(page, order, gfp_mask, alloc_flags); 3193 3194 /* 3195 * If this is a high-order atomic allocation then check 3196 * if the pageblock should be reserved for the future 3197 */ 3198 if (unlikely(alloc_flags & ALLOC_HIGHATOMIC)) 3199 reserve_highatomic_pageblock(page, zone); 3200 3201 return page; 3202 } else { 3203 if (cond_accept_memory(zone, order)) 3204 goto try_this_zone; 3205 3206 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 3207 /* Try again if zone has deferred pages */ 3208 if (deferred_pages_enabled()) { 3209 if (_deferred_grow_zone(zone, order)) 3210 goto try_this_zone; 3211 } 3212 #endif 3213 } 3214 } 3215 3216 /* 3217 * It's possible on a UMA machine to get through all zones that are 3218 * fragmented. If avoiding fragmentation, reset and try again. 3219 */ 3220 if (no_fallback) { 3221 alloc_flags &= ~ALLOC_NOFRAGMENT; 3222 goto retry; 3223 } 3224 3225 return NULL; 3226 } 3227 3228 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3229 { 3230 unsigned int filter = SHOW_MEM_FILTER_NODES; 3231 3232 /* 3233 * This documents exceptions given to allocations in certain 3234 * contexts that are allowed to allocate outside current's set 3235 * of allowed nodes. 3236 */ 3237 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3238 if (tsk_is_oom_victim(current) || 3239 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3240 filter &= ~SHOW_MEM_FILTER_NODES; 3241 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3242 filter &= ~SHOW_MEM_FILTER_NODES; 3243 3244 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 3245 } 3246 3247 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3248 { 3249 struct va_format vaf; 3250 va_list args; 3251 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 3252 3253 if ((gfp_mask & __GFP_NOWARN) || 3254 !__ratelimit(&nopage_rs) || 3255 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 3256 return; 3257 3258 va_start(args, fmt); 3259 vaf.fmt = fmt; 3260 vaf.va = &args; 3261 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 3262 current->comm, &vaf, gfp_mask, &gfp_mask, 3263 nodemask_pr_args(nodemask)); 3264 va_end(args); 3265 3266 cpuset_print_current_mems_allowed(); 3267 pr_cont("\n"); 3268 dump_stack(); 3269 warn_alloc_show_mem(gfp_mask, nodemask); 3270 } 3271 3272 static inline struct page * 3273 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3274 unsigned int alloc_flags, 3275 const struct alloc_context *ac) 3276 { 3277 struct page *page; 3278 3279 page = get_page_from_freelist(gfp_mask, order, 3280 alloc_flags|ALLOC_CPUSET, ac); 3281 /* 3282 * fallback to ignore cpuset restriction if our nodes 3283 * are depleted 3284 */ 3285 if (!page) 3286 page = get_page_from_freelist(gfp_mask, order, 3287 alloc_flags, ac); 3288 3289 return page; 3290 } 3291 3292 static inline struct page * 3293 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3294 const struct alloc_context *ac, unsigned long *did_some_progress) 3295 { 3296 struct oom_control oc = { 3297 .zonelist = ac->zonelist, 3298 .nodemask = ac->nodemask, 3299 .memcg = NULL, 3300 .gfp_mask = gfp_mask, 3301 .order = order, 3302 }; 3303 struct page *page; 3304 3305 *did_some_progress = 0; 3306 3307 /* 3308 * Acquire the oom lock. If that fails, somebody else is 3309 * making progress for us. 3310 */ 3311 if (!mutex_trylock(&oom_lock)) { 3312 *did_some_progress = 1; 3313 schedule_timeout_uninterruptible(1); 3314 return NULL; 3315 } 3316 3317 /* 3318 * Go through the zonelist yet one more time, keep very high watermark 3319 * here, this is only to catch a parallel oom killing, we must fail if 3320 * we're still under heavy pressure. But make sure that this reclaim 3321 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 3322 * allocation which will never fail due to oom_lock already held. 3323 */ 3324 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 3325 ~__GFP_DIRECT_RECLAIM, order, 3326 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3327 if (page) 3328 goto out; 3329 3330 /* Coredumps can quickly deplete all memory reserves */ 3331 if (current->flags & PF_DUMPCORE) 3332 goto out; 3333 /* The OOM killer will not help higher order allocs */ 3334 if (order > PAGE_ALLOC_COSTLY_ORDER) 3335 goto out; 3336 /* 3337 * We have already exhausted all our reclaim opportunities without any 3338 * success so it is time to admit defeat. We will skip the OOM killer 3339 * because it is very likely that the caller has a more reasonable 3340 * fallback than shooting a random task. 3341 * 3342 * The OOM killer may not free memory on a specific node. 3343 */ 3344 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 3345 goto out; 3346 /* The OOM killer does not needlessly kill tasks for lowmem */ 3347 if (ac->highest_zoneidx < ZONE_NORMAL) 3348 goto out; 3349 if (pm_suspended_storage()) 3350 goto out; 3351 /* 3352 * XXX: GFP_NOFS allocations should rather fail than rely on 3353 * other request to make a forward progress. 3354 * We are in an unfortunate situation where out_of_memory cannot 3355 * do much for this context but let's try it to at least get 3356 * access to memory reserved if the current task is killed (see 3357 * out_of_memory). 
Once filesystems are ready to handle allocation 3358 * failures more gracefully we should just bail out here. 3359 */ 3360 3361 /* Exhausted what can be done so it's blame time */ 3362 if (out_of_memory(&oc) || 3363 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 3364 *did_some_progress = 1; 3365 3366 /* 3367 * Help non-failing allocations by giving them access to memory 3368 * reserves 3369 */ 3370 if (gfp_mask & __GFP_NOFAIL) 3371 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3372 ALLOC_NO_WATERMARKS, ac); 3373 } 3374 out: 3375 mutex_unlock(&oom_lock); 3376 return page; 3377 } 3378 3379 /* 3380 * Maximum number of compaction retries with a progress before OOM 3381 * killer is consider as the only way to move forward. 3382 */ 3383 #define MAX_COMPACT_RETRIES 16 3384 3385 #ifdef CONFIG_COMPACTION 3386 /* Try memory compaction for high-order allocations before reclaim */ 3387 static struct page * 3388 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3389 unsigned int alloc_flags, const struct alloc_context *ac, 3390 enum compact_priority prio, enum compact_result *compact_result) 3391 { 3392 struct page *page = NULL; 3393 unsigned long pflags; 3394 unsigned int noreclaim_flag; 3395 3396 if (!order) 3397 return NULL; 3398 3399 psi_memstall_enter(&pflags); 3400 delayacct_compact_start(); 3401 noreclaim_flag = memalloc_noreclaim_save(); 3402 3403 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3404 prio, &page); 3405 3406 memalloc_noreclaim_restore(noreclaim_flag); 3407 psi_memstall_leave(&pflags); 3408 delayacct_compact_end(); 3409 3410 if (*compact_result == COMPACT_SKIPPED) 3411 return NULL; 3412 /* 3413 * At least in one zone compaction wasn't deferred or skipped, so let's 3414 * count a compaction stall 3415 */ 3416 count_vm_event(COMPACTSTALL); 3417 3418 /* Prep a captured page if available */ 3419 if (page) 3420 prep_new_page(page, order, gfp_mask, alloc_flags); 3421 3422 /* Try get a page from the freelist if available */ 3423 if (!page) 3424 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3425 3426 if (page) { 3427 struct zone *zone = page_zone(page); 3428 3429 zone->compact_blockskip_flush = false; 3430 compaction_defer_reset(zone, order, true); 3431 count_vm_event(COMPACTSUCCESS); 3432 return page; 3433 } 3434 3435 /* 3436 * It's bad if compaction run occurs and fails. The most likely reason 3437 * is that pages exist, but not enough to satisfy watermarks. 3438 */ 3439 count_vm_event(COMPACTFAIL); 3440 3441 cond_resched(); 3442 3443 return NULL; 3444 } 3445 3446 static inline bool 3447 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3448 enum compact_result compact_result, 3449 enum compact_priority *compact_priority, 3450 int *compaction_retries) 3451 { 3452 int max_retries = MAX_COMPACT_RETRIES; 3453 int min_priority; 3454 bool ret = false; 3455 int retries = *compaction_retries; 3456 enum compact_priority priority = *compact_priority; 3457 3458 if (!order) 3459 return false; 3460 3461 if (fatal_signal_pending(current)) 3462 return false; 3463 3464 /* 3465 * Compaction was skipped due to a lack of free order-0 3466 * migration targets. Continue if reclaim can help. 3467 */ 3468 if (compact_result == COMPACT_SKIPPED) { 3469 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3470 goto out; 3471 } 3472 3473 /* 3474 * Compaction managed to coalesce some page blocks, but the 3475 * allocation failed presumably due to a race. Retry some. 
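 *
 * For example, with MAX_COMPACT_RETRIES at 16, a costly request
 * (order > PAGE_ALLOC_COSTLY_ORDER) only gets 16 / 4 = 4 retries at a
 * given priority before the priority is raised further below.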
3476 */ 3477 if (compact_result == COMPACT_SUCCESS) { 3478 /* 3479 * !costly requests are much more important than 3480 * __GFP_RETRY_MAYFAIL costly ones because they are de 3481 * facto nofail and invoke OOM killer to move on while 3482 * costly can fail and users are ready to cope with 3483 * that. 1/4 retries is rather arbitrary but we would 3484 * need much more detailed feedback from compaction to 3485 * make a better decision. 3486 */ 3487 if (order > PAGE_ALLOC_COSTLY_ORDER) 3488 max_retries /= 4; 3489 3490 if (++(*compaction_retries) <= max_retries) { 3491 ret = true; 3492 goto out; 3493 } 3494 } 3495 3496 /* 3497 * Compaction failed. Retry with increasing priority. 3498 */ 3499 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3500 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3501 3502 if (*compact_priority > min_priority) { 3503 (*compact_priority)--; 3504 *compaction_retries = 0; 3505 ret = true; 3506 } 3507 out: 3508 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3509 return ret; 3510 } 3511 #else 3512 static inline struct page * 3513 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3514 unsigned int alloc_flags, const struct alloc_context *ac, 3515 enum compact_priority prio, enum compact_result *compact_result) 3516 { 3517 *compact_result = COMPACT_SKIPPED; 3518 return NULL; 3519 } 3520 3521 static inline bool 3522 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3523 enum compact_result compact_result, 3524 enum compact_priority *compact_priority, 3525 int *compaction_retries) 3526 { 3527 struct zone *zone; 3528 struct zoneref *z; 3529 3530 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3531 return false; 3532 3533 /* 3534 * There are setups with compaction disabled which would prefer to loop 3535 * inside the allocator rather than hit the oom killer prematurely. 3536 * Let's give them a good hope and keep retrying while the order-0 3537 * watermarks are OK. 
3538 */ 3539 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3540 ac->highest_zoneidx, ac->nodemask) { 3541 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3542 ac->highest_zoneidx, alloc_flags)) 3543 return true; 3544 } 3545 return false; 3546 } 3547 #endif /* CONFIG_COMPACTION */ 3548 3549 #ifdef CONFIG_LOCKDEP 3550 static struct lockdep_map __fs_reclaim_map = 3551 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 3552 3553 static bool __need_reclaim(gfp_t gfp_mask) 3554 { 3555 /* no reclaim without waiting on it */ 3556 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 3557 return false; 3558 3559 /* this guy won't enter reclaim */ 3560 if (current->flags & PF_MEMALLOC) 3561 return false; 3562 3563 if (gfp_mask & __GFP_NOLOCKDEP) 3564 return false; 3565 3566 return true; 3567 } 3568 3569 void __fs_reclaim_acquire(unsigned long ip) 3570 { 3571 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 3572 } 3573 3574 void __fs_reclaim_release(unsigned long ip) 3575 { 3576 lock_release(&__fs_reclaim_map, ip); 3577 } 3578 3579 void fs_reclaim_acquire(gfp_t gfp_mask) 3580 { 3581 gfp_mask = current_gfp_context(gfp_mask); 3582 3583 if (__need_reclaim(gfp_mask)) { 3584 if (gfp_mask & __GFP_FS) 3585 __fs_reclaim_acquire(_RET_IP_); 3586 3587 #ifdef CONFIG_MMU_NOTIFIER 3588 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 3589 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 3590 #endif 3591 3592 } 3593 } 3594 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 3595 3596 void fs_reclaim_release(gfp_t gfp_mask) 3597 { 3598 gfp_mask = current_gfp_context(gfp_mask); 3599 3600 if (__need_reclaim(gfp_mask)) { 3601 if (gfp_mask & __GFP_FS) 3602 __fs_reclaim_release(_RET_IP_); 3603 } 3604 } 3605 EXPORT_SYMBOL_GPL(fs_reclaim_release); 3606 #endif 3607 3608 /* 3609 * Zonelists may change due to hotplug during allocation. Detect when zonelists 3610 * have been rebuilt so allocation retries. Reader side does not lock and 3611 * retries the allocation if zonelist changes. Writer side is protected by the 3612 * embedded spin_lock. 
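 *
 * A reader is expected to follow the usual seqcount pattern, roughly:
 *
 *	seq = zonelist_iter_begin();
 *	... attempt the allocation ...
 *	if (check_retry_zonelist(seq))
 *		goto restart;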
3613 */ 3614 static DEFINE_SEQLOCK(zonelist_update_seq); 3615 3616 static unsigned int zonelist_iter_begin(void) 3617 { 3618 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3619 return read_seqbegin(&zonelist_update_seq); 3620 3621 return 0; 3622 } 3623 3624 static unsigned int check_retry_zonelist(unsigned int seq) 3625 { 3626 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 3627 return read_seqretry(&zonelist_update_seq, seq); 3628 3629 return seq; 3630 } 3631 3632 /* Perform direct synchronous page reclaim */ 3633 static unsigned long 3634 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3635 const struct alloc_context *ac) 3636 { 3637 unsigned int noreclaim_flag; 3638 unsigned long progress; 3639 3640 cond_resched(); 3641 3642 /* We now go into synchronous reclaim */ 3643 cpuset_memory_pressure_bump(); 3644 fs_reclaim_acquire(gfp_mask); 3645 noreclaim_flag = memalloc_noreclaim_save(); 3646 3647 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3648 ac->nodemask); 3649 3650 memalloc_noreclaim_restore(noreclaim_flag); 3651 fs_reclaim_release(gfp_mask); 3652 3653 cond_resched(); 3654 3655 return progress; 3656 } 3657 3658 /* The really slow allocator path where we enter direct reclaim */ 3659 static inline struct page * 3660 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3661 unsigned int alloc_flags, const struct alloc_context *ac, 3662 unsigned long *did_some_progress) 3663 { 3664 struct page *page = NULL; 3665 unsigned long pflags; 3666 bool drained = false; 3667 3668 psi_memstall_enter(&pflags); 3669 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3670 if (unlikely(!(*did_some_progress))) 3671 goto out; 3672 3673 retry: 3674 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3675 3676 /* 3677 * If an allocation failed after direct reclaim, it could be because 3678 * pages are pinned on the per-cpu lists or in high alloc reserves. 3679 * Shrink them and try again 3680 */ 3681 if (!page && !drained) { 3682 unreserve_highatomic_pageblock(ac, false); 3683 drain_all_pages(NULL); 3684 drained = true; 3685 goto retry; 3686 } 3687 out: 3688 psi_memstall_leave(&pflags); 3689 3690 return page; 3691 } 3692 3693 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 3694 const struct alloc_context *ac) 3695 { 3696 struct zoneref *z; 3697 struct zone *zone; 3698 pg_data_t *last_pgdat = NULL; 3699 enum zone_type highest_zoneidx = ac->highest_zoneidx; 3700 3701 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 3702 ac->nodemask) { 3703 if (!managed_zone(zone)) 3704 continue; 3705 if (last_pgdat != zone->zone_pgdat) { 3706 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 3707 last_pgdat = zone->zone_pgdat; 3708 } 3709 } 3710 } 3711 3712 static inline unsigned int 3713 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) 3714 { 3715 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3716 3717 /* 3718 * __GFP_HIGH is assumed to be the same as ALLOC_MIN_RESERVE 3719 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 3720 * to save two branches. 3721 */ 3722 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); 3723 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 3724 3725 /* 3726 * The caller may dip into page reserves a bit more if the caller 3727 * cannot run direct reclaim, or if the caller has realtime scheduling 3728 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3729 * set both ALLOC_NON_BLOCK and ALLOC_MIN_RESERVE(__GFP_HIGH). 
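 *
 * For example, a GFP_ATOMIC request (__GFP_HIGH | __GFP_KSWAPD_RECLAIM,
 * no direct reclaim) ends up with ALLOC_WMARK_MIN | ALLOC_KSWAPD |
 * ALLOC_MIN_RESERVE | ALLOC_NON_BLOCK, plus ALLOC_HIGHATOMIC for order > 0,
 * and has ALLOC_CPUSET cleared again further down.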
3730 */ 3731 alloc_flags |= (__force int) 3732 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 3733 3734 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) { 3735 /* 3736 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 3737 * if it can't schedule. 3738 */ 3739 if (!(gfp_mask & __GFP_NOMEMALLOC)) { 3740 alloc_flags |= ALLOC_NON_BLOCK; 3741 3742 if (order > 0) 3743 alloc_flags |= ALLOC_HIGHATOMIC; 3744 } 3745 3746 /* 3747 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably 3748 * GFP_ATOMIC) rather than fail, see the comment for 3749 * cpuset_node_allowed(). 3750 */ 3751 if (alloc_flags & ALLOC_MIN_RESERVE) 3752 alloc_flags &= ~ALLOC_CPUSET; 3753 } else if (unlikely(rt_task(current)) && in_task()) 3754 alloc_flags |= ALLOC_MIN_RESERVE; 3755 3756 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 3757 3758 return alloc_flags; 3759 } 3760 3761 static bool oom_reserves_allowed(struct task_struct *tsk) 3762 { 3763 if (!tsk_is_oom_victim(tsk)) 3764 return false; 3765 3766 /* 3767 * !MMU doesn't have oom reaper so give access to memory reserves 3768 * only to the thread with TIF_MEMDIE set 3769 */ 3770 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 3771 return false; 3772 3773 return true; 3774 } 3775 3776 /* 3777 * Distinguish requests which really need access to full memory 3778 * reserves from oom victims which can live with a portion of it 3779 */ 3780 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 3781 { 3782 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 3783 return 0; 3784 if (gfp_mask & __GFP_MEMALLOC) 3785 return ALLOC_NO_WATERMARKS; 3786 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 3787 return ALLOC_NO_WATERMARKS; 3788 if (!in_interrupt()) { 3789 if (current->flags & PF_MEMALLOC) 3790 return ALLOC_NO_WATERMARKS; 3791 else if (oom_reserves_allowed(current)) 3792 return ALLOC_OOM; 3793 } 3794 3795 return 0; 3796 } 3797 3798 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 3799 { 3800 return !!__gfp_pfmemalloc_flags(gfp_mask); 3801 } 3802 3803 /* 3804 * Checks whether it makes sense to retry the reclaim to make a forward progress 3805 * for the given allocation request. 3806 * 3807 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 3808 * without success, or when we couldn't even meet the watermark if we 3809 * reclaimed all remaining pages on the LRU lists. 3810 * 3811 * Returns true if a retry is viable or false to enter the oom path. 3812 */ 3813 static inline bool 3814 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 3815 struct alloc_context *ac, int alloc_flags, 3816 bool did_some_progress, int *no_progress_loops) 3817 { 3818 struct zone *zone; 3819 struct zoneref *z; 3820 bool ret = false; 3821 3822 /* 3823 * Costly allocations might have made a progress but this doesn't mean 3824 * their order will become available due to high fragmentation so 3825 * always increment the no progress counter for them 3826 */ 3827 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 3828 *no_progress_loops = 0; 3829 else 3830 (*no_progress_loops)++; 3831 3832 if (*no_progress_loops > MAX_RECLAIM_RETRIES) 3833 goto out; 3834 3835 3836 /* 3837 * Keep reclaiming pages while there is a chance this will lead 3838 * somewhere. If none of the target zones can satisfy our allocation 3839 * request even if all reclaimable pages are considered then we are 3840 * screwed and have to go OOM. 
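 *
 * In other words, for each zone we ask: would this allocation pass the
 * min watermark if every reclaimable page in the zone were freed on top
 * of what is free already?  If the answer is yes for any zone, another
 * round of reclaim can still help.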
3841 */ 3842 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3843 ac->highest_zoneidx, ac->nodemask) { 3844 unsigned long available; 3845 unsigned long reclaimable; 3846 unsigned long min_wmark = min_wmark_pages(zone); 3847 bool wmark; 3848 3849 available = reclaimable = zone_reclaimable_pages(zone); 3850 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 3851 3852 /* 3853 * Would the allocation succeed if we reclaimed all 3854 * reclaimable pages? 3855 */ 3856 wmark = __zone_watermark_ok(zone, order, min_wmark, 3857 ac->highest_zoneidx, alloc_flags, available); 3858 trace_reclaim_retry_zone(z, order, reclaimable, 3859 available, min_wmark, *no_progress_loops, wmark); 3860 if (wmark) { 3861 ret = true; 3862 break; 3863 } 3864 } 3865 3866 /* 3867 * Memory allocation/reclaim might be called from a WQ context and the 3868 * current implementation of the WQ concurrency control doesn't 3869 * recognize that a particular WQ is congested if the worker thread is 3870 * looping without ever sleeping. Therefore we have to do a short sleep 3871 * here rather than calling cond_resched(). 3872 */ 3873 if (current->flags & PF_WQ_WORKER) 3874 schedule_timeout_uninterruptible(1); 3875 else 3876 cond_resched(); 3877 out: 3878 /* Before OOM, exhaust highatomic_reserve */ 3879 if (!ret) 3880 return unreserve_highatomic_pageblock(ac, true); 3881 3882 return ret; 3883 } 3884 3885 static inline bool 3886 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 3887 { 3888 /* 3889 * It's possible that cpuset's mems_allowed and the nodemask from 3890 * mempolicy don't intersect. This should be normally dealt with by 3891 * policy_nodemask(), but it's possible to race with cpuset update in 3892 * such a way the check therein was true, and then it became false 3893 * before we got our cpuset_mems_cookie here. 3894 * This assumes that for all allocations, ac->nodemask can come only 3895 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 3896 * when it does not intersect with the cpuset restrictions) or the 3897 * caller can deal with a violated nodemask. 3898 */ 3899 if (cpusets_enabled() && ac->nodemask && 3900 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 3901 ac->nodemask = NULL; 3902 return true; 3903 } 3904 3905 /* 3906 * When updating a task's mems_allowed or mempolicy nodemask, it is 3907 * possible to race with parallel threads in such a way that our 3908 * allocation can fail while the mask is being updated. If we are about 3909 * to fail, check if the cpuset changed during allocation and if so, 3910 * retry. 
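 *
 * The cookie follows the usual seqcount-style pattern (sketch only; the
 * real caller is __alloc_pages_slowpath()):
 *
 *	cookie = read_mems_allowed_begin();
 *	... allocation attempts ...
 *	if (about_to_fail && read_mems_allowed_retry(cookie))
 *		goto restart;	// about_to_fail is an illustrative placeholder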
3911 */ 3912 if (read_mems_allowed_retry(cpuset_mems_cookie)) 3913 return true; 3914 3915 return false; 3916 } 3917 3918 static inline struct page * 3919 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 3920 struct alloc_context *ac) 3921 { 3922 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 3923 bool can_compact = gfp_compaction_allowed(gfp_mask); 3924 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 3925 struct page *page = NULL; 3926 unsigned int alloc_flags; 3927 unsigned long did_some_progress; 3928 enum compact_priority compact_priority; 3929 enum compact_result compact_result; 3930 int compaction_retries; 3931 int no_progress_loops; 3932 unsigned int cpuset_mems_cookie; 3933 unsigned int zonelist_iter_cookie; 3934 int reserve_flags; 3935 3936 restart: 3937 compaction_retries = 0; 3938 no_progress_loops = 0; 3939 compact_priority = DEF_COMPACT_PRIORITY; 3940 cpuset_mems_cookie = read_mems_allowed_begin(); 3941 zonelist_iter_cookie = zonelist_iter_begin(); 3942 3943 /* 3944 * The fast path uses conservative alloc_flags to succeed only until 3945 * kswapd needs to be woken up, and to avoid the cost of setting up 3946 * alloc_flags precisely. So we do that now. 3947 */ 3948 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); 3949 3950 /* 3951 * We need to recalculate the starting point for the zonelist iterator 3952 * because we might have used different nodemask in the fast path, or 3953 * there was a cpuset modification and we are retrying - otherwise we 3954 * could end up iterating over non-eligible zones endlessly. 3955 */ 3956 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 3957 ac->highest_zoneidx, ac->nodemask); 3958 if (!ac->preferred_zoneref->zone) 3959 goto nopage; 3960 3961 /* 3962 * Check for insane configurations where the cpuset doesn't contain 3963 * any suitable zone to satisfy the request - e.g. non-movable 3964 * GFP_HIGHUSER allocations from MOVABLE nodes only. 3965 */ 3966 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 3967 struct zoneref *z = first_zones_zonelist(ac->zonelist, 3968 ac->highest_zoneidx, 3969 &cpuset_current_mems_allowed); 3970 if (!z->zone) 3971 goto nopage; 3972 } 3973 3974 if (alloc_flags & ALLOC_KSWAPD) 3975 wake_all_kswapds(order, gfp_mask, ac); 3976 3977 /* 3978 * The adjusted alloc_flags might result in immediate success, so try 3979 * that first 3980 */ 3981 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3982 if (page) 3983 goto got_pg; 3984 3985 /* 3986 * For costly allocations, try direct compaction first, as it's likely 3987 * that we have enough base pages and don't need to reclaim. For non- 3988 * movable high-order allocations, do that as well, as compaction will 3989 * try prevent permanent fragmentation by migrating from blocks of the 3990 * same migratetype. 3991 * Don't try this for allocations that are allowed to ignore 3992 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 
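 *
 * "Costly" here means order > PAGE_ALLOC_COSTLY_ORDER (3), i.e. order-4
 * and above - for example a 64kB-contiguous request with 4kB pages.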
3993 */ 3994 if (can_direct_reclaim && can_compact && 3995 (costly_order || 3996 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 3997 && !gfp_pfmemalloc_allowed(gfp_mask)) { 3998 page = __alloc_pages_direct_compact(gfp_mask, order, 3999 alloc_flags, ac, 4000 INIT_COMPACT_PRIORITY, 4001 &compact_result); 4002 if (page) 4003 goto got_pg; 4004 4005 /* 4006 * Checks for costly allocations with __GFP_NORETRY, which 4007 * includes some THP page fault allocations 4008 */ 4009 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 4010 /* 4011 * If allocating entire pageblock(s) and compaction 4012 * failed because all zones are below low watermarks 4013 * or is prohibited because it recently failed at this 4014 * order, fail immediately unless the allocator has 4015 * requested compaction and reclaim retry. 4016 * 4017 * Reclaim is 4018 * - potentially very expensive because zones are far 4019 * below their low watermarks or this is part of very 4020 * bursty high order allocations, 4021 * - not guaranteed to help because isolate_freepages() 4022 * may not iterate over freed pages as part of its 4023 * linear scan, and 4024 * - unlikely to make entire pageblocks free on its 4025 * own. 4026 */ 4027 if (compact_result == COMPACT_SKIPPED || 4028 compact_result == COMPACT_DEFERRED) 4029 goto nopage; 4030 4031 /* 4032 * Looks like reclaim/compaction is worth trying, but 4033 * sync compaction could be very expensive, so keep 4034 * using async compaction. 4035 */ 4036 compact_priority = INIT_COMPACT_PRIORITY; 4037 } 4038 } 4039 4040 retry: 4041 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 4042 if (alloc_flags & ALLOC_KSWAPD) 4043 wake_all_kswapds(order, gfp_mask, ac); 4044 4045 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 4046 if (reserve_flags) 4047 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 4048 (alloc_flags & ALLOC_KSWAPD); 4049 4050 /* 4051 * Reset the nodemask and zonelist iterators if memory policies can be 4052 * ignored. These allocations are high priority and system rather than 4053 * user oriented. 
4054 */ 4055 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 4056 ac->nodemask = NULL; 4057 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4058 ac->highest_zoneidx, ac->nodemask); 4059 } 4060 4061 /* Attempt with potentially adjusted zonelist and alloc_flags */ 4062 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4063 if (page) 4064 goto got_pg; 4065 4066 /* Caller is not willing to reclaim, we can't balance anything */ 4067 if (!can_direct_reclaim) 4068 goto nopage; 4069 4070 /* Avoid recursion of direct reclaim */ 4071 if (current->flags & PF_MEMALLOC) 4072 goto nopage; 4073 4074 /* Try direct reclaim and then allocating */ 4075 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 4076 &did_some_progress); 4077 if (page) 4078 goto got_pg; 4079 4080 /* Try direct compaction and then allocating */ 4081 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 4082 compact_priority, &compact_result); 4083 if (page) 4084 goto got_pg; 4085 4086 /* Do not loop if specifically requested */ 4087 if (gfp_mask & __GFP_NORETRY) 4088 goto nopage; 4089 4090 /* 4091 * Do not retry costly high order allocations unless they are 4092 * __GFP_RETRY_MAYFAIL and we can compact 4093 */ 4094 if (costly_order && (!can_compact || 4095 !(gfp_mask & __GFP_RETRY_MAYFAIL))) 4096 goto nopage; 4097 4098 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 4099 did_some_progress > 0, &no_progress_loops)) 4100 goto retry; 4101 4102 /* 4103 * It doesn't make any sense to retry the compaction if order-0 4104 * reclaim is not able to make any progress, because the current 4105 * implementation of compaction depends on a sufficient amount 4106 * of free memory (see __compaction_suitable) 4107 */ 4108 if (did_some_progress > 0 && can_compact && 4109 should_compact_retry(ac, order, alloc_flags, 4110 compact_result, &compact_priority, 4111 &compaction_retries)) 4112 goto retry; 4113 4114 4115 /* 4116 * Deal with possible cpuset update races or zonelist updates to avoid 4117 * an unnecessary OOM kill. 4118 */ 4119 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4120 check_retry_zonelist(zonelist_iter_cookie)) 4121 goto restart; 4122 4123 /* Reclaim has failed us, start killing things */ 4124 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 4125 if (page) 4126 goto got_pg; 4127 4128 /* Avoid allocations with no watermarks from looping endlessly */ 4129 if (tsk_is_oom_victim(current) && 4130 (alloc_flags & ALLOC_OOM || 4131 (gfp_mask & __GFP_NOMEMALLOC))) 4132 goto nopage; 4133 4134 /* Retry as long as the OOM killer is making progress */ 4135 if (did_some_progress) { 4136 no_progress_loops = 0; 4137 goto retry; 4138 } 4139 4140 nopage: 4141 /* 4142 * Deal with possible cpuset update races or zonelist updates to avoid 4143 * an unnecessary OOM kill.
4144 */ 4145 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 4146 check_retry_zonelist(zonelist_iter_cookie)) 4147 goto restart; 4148 4149 /* 4150 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure 4151 * we always retry 4152 */ 4153 if (gfp_mask & __GFP_NOFAIL) { 4154 /* 4155 * All existing users of __GFP_NOFAIL are blockable, so warn 4156 * about any new users that actually require GFP_NOWAIT 4157 */ 4158 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask)) 4159 goto fail; 4160 4161 /* 4162 * A PF_MEMALLOC request from this context is rather bizarre 4163 * because we cannot reclaim anything and can only loop waiting 4164 * for somebody to do the work for us 4165 */ 4166 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask); 4167 4168 /* 4169 * Non-failing costly orders are a hard requirement which we 4170 * are not well prepared for, so let's warn about these users 4171 * so that we can identify them and convert them to something 4172 * else. 4173 */ 4174 WARN_ON_ONCE_GFP(costly_order, gfp_mask); 4175 4176 /* 4177 * Help non-failing allocations by giving some access to memory 4178 * reserves normally used for high priority non-blocking 4179 * allocations but do not use ALLOC_NO_WATERMARKS because this 4180 * could deplete whole memory reserves which would just make 4181 * the situation worse. 4182 */ 4183 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); 4184 if (page) 4185 goto got_pg; 4186 4187 cond_resched(); 4188 goto retry; 4189 } 4190 fail: 4191 warn_alloc(gfp_mask, ac->nodemask, 4192 "page allocation failure: order:%u", order); 4193 got_pg: 4194 return page; 4195 } 4196 4197 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 4198 int preferred_nid, nodemask_t *nodemask, 4199 struct alloc_context *ac, gfp_t *alloc_gfp, 4200 unsigned int *alloc_flags) 4201 { 4202 ac->highest_zoneidx = gfp_zone(gfp_mask); 4203 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 4204 ac->nodemask = nodemask; 4205 ac->migratetype = gfp_migratetype(gfp_mask); 4206 4207 if (cpusets_enabled()) { 4208 *alloc_gfp |= __GFP_HARDWALL; 4209 /* 4210 * When we are in interrupt context, the cpuset of the current 4211 * task is irrelevant, so any node is ok. 4212 */ 4213 if (in_task() && !ac->nodemask) 4214 ac->nodemask = &cpuset_current_mems_allowed; 4215 else 4216 *alloc_flags |= ALLOC_CPUSET; 4217 } 4218 4219 might_alloc(gfp_mask); 4220 4221 if (should_fail_alloc_page(gfp_mask, order)) 4222 return false; 4223 4224 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 4225 4226 /* Dirty zone balancing only done in the fast path */ 4227 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 4228 4229 /* 4230 * The preferred zone is used for statistics but crucially it is 4231 * also used as the starting point for the zonelist iterator. It 4232 * may get reset for allocations that ignore memory policies.
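 *
 * The zone walk then starts from this zoneref, roughly (sketch of what
 * get_page_from_freelist() and the bulk allocator below do):
 *
 *	z = ac->preferred_zoneref;
 *	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
 *					ac->nodemask) {
 *		// try to allocate from this zone
 *	}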
4233 */ 4234 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 4235 ac->highest_zoneidx, ac->nodemask); 4236 4237 return true; 4238 } 4239 4240 /* 4241 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 4242 * @gfp: GFP flags for the allocation 4243 * @preferred_nid: The preferred NUMA node ID to allocate from 4244 * @nodemask: Set of nodes to allocate from, may be NULL 4245 * @nr_pages: The number of pages desired on the list or array 4246 * @page_list: Optional list to store the allocated pages 4247 * @page_array: Optional array to store the pages 4248 * 4249 * This is a batched version of the page allocator that attempts to 4250 * allocate nr_pages quickly. Pages are added to page_list if page_list 4251 * is not NULL, otherwise it is assumed that the page_array is valid. 4252 * 4253 * For lists, nr_pages is the number of pages that should be allocated. 4254 * 4255 * For arrays, only NULL elements are populated with pages and nr_pages 4256 * is the maximum number of pages that will be stored in the array. 4257 * 4258 * Returns the number of pages on the list or array. 4259 */ 4260 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, 4261 nodemask_t *nodemask, int nr_pages, 4262 struct list_head *page_list, 4263 struct page **page_array) 4264 { 4265 struct page *page; 4266 unsigned long __maybe_unused UP_flags; 4267 struct zone *zone; 4268 struct zoneref *z; 4269 struct per_cpu_pages *pcp; 4270 struct list_head *pcp_list; 4271 struct alloc_context ac; 4272 gfp_t alloc_gfp; 4273 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4274 int nr_populated = 0, nr_account = 0; 4275 4276 /* 4277 * Skip populated array elements to determine if any pages need 4278 * to be allocated before disabling IRQs. 4279 */ 4280 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 4281 nr_populated++; 4282 4283 /* No pages requested? */ 4284 if (unlikely(nr_pages <= 0)) 4285 goto out; 4286 4287 /* Already populated array? */ 4288 if (unlikely(page_array && nr_pages - nr_populated == 0)) 4289 goto out; 4290 4291 /* Bulk allocator does not support memcg accounting. */ 4292 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT)) 4293 goto failed; 4294 4295 /* Use the single page allocator for one page. */ 4296 if (nr_pages - nr_populated == 1) 4297 goto failed; 4298 4299 #ifdef CONFIG_PAGE_OWNER 4300 /* 4301 * PAGE_OWNER may recurse into the allocator to allocate space to 4302 * save the stack with pagesets.lock held. Releasing/reacquiring 4303 * removes much of the performance benefit of bulk allocation so 4304 * force the caller to allocate one page at a time as it'll have 4305 * similar performance to added complexity to the bulk allocator. 4306 */ 4307 if (static_branch_unlikely(&page_owner_inited)) 4308 goto failed; 4309 #endif 4310 4311 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 4312 gfp &= gfp_allowed_mask; 4313 alloc_gfp = gfp; 4314 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 4315 goto out; 4316 gfp = alloc_gfp; 4317 4318 /* Find an allowed local zone that meets the low watermark. 
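 *
 * Note that nr_pages is added on top of the watermark below so that the
 * whole batch can be taken without pushing the zone under its low
 * watermark; otherwise we fall back to the single page allocator, which
 * is allowed to reclaim.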
*/ 4319 z = ac.preferred_zoneref; 4320 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { 4321 unsigned long mark; 4322 4323 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 4324 !__cpuset_zone_allowed(zone, gfp)) { 4325 continue; 4326 } 4327 4328 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && 4329 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { 4330 goto failed; 4331 } 4332 4333 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 4334 if (zone_watermark_fast(zone, 0, mark, 4335 zonelist_zone_idx(ac.preferred_zoneref), 4336 alloc_flags, gfp)) { 4337 break; 4338 } 4339 } 4340 4341 /* 4342 * If there are no allowed local zones that meets the watermarks then 4343 * try to allocate a single page and reclaim if necessary. 4344 */ 4345 if (unlikely(!zone)) 4346 goto failed; 4347 4348 /* spin_trylock may fail due to a parallel drain or IRQ reentrancy. */ 4349 pcp_trylock_prepare(UP_flags); 4350 pcp = pcp_spin_trylock(zone->per_cpu_pageset); 4351 if (!pcp) 4352 goto failed_irq; 4353 4354 /* Attempt the batch allocation */ 4355 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 4356 while (nr_populated < nr_pages) { 4357 4358 /* Skip existing pages */ 4359 if (page_array && page_array[nr_populated]) { 4360 nr_populated++; 4361 continue; 4362 } 4363 4364 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 4365 pcp, pcp_list); 4366 if (unlikely(!page)) { 4367 /* Try and allocate at least one page */ 4368 if (!nr_account) { 4369 pcp_spin_unlock(pcp); 4370 goto failed_irq; 4371 } 4372 break; 4373 } 4374 nr_account++; 4375 4376 prep_new_page(page, 0, gfp, 0); 4377 if (page_list) 4378 list_add(&page->lru, page_list); 4379 else 4380 page_array[nr_populated] = page; 4381 nr_populated++; 4382 } 4383 4384 pcp_spin_unlock(pcp); 4385 pcp_trylock_finish(UP_flags); 4386 4387 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 4388 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); 4389 4390 out: 4391 return nr_populated; 4392 4393 failed_irq: 4394 pcp_trylock_finish(UP_flags); 4395 4396 failed: 4397 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); 4398 if (page) { 4399 if (page_list) 4400 list_add(&page->lru, page_list); 4401 else 4402 page_array[nr_populated] = page; 4403 nr_populated++; 4404 } 4405 4406 goto out; 4407 } 4408 EXPORT_SYMBOL_GPL(__alloc_pages_bulk); 4409 4410 /* 4411 * This is the 'heart' of the zoned buddy allocator. 4412 */ 4413 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, 4414 nodemask_t *nodemask) 4415 { 4416 struct page *page; 4417 unsigned int alloc_flags = ALLOC_WMARK_LOW; 4418 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 4419 struct alloc_context ac = { }; 4420 4421 /* 4422 * There are several places where we assume that the order value is sane 4423 * so bail out early if the request is out of bound. 4424 */ 4425 if (WARN_ON_ONCE_GFP(order > MAX_ORDER, gfp)) 4426 return NULL; 4427 4428 gfp &= gfp_allowed_mask; 4429 /* 4430 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4431 * resp. GFP_NOIO which has to be inherited for all allocation requests 4432 * from a particular context which has been marked by 4433 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 4434 * movable zones are not used during allocation. 
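 *
 * On the caller's side the scoped API looks like this (sketch only):
 *
 *	unsigned int nofs = memalloc_nofs_save();
 *	page = alloc_pages(GFP_KERNEL, 0);	// behaves as GFP_NOFS here
 *	memalloc_nofs_restore(nofs);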
4435 */ 4436 gfp = current_gfp_context(gfp); 4437 alloc_gfp = gfp; 4438 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 4439 &alloc_gfp, &alloc_flags)) 4440 return NULL; 4441 4442 /* 4443 * Forbid the first pass from falling back to types that fragment 4444 * memory until all local zones are considered. 4445 */ 4446 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 4447 4448 /* First allocation attempt */ 4449 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 4450 if (likely(page)) 4451 goto out; 4452 4453 alloc_gfp = gfp; 4454 ac.spread_dirty_pages = false; 4455 4456 /* 4457 * Restore the original nodemask if it was potentially replaced with 4458 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 4459 */ 4460 ac.nodemask = nodemask; 4461 4462 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 4463 4464 out: 4465 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && 4466 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 4467 __free_pages(page, order); 4468 page = NULL; 4469 } 4470 4471 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 4472 kmsan_alloc_page(page, order, alloc_gfp); 4473 4474 return page; 4475 } 4476 EXPORT_SYMBOL(__alloc_pages); 4477 4478 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 4479 nodemask_t *nodemask) 4480 { 4481 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 4482 preferred_nid, nodemask); 4483 return page_rmappable_folio(page); 4484 } 4485 EXPORT_SYMBOL(__folio_alloc); 4486 4487 /* 4488 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 4489 * address cannot represent highmem pages. Use alloc_pages and then kmap if 4490 * you need to access high mem. 4491 */ 4492 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 4493 { 4494 struct page *page; 4495 4496 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 4497 if (!page) 4498 return 0; 4499 return (unsigned long) page_address(page); 4500 } 4501 EXPORT_SYMBOL(__get_free_pages); 4502 4503 unsigned long get_zeroed_page(gfp_t gfp_mask) 4504 { 4505 return __get_free_page(gfp_mask | __GFP_ZERO); 4506 } 4507 EXPORT_SYMBOL(get_zeroed_page); 4508 4509 /** 4510 * __free_pages - Free pages allocated with alloc_pages(). 4511 * @page: The page pointer returned from alloc_pages(). 4512 * @order: The order of the allocation. 4513 * 4514 * This function can free multi-page allocations that are not compound 4515 * pages. It does not check that the @order passed in matches that of 4516 * the allocation, so it is easy to leak memory. Freeing more memory 4517 * than was allocated will probably emit a warning. 4518 * 4519 * If the last reference to this page is speculative, it will be released 4520 * by put_page() which only frees the first page of a non-compound 4521 * allocation. To prevent the remaining pages from being leaked, we free 4522 * the subsequent pages here. If you want to use the page's reference 4523 * count to decide when to free the allocation, you should allocate a 4524 * compound page, and use put_page() instead of __free_pages(). 4525 * 4526 * Context: May be called in interrupt context or while holding a normal 4527 * spinlock, but not in NMI context or while holding a raw spinlock. 
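 *
 * A typical pairing looks like (illustrative only):
 *
 *	page = alloc_pages(GFP_KERNEL, 2);	// 4 contiguous pages
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);			// same order as the allocation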
4528 */ 4529 void __free_pages(struct page *page, unsigned int order) 4530 { 4531 /* get PageHead before we drop reference */ 4532 int head = PageHead(page); 4533 4534 if (put_page_testzero(page)) 4535 free_the_page(page, order); 4536 else if (!head) 4537 while (order-- > 0) 4538 free_the_page(page + (1 << order), order); 4539 } 4540 EXPORT_SYMBOL(__free_pages); 4541 4542 void free_pages(unsigned long addr, unsigned int order) 4543 { 4544 if (addr != 0) { 4545 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4546 __free_pages(virt_to_page((void *)addr), order); 4547 } 4548 } 4549 4550 EXPORT_SYMBOL(free_pages); 4551 4552 /* 4553 * Page Fragment: 4554 * An arbitrary-length arbitrary-offset area of memory which resides 4555 * within a 0 or higher order page. Multiple fragments within that page 4556 * are individually refcounted, in the page's reference counter. 4557 * 4558 * The page_frag functions below provide a simple allocation framework for 4559 * page fragments. This is used by the network stack and network device 4560 * drivers to provide a backing region of memory for use as either an 4561 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 4562 */ 4563 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4564 gfp_t gfp_mask) 4565 { 4566 struct page *page = NULL; 4567 gfp_t gfp = gfp_mask; 4568 4569 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4570 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 4571 __GFP_NOMEMALLOC; 4572 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4573 PAGE_FRAG_CACHE_MAX_ORDER); 4574 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4575 #endif 4576 if (unlikely(!page)) 4577 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4578 4579 nc->va = page ? page_address(page) : NULL; 4580 4581 return page; 4582 } 4583 4584 void __page_frag_cache_drain(struct page *page, unsigned int count) 4585 { 4586 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4587 4588 if (page_ref_sub_and_test(page, count)) 4589 free_the_page(page, compound_order(page)); 4590 } 4591 EXPORT_SYMBOL(__page_frag_cache_drain); 4592 4593 void *page_frag_alloc_align(struct page_frag_cache *nc, 4594 unsigned int fragsz, gfp_t gfp_mask, 4595 unsigned int align_mask) 4596 { 4597 unsigned int size = PAGE_SIZE; 4598 struct page *page; 4599 int offset; 4600 4601 if (unlikely(!nc->va)) { 4602 refill: 4603 page = __page_frag_cache_refill(nc, gfp_mask); 4604 if (!page) 4605 return NULL; 4606 4607 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4608 /* if size can vary use size else just use PAGE_SIZE */ 4609 size = nc->size; 4610 #endif 4611 /* Even if we own the page, we do not use atomic_set(). 4612 * This would break get_page_unless_zero() users. 
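 *
 * Instead the refcount is biased: the page arrives with a refcount of 1,
 * page_ref_add() below raises it to PAGE_FRAG_CACHE_MAX_SIZE + 1, and
 * pagecnt_bias starts at the same value.  Handing out a fragment only
 * decrements pagecnt_bias; users drop their real references through
 * page_frag_free().  When the cache is recycled,
 * page_ref_sub_and_test(page, pagecnt_bias) strips the bias that was
 * never handed out, so the page can only go away (or be reused) once
 * every outstanding fragment has been freed.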
4613 */ 4614 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 4615 4616 /* reset page count bias and offset to start of new frag */ 4617 nc->pfmemalloc = page_is_pfmemalloc(page); 4618 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4619 nc->offset = size; 4620 } 4621 4622 offset = nc->offset - fragsz; 4623 if (unlikely(offset < 0)) { 4624 page = virt_to_page(nc->va); 4625 4626 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 4627 goto refill; 4628 4629 if (unlikely(nc->pfmemalloc)) { 4630 free_the_page(page, compound_order(page)); 4631 goto refill; 4632 } 4633 4634 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4635 /* if size can vary use size else just use PAGE_SIZE */ 4636 size = nc->size; 4637 #endif 4638 /* OK, page count is 0, we can safely set it */ 4639 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 4640 4641 /* reset page count bias and offset to start of new frag */ 4642 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 4643 offset = size - fragsz; 4644 if (unlikely(offset < 0)) { 4645 /* 4646 * The caller is trying to allocate a fragment 4647 * with fragsz > PAGE_SIZE but the cache isn't big 4648 * enough to satisfy the request, this may 4649 * happen in low memory conditions. 4650 * We don't release the cache page because 4651 * it could make memory pressure worse 4652 * so we simply return NULL here. 4653 */ 4654 return NULL; 4655 } 4656 } 4657 4658 nc->pagecnt_bias--; 4659 offset &= align_mask; 4660 nc->offset = offset; 4661 4662 return nc->va + offset; 4663 } 4664 EXPORT_SYMBOL(page_frag_alloc_align); 4665 4666 /* 4667 * Frees a page fragment allocated out of either a compound or order 0 page. 4668 */ 4669 void page_frag_free(void *addr) 4670 { 4671 struct page *page = virt_to_head_page(addr); 4672 4673 if (unlikely(put_page_testzero(page))) 4674 free_the_page(page, compound_order(page)); 4675 } 4676 EXPORT_SYMBOL(page_frag_free); 4677 4678 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4679 size_t size) 4680 { 4681 if (addr) { 4682 unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE); 4683 struct page *page = virt_to_page((void *)addr); 4684 struct page *last = page + nr; 4685 4686 split_page_owner(page, 1 << order); 4687 split_page_memcg(page, 1 << order); 4688 while (page < --last) 4689 set_page_refcounted(last); 4690 4691 last = page + (1UL << order); 4692 for (page += nr; page < last; page++) 4693 __free_pages_ok(page, 0, FPI_TO_TAIL); 4694 } 4695 return (void *)addr; 4696 } 4697 4698 /** 4699 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 4700 * @size: the number of bytes to allocate 4701 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4702 * 4703 * This function is similar to alloc_pages(), except that it allocates the 4704 * minimum number of pages to satisfy the request. alloc_pages() can only 4705 * allocate memory in power-of-two pages. 4706 * 4707 * This function is also limited by MAX_ORDER. 4708 * 4709 * Memory allocated by this function must be released by free_pages_exact(). 4710 * 4711 * Return: pointer to the allocated area or %NULL in case of error. 
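 *
 * Example (illustrative only):
 *
 *	void *buf = alloc_pages_exact(48 * 1024, GFP_KERNEL | __GFP_ZERO);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 48 * 1024);
 *
 * With 4kB pages this allocates an order-4 block (64kB) and immediately
 * hands the trailing 16kB back to the page allocator.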
4712 */ 4713 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 4714 { 4715 unsigned int order = get_order(size); 4716 unsigned long addr; 4717 4718 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4719 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4720 4721 addr = __get_free_pages(gfp_mask, order); 4722 return make_alloc_exact(addr, order, size); 4723 } 4724 EXPORT_SYMBOL(alloc_pages_exact); 4725 4726 /** 4727 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4728 * pages on a node. 4729 * @nid: the preferred node ID where memory should be allocated 4730 * @size: the number of bytes to allocate 4731 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 4732 * 4733 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 4734 * back. 4735 * 4736 * Return: pointer to the allocated area or %NULL in case of error. 4737 */ 4738 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 4739 { 4740 unsigned int order = get_order(size); 4741 struct page *p; 4742 4743 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 4744 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 4745 4746 p = alloc_pages_node(nid, gfp_mask, order); 4747 if (!p) 4748 return NULL; 4749 return make_alloc_exact((unsigned long)page_address(p), order, size); 4750 } 4751 4752 /** 4753 * free_pages_exact - release memory allocated via alloc_pages_exact() 4754 * @virt: the value returned by alloc_pages_exact. 4755 * @size: size of allocation, same value as passed to alloc_pages_exact(). 4756 * 4757 * Release the memory allocated by a previous call to alloc_pages_exact. 4758 */ 4759 void free_pages_exact(void *virt, size_t size) 4760 { 4761 unsigned long addr = (unsigned long)virt; 4762 unsigned long end = addr + PAGE_ALIGN(size); 4763 4764 while (addr < end) { 4765 free_page(addr); 4766 addr += PAGE_SIZE; 4767 } 4768 } 4769 EXPORT_SYMBOL(free_pages_exact); 4770 4771 /** 4772 * nr_free_zone_pages - count number of pages beyond high watermark 4773 * @offset: The zone index of the highest zone 4774 * 4775 * nr_free_zone_pages() counts the number of pages which are beyond the 4776 * high watermark within all zones at or below a given zone index. For each 4777 * zone, the number of pages is calculated as: 4778 * 4779 * nr_free_zone_pages = managed_pages - high_pages 4780 * 4781 * Return: number of pages beyond high watermark. 4782 */ 4783 static unsigned long nr_free_zone_pages(int offset) 4784 { 4785 struct zoneref *z; 4786 struct zone *zone; 4787 4788 /* Just pick one node, since fallback list is circular */ 4789 unsigned long sum = 0; 4790 4791 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 4792 4793 for_each_zone_zonelist(zone, z, zonelist, offset) { 4794 unsigned long size = zone_managed_pages(zone); 4795 unsigned long high = high_wmark_pages(zone); 4796 if (size > high) 4797 sum += size - high; 4798 } 4799 4800 return sum; 4801 } 4802 4803 /** 4804 * nr_free_buffer_pages - count number of pages beyond high watermark 4805 * 4806 * nr_free_buffer_pages() counts the number of pages which are beyond the high 4807 * watermark within ZONE_DMA and ZONE_NORMAL. 4808 * 4809 * Return: number of pages beyond high watermark within ZONE_DMA and 4810 * ZONE_NORMAL. 
4811 */ 4812 unsigned long nr_free_buffer_pages(void) 4813 { 4814 return nr_free_zone_pages(gfp_zone(GFP_USER)); 4815 } 4816 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 4817 4818 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 4819 { 4820 zoneref->zone = zone; 4821 zoneref->zone_idx = zone_idx(zone); 4822 } 4823 4824 /* 4825 * Builds allocation fallback zone lists. 4826 * 4827 * Add all populated zones of a node to the zonelist. 4828 */ 4829 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 4830 { 4831 struct zone *zone; 4832 enum zone_type zone_type = MAX_NR_ZONES; 4833 int nr_zones = 0; 4834 4835 do { 4836 zone_type--; 4837 zone = pgdat->node_zones + zone_type; 4838 if (populated_zone(zone)) { 4839 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 4840 check_highest_zone(zone_type); 4841 } 4842 } while (zone_type); 4843 4844 return nr_zones; 4845 } 4846 4847 #ifdef CONFIG_NUMA 4848 4849 static int __parse_numa_zonelist_order(char *s) 4850 { 4851 /* 4852 * We used to support different zonelists modes but they turned 4853 * out to be just not useful. Let's keep the warning in place 4854 * if somebody still use the cmd line parameter so that we do 4855 * not fail it silently 4856 */ 4857 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 4858 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 4859 return -EINVAL; 4860 } 4861 return 0; 4862 } 4863 4864 static char numa_zonelist_order[] = "Node"; 4865 #define NUMA_ZONELIST_ORDER_LEN 16 4866 /* 4867 * sysctl handler for numa_zonelist_order 4868 */ 4869 static int numa_zonelist_order_handler(struct ctl_table *table, int write, 4870 void *buffer, size_t *length, loff_t *ppos) 4871 { 4872 if (write) 4873 return __parse_numa_zonelist_order(buffer); 4874 return proc_dostring(table, write, buffer, length, ppos); 4875 } 4876 4877 static int node_load[MAX_NUMNODES]; 4878 4879 /** 4880 * find_next_best_node - find the next node that should appear in a given node's fallback list 4881 * @node: node whose fallback list we're appending 4882 * @used_node_mask: nodemask_t of already used nodes 4883 * 4884 * We use a number of factors to determine which is the next node that should 4885 * appear on a given node's fallback list. The node should not have appeared 4886 * already in @node's fallback list, and it should be the next closest node 4887 * according to the distance array (which contains arbitrary distance values 4888 * from each node to each node in the system), and should also prefer nodes 4889 * with no CPUs, since presumably they'll have very little allocation pressure 4890 * on them otherwise. 4891 * 4892 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 
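 *
 * The effective score for a candidate node n is (smaller is better):
 *
 *	val  = node_distance(node, n);
 *	val += (n < node);			// prefer the next node
 *	if (!cpumask_empty(cpumask_of_node(n)))
 *		val += PENALTY_FOR_NODE_WITH_CPUS;
 *	val  = val * MAX_NUMNODES + node_load[n];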
4893 */ 4894 int find_next_best_node(int node, nodemask_t *used_node_mask) 4895 { 4896 int n, val; 4897 int min_val = INT_MAX; 4898 int best_node = NUMA_NO_NODE; 4899 4900 /* Use the local node if we haven't already */ 4901 if (!node_isset(node, *used_node_mask)) { 4902 node_set(node, *used_node_mask); 4903 return node; 4904 } 4905 4906 for_each_node_state(n, N_MEMORY) { 4907 4908 /* Don't want a node to appear more than once */ 4909 if (node_isset(n, *used_node_mask)) 4910 continue; 4911 4912 /* Use the distance array to find the distance */ 4913 val = node_distance(node, n); 4914 4915 /* Penalize nodes under us ("prefer the next node") */ 4916 val += (n < node); 4917 4918 /* Give preference to headless and unused nodes */ 4919 if (!cpumask_empty(cpumask_of_node(n))) 4920 val += PENALTY_FOR_NODE_WITH_CPUS; 4921 4922 /* Slight preference for less loaded node */ 4923 val *= MAX_NUMNODES; 4924 val += node_load[n]; 4925 4926 if (val < min_val) { 4927 min_val = val; 4928 best_node = n; 4929 } 4930 } 4931 4932 if (best_node >= 0) 4933 node_set(best_node, *used_node_mask); 4934 4935 return best_node; 4936 } 4937 4938 4939 /* 4940 * Build zonelists ordered by node and zones within node. 4941 * This results in maximum locality--normal zone overflows into local 4942 * DMA zone, if any--but risks exhausting DMA zone. 4943 */ 4944 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 4945 unsigned nr_nodes) 4946 { 4947 struct zoneref *zonerefs; 4948 int i; 4949 4950 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 4951 4952 for (i = 0; i < nr_nodes; i++) { 4953 int nr_zones; 4954 4955 pg_data_t *node = NODE_DATA(node_order[i]); 4956 4957 nr_zones = build_zonerefs_node(node, zonerefs); 4958 zonerefs += nr_zones; 4959 } 4960 zonerefs->zone = NULL; 4961 zonerefs->zone_idx = 0; 4962 } 4963 4964 /* 4965 * Build gfp_thisnode zonelists 4966 */ 4967 static void build_thisnode_zonelists(pg_data_t *pgdat) 4968 { 4969 struct zoneref *zonerefs; 4970 int nr_zones; 4971 4972 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 4973 nr_zones = build_zonerefs_node(pgdat, zonerefs); 4974 zonerefs += nr_zones; 4975 zonerefs->zone = NULL; 4976 zonerefs->zone_idx = 0; 4977 } 4978 4979 /* 4980 * Build zonelists ordered by zone and nodes within zones. 4981 * This results in conserving DMA zone[s] until all Normal memory is 4982 * exhausted, but results in overflowing to remote node while memory 4983 * may still exist in local DMA zone. 4984 */ 4985 4986 static void build_zonelists(pg_data_t *pgdat) 4987 { 4988 static int node_order[MAX_NUMNODES]; 4989 int node, nr_nodes = 0; 4990 nodemask_t used_mask = NODE_MASK_NONE; 4991 int local_node, prev_node; 4992 4993 /* NUMA-aware ordering of nodes */ 4994 local_node = pgdat->node_id; 4995 prev_node = local_node; 4996 4997 memset(node_order, 0, sizeof(node_order)); 4998 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 4999 /* 5000 * We don't want to pressure a particular node. 5001 * So adding penalty to the first node in same 5002 * distance group to make it round-robin. 
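 *
 * E.g. if nodes 1 and 2 sit at the same distance from node 0, whichever
 * one is picked first gets its node_load bumped here, so it tends to sort
 * behind the other one when the next fallback list is built - a simple
 * round-robin within each distance group.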
5003 */ 5004 if (node_distance(local_node, node) != 5005 node_distance(local_node, prev_node)) 5006 node_load[node] += 1; 5007 5008 node_order[nr_nodes++] = node; 5009 prev_node = node; 5010 } 5011 5012 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 5013 build_thisnode_zonelists(pgdat); 5014 pr_info("Fallback order for Node %d: ", local_node); 5015 for (node = 0; node < nr_nodes; node++) 5016 pr_cont("%d ", node_order[node]); 5017 pr_cont("\n"); 5018 } 5019 5020 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5021 /* 5022 * Return node id of node used for "local" allocations. 5023 * I.e., first node id of first zone in arg node's generic zonelist. 5024 * Used for initializing percpu 'numa_mem', which is used primarily 5025 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5026 */ 5027 int local_memory_node(int node) 5028 { 5029 struct zoneref *z; 5030 5031 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5032 gfp_zone(GFP_KERNEL), 5033 NULL); 5034 return zone_to_nid(z->zone); 5035 } 5036 #endif 5037 5038 static void setup_min_unmapped_ratio(void); 5039 static void setup_min_slab_ratio(void); 5040 #else /* CONFIG_NUMA */ 5041 5042 static void build_zonelists(pg_data_t *pgdat) 5043 { 5044 int node, local_node; 5045 struct zoneref *zonerefs; 5046 int nr_zones; 5047 5048 local_node = pgdat->node_id; 5049 5050 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 5051 nr_zones = build_zonerefs_node(pgdat, zonerefs); 5052 zonerefs += nr_zones; 5053 5054 /* 5055 * Now we build the zonelist so that it contains the zones 5056 * of all the other nodes. 5057 * We don't want to pressure a particular node, so when 5058 * building the zones for node N, we make sure that the 5059 * zones coming right after the local ones are those from 5060 * node N+1 (modulo N) 5061 */ 5062 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 5063 if (!node_online(node)) 5064 continue; 5065 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5066 zonerefs += nr_zones; 5067 } 5068 for (node = 0; node < local_node; node++) { 5069 if (!node_online(node)) 5070 continue; 5071 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 5072 zonerefs += nr_zones; 5073 } 5074 5075 zonerefs->zone = NULL; 5076 zonerefs->zone_idx = 0; 5077 } 5078 5079 #endif /* CONFIG_NUMA */ 5080 5081 /* 5082 * Boot pageset table. One per cpu which is going to be used for all 5083 * zones and all nodes. The parameters will be set in such a way 5084 * that an item put on a list will immediately be handed over to 5085 * the buddy list. This is safe since pageset manipulation is done 5086 * with interrupts disabled. 5087 * 5088 * The boot_pagesets must be kept even after bootup is complete for 5089 * unused processors and/or zones. They do play a role for bootstrapping 5090 * hotplugged processors. 5091 * 5092 * zoneinfo_show() and maybe other functions do 5093 * not check if the processor is online before following the pageset pointer. 5094 * Other parts of the kernel may not check if the zone is available. 
5095 */ 5096 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 5097 /* These effectively disable the pcplists in the boot pageset completely */ 5098 #define BOOT_PAGESET_HIGH 0 5099 #define BOOT_PAGESET_BATCH 1 5100 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 5101 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 5102 5103 static void __build_all_zonelists(void *data) 5104 { 5105 int nid; 5106 int __maybe_unused cpu; 5107 pg_data_t *self = data; 5108 unsigned long flags; 5109 5110 /* 5111 * The zonelist_update_seq must be acquired with irqsave because the 5112 * reader can be invoked from IRQ with GFP_ATOMIC. 5113 */ 5114 write_seqlock_irqsave(&zonelist_update_seq, flags); 5115 /* 5116 * Also disable synchronous printk() to prevent any printk() from 5117 * trying to hold port->lock, for 5118 * tty_insert_flip_string_and_push_buffer() on other CPU might be 5119 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. 5120 */ 5121 printk_deferred_enter(); 5122 5123 #ifdef CONFIG_NUMA 5124 memset(node_load, 0, sizeof(node_load)); 5125 #endif 5126 5127 /* 5128 * This node is hotadded and no memory is yet present. So just 5129 * building zonelists is fine - no need to touch other nodes. 5130 */ 5131 if (self && !node_online(self->node_id)) { 5132 build_zonelists(self); 5133 } else { 5134 /* 5135 * All possible nodes have pgdat preallocated 5136 * in free_area_init 5137 */ 5138 for_each_node(nid) { 5139 pg_data_t *pgdat = NODE_DATA(nid); 5140 5141 build_zonelists(pgdat); 5142 } 5143 5144 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5145 /* 5146 * We now know the "local memory node" for each node-- 5147 * i.e., the node of the first zone in the generic zonelist. 5148 * Set up numa_mem percpu variable for on-line cpus. During 5149 * boot, only the boot cpu should be on-line; we'll init the 5150 * secondary cpus' numa_mem as they come on-line. During 5151 * node/memory hotplug, we'll fixup all on-line cpus. 5152 */ 5153 for_each_online_cpu(cpu) 5154 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5155 #endif 5156 } 5157 5158 printk_deferred_exit(); 5159 write_sequnlock_irqrestore(&zonelist_update_seq, flags); 5160 } 5161 5162 static noinline void __init 5163 build_all_zonelists_init(void) 5164 { 5165 int cpu; 5166 5167 __build_all_zonelists(NULL); 5168 5169 /* 5170 * Initialize the boot_pagesets that are going to be used 5171 * for bootstrapping processors. The real pagesets for 5172 * each zone will be allocated later when the per cpu 5173 * allocator is available. 5174 * 5175 * boot_pagesets are used also for bootstrapping offline 5176 * cpus if the system is already booted because the pagesets 5177 * are needed to initialize allocators on a specific cpu too. 5178 * F.e. the percpu allocator needs the page allocator which 5179 * needs the percpu allocator in order to allocate its pagesets 5180 * (a chicken-egg dilemma). 5181 */ 5182 for_each_possible_cpu(cpu) 5183 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 5184 5185 mminit_verify_zonelist(); 5186 cpuset_init_current_mems_allowed(); 5187 } 5188 5189 /* 5190 * unless system_state == SYSTEM_BOOTING. 5191 * 5192 * __ref due to call of __init annotated helper build_all_zonelists_init 5193 * [protected by SYSTEM_BOOTING]. 
5194 */ 5195 void __ref build_all_zonelists(pg_data_t *pgdat) 5196 { 5197 unsigned long vm_total_pages; 5198 5199 if (system_state == SYSTEM_BOOTING) { 5200 build_all_zonelists_init(); 5201 } else { 5202 __build_all_zonelists(pgdat); 5203 /* cpuset refresh routine should be here */ 5204 } 5205 /* Get the number of free pages beyond high watermark in all zones. */ 5206 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 5207 /* 5208 * Disable grouping by mobility if the number of pages in the 5209 * system is too low to allow the mechanism to work. It would be 5210 * more accurate, but expensive to check per-zone. This check is 5211 * made on memory-hotadd so a system can start with mobility 5212 * disabled and enable it later 5213 */ 5214 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5215 page_group_by_mobility_disabled = 1; 5216 else 5217 page_group_by_mobility_disabled = 0; 5218 5219 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 5220 nr_online_nodes, 5221 page_group_by_mobility_disabled ? "off" : "on", 5222 vm_total_pages); 5223 #ifdef CONFIG_NUMA 5224 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5225 #endif 5226 } 5227 5228 static int zone_batchsize(struct zone *zone) 5229 { 5230 #ifdef CONFIG_MMU 5231 int batch; 5232 5233 /* 5234 * The number of pages to batch allocate is either ~0.1% 5235 * of the zone or 1MB, whichever is smaller. The batch 5236 * size is striking a balance between allocation latency 5237 * and zone lock contention. 5238 */ 5239 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 5240 batch /= 4; /* We effectively *= 4 below */ 5241 if (batch < 1) 5242 batch = 1; 5243 5244 /* 5245 * Clamp the batch to a 2^n - 1 value. Having a power 5246 * of 2 value was found to be more likely to have 5247 * suboptimal cache aliasing properties in some cases. 5248 * 5249 * For example if 2 tasks are alternately allocating 5250 * batches of pages, one task can end up with a lot 5251 * of pages of one half of the possible page colors 5252 * and the other with pages of the other colors. 5253 */ 5254 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5255 5256 return batch; 5257 5258 #else 5259 /* The deferral and batching of frees should be suppressed under NOMMU 5260 * conditions. 5261 * 5262 * The problem is that NOMMU needs to be able to allocate large chunks 5263 * of contiguous memory as there's no hardware page translation to 5264 * assemble apparent contiguous memory from discontiguous pages. 5265 * 5266 * Queueing large contiguous runs of pages for batching, however, 5267 * causes the pages to actually be freed in smaller chunks. As there 5268 * can be a significant delay between the individual batches being 5269 * recycled, this leads to the once large chunks of space being 5270 * fragmented and becoming unavailable for high-order allocations. 5271 */ 5272 return 0; 5273 #endif 5274 } 5275 5276 static int percpu_pagelist_high_fraction; 5277 static int zone_highsize(struct zone *zone, int batch, int cpu_online) 5278 { 5279 #ifdef CONFIG_MMU 5280 int high; 5281 int nr_split_cpus; 5282 unsigned long total_pages; 5283 5284 if (!percpu_pagelist_high_fraction) { 5285 /* 5286 * By default, the high value of the pcp is based on the zone 5287 * low watermark so that if they are full then background 5288 * reclaim will not be started prematurely. 
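 *
 * Rough worked example (4kB pages, ~4GB zone; numbers are for
 * illustration only): zone_batchsize() computes
 * min(managed_pages >> 10, 256) / 4 = 64, which the power-of-two clamp
 * turns into a batch of 63.  The high value is then the low watermark
 * (or the configured fraction of managed pages) split across the zone's
 * CPUs, but never less than batch * 4.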
5289 */ 5290 total_pages = low_wmark_pages(zone); 5291 } else { 5292 /* 5293 * If percpu_pagelist_high_fraction is configured, the high 5294 * value is based on a fraction of the managed pages in the 5295 * zone. 5296 */ 5297 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; 5298 } 5299 5300 /* 5301 * Split the high value across all online CPUs local to the zone. Note 5302 * that early in boot that CPUs may not be online yet and that during 5303 * CPU hotplug that the cpumask is not yet updated when a CPU is being 5304 * onlined. For memory nodes that have no CPUs, split pcp->high across 5305 * all online CPUs to mitigate the risk that reclaim is triggered 5306 * prematurely due to pages stored on pcp lists. 5307 */ 5308 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 5309 if (!nr_split_cpus) 5310 nr_split_cpus = num_online_cpus(); 5311 high = total_pages / nr_split_cpus; 5312 5313 /* 5314 * Ensure high is at least batch*4. The multiple is based on the 5315 * historical relationship between high and batch. 5316 */ 5317 high = max(high, batch << 2); 5318 5319 return high; 5320 #else 5321 return 0; 5322 #endif 5323 } 5324 5325 /* 5326 * pcp->high and pcp->batch values are related and generally batch is lower 5327 * than high. They are also related to pcp->count such that count is lower 5328 * than high, and as soon as it reaches high, the pcplist is flushed. 5329 * 5330 * However, guaranteeing these relations at all times would require e.g. write 5331 * barriers here but also careful usage of read barriers at the read side, and 5332 * thus be prone to error and bad for performance. Thus the update only prevents 5333 * store tearing. Any new users of pcp->batch and pcp->high should ensure they 5334 * can cope with those fields changing asynchronously, and fully trust only the 5335 * pcp->count field on the local CPU with interrupts disabled. 5336 * 5337 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5338 * outside of boot time (or some other assurance that no concurrent updaters 5339 * exist). 5340 */ 5341 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 5342 unsigned long batch) 5343 { 5344 WRITE_ONCE(pcp->batch, batch); 5345 WRITE_ONCE(pcp->high, high); 5346 } 5347 5348 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 5349 { 5350 int pindex; 5351 5352 memset(pcp, 0, sizeof(*pcp)); 5353 memset(pzstats, 0, sizeof(*pzstats)); 5354 5355 spin_lock_init(&pcp->lock); 5356 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 5357 INIT_LIST_HEAD(&pcp->lists[pindex]); 5358 5359 /* 5360 * Set batch and high values safe for a boot pageset. A true percpu 5361 * pageset's initialization will update them subsequently. Here we don't 5362 * need to be as careful as pageset_update() as nobody can access the 5363 * pageset yet. 5364 */ 5365 pcp->high = BOOT_PAGESET_HIGH; 5366 pcp->batch = BOOT_PAGESET_BATCH; 5367 pcp->free_factor = 0; 5368 } 5369 5370 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 5371 unsigned long batch) 5372 { 5373 struct per_cpu_pages *pcp; 5374 int cpu; 5375 5376 for_each_possible_cpu(cpu) { 5377 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5378 pageset_update(pcp, high, batch); 5379 } 5380 } 5381 5382 /* 5383 * Calculate and set new high and batch values for all per-cpu pagesets of a 5384 * zone based on the zone's size. 
5385 */ 5386 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 5387 { 5388 int new_high, new_batch; 5389 5390 new_batch = max(1, zone_batchsize(zone)); 5391 new_high = zone_highsize(zone, new_batch, cpu_online); 5392 5393 if (zone->pageset_high == new_high && 5394 zone->pageset_batch == new_batch) 5395 return; 5396 5397 zone->pageset_high = new_high; 5398 zone->pageset_batch = new_batch; 5399 5400 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 5401 } 5402 5403 void __meminit setup_zone_pageset(struct zone *zone) 5404 { 5405 int cpu; 5406 5407 /* Size may be 0 on !SMP && !NUMA */ 5408 if (sizeof(struct per_cpu_zonestat) > 0) 5409 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 5410 5411 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 5412 for_each_possible_cpu(cpu) { 5413 struct per_cpu_pages *pcp; 5414 struct per_cpu_zonestat *pzstats; 5415 5416 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 5417 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 5418 per_cpu_pages_init(pcp, pzstats); 5419 } 5420 5421 zone_set_pageset_high_and_batch(zone, 0); 5422 } 5423 5424 /* 5425 * The zone indicated has a new number of managed_pages; batch sizes and percpu 5426 * page high values need to be recalculated. 5427 */ 5428 static void zone_pcp_update(struct zone *zone, int cpu_online) 5429 { 5430 mutex_lock(&pcp_batch_high_lock); 5431 zone_set_pageset_high_and_batch(zone, cpu_online); 5432 mutex_unlock(&pcp_batch_high_lock); 5433 } 5434 5435 /* 5436 * Allocate per cpu pagesets and initialize them. 5437 * Before this call only boot pagesets were available. 5438 */ 5439 void __init setup_per_cpu_pageset(void) 5440 { 5441 struct pglist_data *pgdat; 5442 struct zone *zone; 5443 int __maybe_unused cpu; 5444 5445 for_each_populated_zone(zone) 5446 setup_zone_pageset(zone); 5447 5448 #ifdef CONFIG_NUMA 5449 /* 5450 * Unpopulated zones continue using the boot pagesets. 5451 * The numa stats for these pagesets need to be reset. 5452 * Otherwise, they will end up skewing the stats of 5453 * the nodes these zones are associated with. 5454 */ 5455 for_each_possible_cpu(cpu) { 5456 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 5457 memset(pzstats->vm_numa_event, 0, 5458 sizeof(pzstats->vm_numa_event)); 5459 } 5460 #endif 5461 5462 for_each_online_pgdat(pgdat) 5463 pgdat->per_cpu_nodestats = 5464 alloc_percpu(struct per_cpu_nodestat); 5465 } 5466 5467 __meminit void zone_pcp_init(struct zone *zone) 5468 { 5469 /* 5470 * per cpu subsystem is not up at this point. The following code 5471 * relies on the ability of the linker to provide the 5472 * offset of a (static) per cpu variable into the per cpu area. 
5473 */ 5474 zone->per_cpu_pageset = &boot_pageset; 5475 zone->per_cpu_zonestats = &boot_zonestats; 5476 zone->pageset_high = BOOT_PAGESET_HIGH; 5477 zone->pageset_batch = BOOT_PAGESET_BATCH; 5478 5479 if (populated_zone(zone)) 5480 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 5481 zone->present_pages, zone_batchsize(zone)); 5482 } 5483 5484 void adjust_managed_page_count(struct page *page, long count) 5485 { 5486 atomic_long_add(count, &page_zone(page)->managed_pages); 5487 totalram_pages_add(count); 5488 #ifdef CONFIG_HIGHMEM 5489 if (PageHighMem(page)) 5490 totalhigh_pages_add(count); 5491 #endif 5492 } 5493 EXPORT_SYMBOL(adjust_managed_page_count); 5494 5495 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 5496 { 5497 void *pos; 5498 unsigned long pages = 0; 5499 5500 start = (void *)PAGE_ALIGN((unsigned long)start); 5501 end = (void *)((unsigned long)end & PAGE_MASK); 5502 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5503 struct page *page = virt_to_page(pos); 5504 void *direct_map_addr; 5505 5506 /* 5507 * 'direct_map_addr' might be different from 'pos' 5508 * because some architectures' virt_to_page() 5509 * work with aliases. Getting the direct map 5510 * address ensures that we get a _writeable_ 5511 * alias for the memset(). 5512 */ 5513 direct_map_addr = page_address(page); 5514 /* 5515 * Perform a kasan-unchecked memset() since this memory 5516 * has not been initialized. 5517 */ 5518 direct_map_addr = kasan_reset_tag(direct_map_addr); 5519 if ((unsigned int)poison <= 0xFF) 5520 memset(direct_map_addr, poison, PAGE_SIZE); 5521 5522 free_reserved_page(page); 5523 } 5524 5525 if (pages && s) 5526 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 5527 5528 return pages; 5529 } 5530 5531 static int page_alloc_cpu_dead(unsigned int cpu) 5532 { 5533 struct zone *zone; 5534 5535 lru_add_drain_cpu(cpu); 5536 mlock_drain_remote(cpu); 5537 drain_pages(cpu); 5538 5539 /* 5540 * Spill the event counters of the dead processor 5541 * into the current processors event counters. 5542 * This artificially elevates the count of the current 5543 * processor. 5544 */ 5545 vm_events_fold_cpu(cpu); 5546 5547 /* 5548 * Zero the differential counters of the dead processor 5549 * so that the vm statistics are consistent. 5550 * 5551 * This is only okay since the processor is dead and cannot 5552 * race with what we are doing. 5553 */ 5554 cpu_vm_stats_fold(cpu); 5555 5556 for_each_populated_zone(zone) 5557 zone_pcp_update(zone, 0); 5558 5559 return 0; 5560 } 5561 5562 static int page_alloc_cpu_online(unsigned int cpu) 5563 { 5564 struct zone *zone; 5565 5566 for_each_populated_zone(zone) 5567 zone_pcp_update(zone, 1); 5568 return 0; 5569 } 5570 5571 void __init page_alloc_init_cpuhp(void) 5572 { 5573 int ret; 5574 5575 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 5576 "mm/page_alloc:pcp", 5577 page_alloc_cpu_online, 5578 page_alloc_cpu_dead); 5579 WARN_ON(ret < 0); 5580 } 5581 5582 /* 5583 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 5584 * or min_free_kbytes changes. 
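 *
 * For every node this sums, zone by zone, the high watermark plus the
 * largest lowmem_reserve[] entry of the zone, clamped to the zone's
 * managed pages, caching the per-node result in pgdat->totalreserve_pages
 * and the grand total in totalreserve_pages, which the overcommit
 * accounting treats as memory never available to userspace.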
5585 */ 5586 static void calculate_totalreserve_pages(void) 5587 { 5588 struct pglist_data *pgdat; 5589 unsigned long reserve_pages = 0; 5590 enum zone_type i, j; 5591 5592 for_each_online_pgdat(pgdat) { 5593 5594 pgdat->totalreserve_pages = 0; 5595 5596 for (i = 0; i < MAX_NR_ZONES; i++) { 5597 struct zone *zone = pgdat->node_zones + i; 5598 long max = 0; 5599 unsigned long managed_pages = zone_managed_pages(zone); 5600 5601 /* Find valid and maximum lowmem_reserve in the zone */ 5602 for (j = i; j < MAX_NR_ZONES; j++) { 5603 if (zone->lowmem_reserve[j] > max) 5604 max = zone->lowmem_reserve[j]; 5605 } 5606 5607 /* we treat the high watermark as reserved pages. */ 5608 max += high_wmark_pages(zone); 5609 5610 if (max > managed_pages) 5611 max = managed_pages; 5612 5613 pgdat->totalreserve_pages += max; 5614 5615 reserve_pages += max; 5616 } 5617 } 5618 totalreserve_pages = reserve_pages; 5619 } 5620 5621 /* 5622 * setup_per_zone_lowmem_reserve - called whenever 5623 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 5624 * has a correct pages reserved value, so an adequate number of 5625 * pages are left in the zone after a successful __alloc_pages(). 5626 */ 5627 static void setup_per_zone_lowmem_reserve(void) 5628 { 5629 struct pglist_data *pgdat; 5630 enum zone_type i, j; 5631 5632 for_each_online_pgdat(pgdat) { 5633 for (i = 0; i < MAX_NR_ZONES - 1; i++) { 5634 struct zone *zone = &pgdat->node_zones[i]; 5635 int ratio = sysctl_lowmem_reserve_ratio[i]; 5636 bool clear = !ratio || !zone_managed_pages(zone); 5637 unsigned long managed_pages = 0; 5638 5639 for (j = i + 1; j < MAX_NR_ZONES; j++) { 5640 struct zone *upper_zone = &pgdat->node_zones[j]; 5641 5642 managed_pages += zone_managed_pages(upper_zone); 5643 5644 if (clear) 5645 zone->lowmem_reserve[j] = 0; 5646 else 5647 zone->lowmem_reserve[j] = managed_pages / ratio; 5648 } 5649 } 5650 } 5651 5652 /* update totalreserve_pages */ 5653 calculate_totalreserve_pages(); 5654 } 5655 5656 static void __setup_per_zone_wmarks(void) 5657 { 5658 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5659 unsigned long lowmem_pages = 0; 5660 struct zone *zone; 5661 unsigned long flags; 5662 5663 /* Calculate total number of !ZONE_HIGHMEM and !ZONE_MOVABLE pages */ 5664 for_each_zone(zone) { 5665 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) 5666 lowmem_pages += zone_managed_pages(zone); 5667 } 5668 5669 for_each_zone(zone) { 5670 u64 tmp; 5671 5672 spin_lock_irqsave(&zone->lock, flags); 5673 tmp = (u64)pages_min * zone_managed_pages(zone); 5674 do_div(tmp, lowmem_pages); 5675 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { 5676 /* 5677 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 5678 * need highmem and movable zones pages, so cap pages_min 5679 * to a small value here. 5680 * 5681 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 5682 * deltas control async page reclaim, and so should 5683 * not be capped for highmem and movable zones. 5684 */ 5685 unsigned long min_pages; 5686 5687 min_pages = zone_managed_pages(zone) / 1024; 5688 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 5689 zone->_watermark[WMARK_MIN] = min_pages; 5690 } else { 5691 /* 5692 * If it's a lowmem zone, reserve a number of pages 5693 * proportionate to the zone's size. 5694 */ 5695 zone->_watermark[WMARK_MIN] = tmp; 5696 } 5697 5698 /* 5699 * Set the kswapd watermarks distance according to the 5700 * scale factor in proportion to available memory, but 5701 * ensure a minimum size on small systems. 
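 *
 * Illustrative numbers (assumed, not from this file): with the default
 * watermark_scale_factor of 10 the gap is 0.1% of the zone, so a zone of
 * 4,194,304 managed pages gets roughly 4,194 pages between successive
 * watermarks, unless a quarter of its min watermark is larger.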
5702 */ 5703 tmp = max_t(u64, tmp >> 2, 5704 mult_frac(zone_managed_pages(zone), 5705 watermark_scale_factor, 10000)); 5706 5707 zone->watermark_boost = 0; 5708 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 5709 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; 5710 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; 5711 5712 spin_unlock_irqrestore(&zone->lock, flags); 5713 } 5714 5715 /* update totalreserve_pages */ 5716 calculate_totalreserve_pages(); 5717 } 5718 5719 /** 5720 * setup_per_zone_wmarks - called when min_free_kbytes changes 5721 * or when memory is hot-{added|removed} 5722 * 5723 * Ensures that the watermark[min,low,high] values for each zone are set 5724 * correctly with respect to min_free_kbytes. 5725 */ 5726 void setup_per_zone_wmarks(void) 5727 { 5728 struct zone *zone; 5729 static DEFINE_SPINLOCK(lock); 5730 5731 spin_lock(&lock); 5732 __setup_per_zone_wmarks(); 5733 spin_unlock(&lock); 5734 5735 /* 5736 * The watermark size have changed so update the pcpu batch 5737 * and high limits or the limits may be inappropriate. 5738 */ 5739 for_each_zone(zone) 5740 zone_pcp_update(zone, 0); 5741 } 5742 5743 /* 5744 * Initialise min_free_kbytes. 5745 * 5746 * For small machines we want it small (128k min). For large machines 5747 * we want it large (256MB max). But it is not linear, because network 5748 * bandwidth does not increase linearly with machine size. We use 5749 * 5750 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 5751 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 5752 * 5753 * which yields 5754 * 5755 * 16MB: 512k 5756 * 32MB: 724k 5757 * 64MB: 1024k 5758 * 128MB: 1448k 5759 * 256MB: 2048k 5760 * 512MB: 2896k 5761 * 1024MB: 4096k 5762 * 2048MB: 5792k 5763 * 4096MB: 8192k 5764 * 8192MB: 11584k 5765 * 16384MB: 16384k 5766 */ 5767 void calculate_min_free_kbytes(void) 5768 { 5769 unsigned long lowmem_kbytes; 5770 int new_min_free_kbytes; 5771 5772 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 5773 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 5774 5775 if (new_min_free_kbytes > user_min_free_kbytes) 5776 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 5777 else 5778 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 5779 new_min_free_kbytes, user_min_free_kbytes); 5780 5781 } 5782 5783 int __meminit init_per_zone_wmark_min(void) 5784 { 5785 calculate_min_free_kbytes(); 5786 setup_per_zone_wmarks(); 5787 refresh_zone_stat_thresholds(); 5788 setup_per_zone_lowmem_reserve(); 5789 5790 #ifdef CONFIG_NUMA 5791 setup_min_unmapped_ratio(); 5792 setup_min_slab_ratio(); 5793 #endif 5794 5795 khugepaged_min_free_kbytes_update(); 5796 5797 return 0; 5798 } 5799 postcore_initcall(init_per_zone_wmark_min) 5800 5801 /* 5802 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 5803 * that we can call two helper functions whenever min_free_kbytes 5804 * changes. 
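 *
 * Illustrative usage: "echo 65536 > /proc/sys/vm/min_free_kbytes" stores
 * the value via proc_dointvec_minmax(), remembers it in
 * user_min_free_kbytes so that later automatic recalculation will not
 * shrink it, and rebuilds every zone's min/low/high watermarks through
 * setup_per_zone_wmarks().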
5805 */ 5806 static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 5807 void *buffer, size_t *length, loff_t *ppos) 5808 { 5809 int rc; 5810 5811 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5812 if (rc) 5813 return rc; 5814 5815 if (write) { 5816 user_min_free_kbytes = min_free_kbytes; 5817 setup_per_zone_wmarks(); 5818 } 5819 return 0; 5820 } 5821 5822 static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 5823 void *buffer, size_t *length, loff_t *ppos) 5824 { 5825 int rc; 5826 5827 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5828 if (rc) 5829 return rc; 5830 5831 if (write) 5832 setup_per_zone_wmarks(); 5833 5834 return 0; 5835 } 5836 5837 #ifdef CONFIG_NUMA 5838 static void setup_min_unmapped_ratio(void) 5839 { 5840 pg_data_t *pgdat; 5841 struct zone *zone; 5842 5843 for_each_online_pgdat(pgdat) 5844 pgdat->min_unmapped_pages = 0; 5845 5846 for_each_zone(zone) 5847 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 5848 sysctl_min_unmapped_ratio) / 100; 5849 } 5850 5851 5852 static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 5853 void *buffer, size_t *length, loff_t *ppos) 5854 { 5855 int rc; 5856 5857 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5858 if (rc) 5859 return rc; 5860 5861 setup_min_unmapped_ratio(); 5862 5863 return 0; 5864 } 5865 5866 static void setup_min_slab_ratio(void) 5867 { 5868 pg_data_t *pgdat; 5869 struct zone *zone; 5870 5871 for_each_online_pgdat(pgdat) 5872 pgdat->min_slab_pages = 0; 5873 5874 for_each_zone(zone) 5875 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 5876 sysctl_min_slab_ratio) / 100; 5877 } 5878 5879 static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 5880 void *buffer, size_t *length, loff_t *ppos) 5881 { 5882 int rc; 5883 5884 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 5885 if (rc) 5886 return rc; 5887 5888 setup_min_slab_ratio(); 5889 5890 return 0; 5891 } 5892 #endif 5893 5894 /* 5895 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 5896 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 5897 * whenever sysctl_lowmem_reserve_ratio changes. 5898 * 5899 * The reserve ratio obviously has absolutely no relation with the 5900 * minimum watermarks. The lowmem reserve ratio can only make sense 5901 * if in function of the boot time zone sizes. 5902 */ 5903 static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, 5904 int write, void *buffer, size_t *length, loff_t *ppos) 5905 { 5906 int i; 5907 5908 proc_dointvec_minmax(table, write, buffer, length, ppos); 5909 5910 for (i = 0; i < MAX_NR_ZONES; i++) { 5911 if (sysctl_lowmem_reserve_ratio[i] < 1) 5912 sysctl_lowmem_reserve_ratio[i] = 0; 5913 } 5914 5915 setup_per_zone_lowmem_reserve(); 5916 return 0; 5917 } 5918 5919 /* 5920 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each 5921 * cpu. It is the fraction of total pages in each zone that a hot per cpu 5922 * pagelist can have before it gets flushed back to buddy allocator. 
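 *
 * Illustrative usage: writing 8 to
 * /proc/sys/vm/percpu_pagelist_high_fraction caps each zone's pcp budget
 * at 1/8th of its managed pages (8, MIN_PERCPU_PAGELIST_HIGH_FRACTION, is
 * also the smallest non-zero value the handler below accepts), while
 * writing 0 restores the default heuristic based on the zone's low
 * watermark.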
5923 */ 5924 static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, 5925 int write, void *buffer, size_t *length, loff_t *ppos) 5926 { 5927 struct zone *zone; 5928 int old_percpu_pagelist_high_fraction; 5929 int ret; 5930 5931 mutex_lock(&pcp_batch_high_lock); 5932 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction; 5933 5934 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5935 if (!write || ret < 0) 5936 goto out; 5937 5938 /* Sanity checking to avoid pcp imbalance */ 5939 if (percpu_pagelist_high_fraction && 5940 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) { 5941 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction; 5942 ret = -EINVAL; 5943 goto out; 5944 } 5945 5946 /* No change? */ 5947 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction) 5948 goto out; 5949 5950 for_each_populated_zone(zone) 5951 zone_set_pageset_high_and_batch(zone, 0); 5952 out: 5953 mutex_unlock(&pcp_batch_high_lock); 5954 return ret; 5955 } 5956 5957 static struct ctl_table page_alloc_sysctl_table[] = { 5958 { 5959 .procname = "min_free_kbytes", 5960 .data = &min_free_kbytes, 5961 .maxlen = sizeof(min_free_kbytes), 5962 .mode = 0644, 5963 .proc_handler = min_free_kbytes_sysctl_handler, 5964 .extra1 = SYSCTL_ZERO, 5965 }, 5966 { 5967 .procname = "watermark_boost_factor", 5968 .data = &watermark_boost_factor, 5969 .maxlen = sizeof(watermark_boost_factor), 5970 .mode = 0644, 5971 .proc_handler = proc_dointvec_minmax, 5972 .extra1 = SYSCTL_ZERO, 5973 }, 5974 { 5975 .procname = "watermark_scale_factor", 5976 .data = &watermark_scale_factor, 5977 .maxlen = sizeof(watermark_scale_factor), 5978 .mode = 0644, 5979 .proc_handler = watermark_scale_factor_sysctl_handler, 5980 .extra1 = SYSCTL_ONE, 5981 .extra2 = SYSCTL_THREE_THOUSAND, 5982 }, 5983 { 5984 .procname = "percpu_pagelist_high_fraction", 5985 .data = &percpu_pagelist_high_fraction, 5986 .maxlen = sizeof(percpu_pagelist_high_fraction), 5987 .mode = 0644, 5988 .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, 5989 .extra1 = SYSCTL_ZERO, 5990 }, 5991 { 5992 .procname = "lowmem_reserve_ratio", 5993 .data = &sysctl_lowmem_reserve_ratio, 5994 .maxlen = sizeof(sysctl_lowmem_reserve_ratio), 5995 .mode = 0644, 5996 .proc_handler = lowmem_reserve_ratio_sysctl_handler, 5997 }, 5998 #ifdef CONFIG_NUMA 5999 { 6000 .procname = "numa_zonelist_order", 6001 .data = &numa_zonelist_order, 6002 .maxlen = NUMA_ZONELIST_ORDER_LEN, 6003 .mode = 0644, 6004 .proc_handler = numa_zonelist_order_handler, 6005 }, 6006 { 6007 .procname = "min_unmapped_ratio", 6008 .data = &sysctl_min_unmapped_ratio, 6009 .maxlen = sizeof(sysctl_min_unmapped_ratio), 6010 .mode = 0644, 6011 .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, 6012 .extra1 = SYSCTL_ZERO, 6013 .extra2 = SYSCTL_ONE_HUNDRED, 6014 }, 6015 { 6016 .procname = "min_slab_ratio", 6017 .data = &sysctl_min_slab_ratio, 6018 .maxlen = sizeof(sysctl_min_slab_ratio), 6019 .mode = 0644, 6020 .proc_handler = sysctl_min_slab_ratio_sysctl_handler, 6021 .extra1 = SYSCTL_ZERO, 6022 .extra2 = SYSCTL_ONE_HUNDRED, 6023 }, 6024 #endif 6025 {} 6026 }; 6027 6028 void __init page_alloc_sysctl_init(void) 6029 { 6030 register_sysctl_init("vm", page_alloc_sysctl_table); 6031 } 6032 6033 #ifdef CONFIG_CONTIG_ALLOC 6034 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 6035 static void alloc_contig_dump_pages(struct list_head *page_list) 6036 { 6037 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 6038 6039 if 
(DYNAMIC_DEBUG_BRANCH(descriptor)) { 6040 struct page *page; 6041 6042 dump_stack(); 6043 list_for_each_entry(page, page_list, lru) 6044 dump_page(page, "migration failure"); 6045 } 6046 } 6047 6048 /* [start, end) must belong to a single zone. */ 6049 int __alloc_contig_migrate_range(struct compact_control *cc, 6050 unsigned long start, unsigned long end) 6051 { 6052 /* This function is based on compact_zone() from compaction.c. */ 6053 unsigned int nr_reclaimed; 6054 unsigned long pfn = start; 6055 unsigned int tries = 0; 6056 int ret = 0; 6057 struct migration_target_control mtc = { 6058 .nid = zone_to_nid(cc->zone), 6059 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 6060 }; 6061 6062 lru_cache_disable(); 6063 6064 while (pfn < end || !list_empty(&cc->migratepages)) { 6065 if (fatal_signal_pending(current)) { 6066 ret = -EINTR; 6067 break; 6068 } 6069 6070 if (list_empty(&cc->migratepages)) { 6071 cc->nr_migratepages = 0; 6072 ret = isolate_migratepages_range(cc, pfn, end); 6073 if (ret && ret != -EAGAIN) 6074 break; 6075 pfn = cc->migrate_pfn; 6076 tries = 0; 6077 } else if (++tries == 5) { 6078 ret = -EBUSY; 6079 break; 6080 } 6081 6082 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 6083 &cc->migratepages); 6084 cc->nr_migratepages -= nr_reclaimed; 6085 6086 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 6087 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 6088 6089 /* 6090 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 6091 * to retry again over this error, so do the same here. 6092 */ 6093 if (ret == -ENOMEM) 6094 break; 6095 } 6096 6097 lru_cache_enable(); 6098 if (ret < 0) { 6099 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 6100 alloc_contig_dump_pages(&cc->migratepages); 6101 putback_movable_pages(&cc->migratepages); 6102 return ret; 6103 } 6104 return 0; 6105 } 6106 6107 /** 6108 * alloc_contig_range() -- tries to allocate given range of pages 6109 * @start: start PFN to allocate 6110 * @end: one-past-the-last PFN to allocate 6111 * @migratetype: migratetype of the underlying pageblocks (either 6112 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 6113 * in range must have the same migratetype and it must 6114 * be either of the two. 6115 * @gfp_mask: GFP mask to use during compaction 6116 * 6117 * The PFN range does not have to be pageblock aligned. The PFN range must 6118 * belong to a single zone. 6119 * 6120 * The first thing this routine does is attempt to MIGRATE_ISOLATE all 6121 * pageblocks in the range. Once isolated, the pageblocks should not 6122 * be modified by others. 6123 * 6124 * Return: zero on success or negative error code. On success all 6125 * pages which PFN is in [start, end) are allocated for the caller and 6126 * need to be freed with free_contig_range(). 6127 */ 6128 int alloc_contig_range(unsigned long start, unsigned long end, 6129 unsigned migratetype, gfp_t gfp_mask) 6130 { 6131 unsigned long outer_start, outer_end; 6132 int order; 6133 int ret = 0; 6134 6135 struct compact_control cc = { 6136 .nr_migratepages = 0, 6137 .order = -1, 6138 .zone = page_zone(pfn_to_page(start)), 6139 .mode = MIGRATE_SYNC, 6140 .ignore_skip_hint = true, 6141 .no_set_skip_hint = true, 6142 .gfp_mask = current_gfp_context(gfp_mask), 6143 .alloc_contig = true, 6144 }; 6145 INIT_LIST_HEAD(&cc.migratepages); 6146 6147 /* 6148 * What we do here is we mark all pageblocks in range as 6149 * MIGRATE_ISOLATE. 
Because pageblock and max order pages may 6150 * have different sizes, and due to the way page allocator 6151 * work, start_isolate_page_range() has special handlings for this. 6152 * 6153 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 6154 * migrate the pages from an unaligned range (ie. pages that 6155 * we are interested in). This will put all the pages in 6156 * range back to page allocator as MIGRATE_ISOLATE. 6157 * 6158 * When this is done, we take the pages in range from page 6159 * allocator removing them from the buddy system. This way 6160 * page allocator will never consider using them. 6161 * 6162 * This lets us mark the pageblocks back as 6163 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 6164 * aligned range but not in the unaligned, original range are 6165 * put back to page allocator so that buddy can use them. 6166 */ 6167 6168 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); 6169 if (ret) 6170 goto done; 6171 6172 drain_all_pages(cc.zone); 6173 6174 /* 6175 * In case of -EBUSY, we'd like to know which page causes problem. 6176 * So, just fall through. test_pages_isolated() has a tracepoint 6177 * which will report the busy page. 6178 * 6179 * It is possible that busy pages could become available before 6180 * the call to test_pages_isolated, and the range will actually be 6181 * allocated. So, if we fall through be sure to clear ret so that 6182 * -EBUSY is not accidentally used or returned to caller. 6183 */ 6184 ret = __alloc_contig_migrate_range(&cc, start, end); 6185 if (ret && ret != -EBUSY) 6186 goto done; 6187 ret = 0; 6188 6189 /* 6190 * Pages from [start, end) are within a pageblock_nr_pages 6191 * aligned blocks that are marked as MIGRATE_ISOLATE. What's 6192 * more, all pages in [start, end) are free in page allocator. 6193 * What we are going to do is to allocate all pages from 6194 * [start, end) (that is remove them from page allocator). 6195 * 6196 * The only problem is that pages at the beginning and at the 6197 * end of interesting range may be not aligned with pages that 6198 * page allocator holds, ie. they can be part of higher order 6199 * pages. Because of this, we reserve the bigger range and 6200 * once this is done free the pages we are not interested in. 6201 * 6202 * We don't have to hold zone->lock here because the pages are 6203 * isolated thus they won't get removed from buddy. 6204 */ 6205 6206 order = 0; 6207 outer_start = start; 6208 while (!PageBuddy(pfn_to_page(outer_start))) { 6209 if (++order > MAX_ORDER) { 6210 outer_start = start; 6211 break; 6212 } 6213 outer_start &= ~0UL << order; 6214 } 6215 6216 if (outer_start != start) { 6217 order = buddy_order(pfn_to_page(outer_start)); 6218 6219 /* 6220 * outer_start page could be small order buddy page and 6221 * it doesn't include start page. Adjust outer_start 6222 * in this case to report failed page properly 6223 * on tracepoint in test_pages_isolated() 6224 */ 6225 if (outer_start + (1UL << order) <= start) 6226 outer_start = start; 6227 } 6228 6229 /* Make sure the range is really isolated. */ 6230 if (test_pages_isolated(outer_start, end, 0)) { 6231 ret = -EBUSY; 6232 goto done; 6233 } 6234 6235 /* Grab isolated pages from freelists. 
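 A zero return from isolate_freepages_range() below means some page in
	 * [outer_start, end) could no longer be taken off the free lists, in which
	 * case the whole request fails with -EBUSY.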
*/ 6236 outer_end = isolate_freepages_range(&cc, outer_start, end); 6237 if (!outer_end) { 6238 ret = -EBUSY; 6239 goto done; 6240 } 6241 6242 /* Free head and tail (if any) */ 6243 if (start != outer_start) 6244 free_contig_range(outer_start, start - outer_start); 6245 if (end != outer_end) 6246 free_contig_range(end, outer_end - end); 6247 6248 done: 6249 undo_isolate_page_range(start, end, migratetype); 6250 return ret; 6251 } 6252 EXPORT_SYMBOL(alloc_contig_range); 6253 6254 static int __alloc_contig_pages(unsigned long start_pfn, 6255 unsigned long nr_pages, gfp_t gfp_mask) 6256 { 6257 unsigned long end_pfn = start_pfn + nr_pages; 6258 6259 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, 6260 gfp_mask); 6261 } 6262 6263 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, 6264 unsigned long nr_pages) 6265 { 6266 unsigned long i, end_pfn = start_pfn + nr_pages; 6267 struct page *page; 6268 6269 for (i = start_pfn; i < end_pfn; i++) { 6270 page = pfn_to_online_page(i); 6271 if (!page) 6272 return false; 6273 6274 if (page_zone(page) != z) 6275 return false; 6276 6277 if (PageReserved(page)) 6278 return false; 6279 6280 if (PageHuge(page)) 6281 return false; 6282 } 6283 return true; 6284 } 6285 6286 static bool zone_spans_last_pfn(const struct zone *zone, 6287 unsigned long start_pfn, unsigned long nr_pages) 6288 { 6289 unsigned long last_pfn = start_pfn + nr_pages - 1; 6290 6291 return zone_spans_pfn(zone, last_pfn); 6292 } 6293 6294 /** 6295 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages 6296 * @nr_pages: Number of contiguous pages to allocate 6297 * @gfp_mask: GFP mask to limit search and used during compaction 6298 * @nid: Target node 6299 * @nodemask: Mask for other possible nodes 6300 * 6301 * This routine is a wrapper around alloc_contig_range(). It scans over zones 6302 * on an applicable zonelist to find a contiguous pfn range which can then be 6303 * tried for allocation with alloc_contig_range(). This routine is intended 6304 * for allocation requests which can not be fulfilled with the buddy allocator. 6305 * 6306 * The allocated memory is always aligned to a page boundary. If nr_pages is a 6307 * power of two, then allocated range is also guaranteed to be aligned to same 6308 * nr_pages (e.g. 1GB request would be aligned to 1GB). 6309 * 6310 * Allocated pages can be freed with free_contig_range() or by manually calling 6311 * __free_page() on each allocated page. 6312 * 6313 * Return: pointer to contiguous pages on success, or NULL if not successful. 6314 */ 6315 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, 6316 int nid, nodemask_t *nodemask) 6317 { 6318 unsigned long ret, pfn, flags; 6319 struct zonelist *zonelist; 6320 struct zone *zone; 6321 struct zoneref *z; 6322 6323 zonelist = node_zonelist(nid, gfp_mask); 6324 for_each_zone_zonelist_nodemask(zone, z, zonelist, 6325 gfp_zone(gfp_mask), nodemask) { 6326 spin_lock_irqsave(&zone->lock, flags); 6327 6328 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 6329 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 6330 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 6331 /* 6332 * We release the zone lock here because 6333 * alloc_contig_range() will also lock the zone 6334 * at some point. If there's an allocation 6335 * spinning on this lock, it may win the race 6336 * and cause alloc_contig_range() to fail... 
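 If that happens, the
	 * loop below simply re-takes the zone lock, advances pfn by nr_pages
	 * and tries the next candidate range, falling through to the next zone
	 * in the zonelist once this one is exhausted.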
6337 */ 6338 spin_unlock_irqrestore(&zone->lock, flags); 6339 ret = __alloc_contig_pages(pfn, nr_pages, 6340 gfp_mask); 6341 if (!ret) 6342 return pfn_to_page(pfn); 6343 spin_lock_irqsave(&zone->lock, flags); 6344 } 6345 pfn += nr_pages; 6346 } 6347 spin_unlock_irqrestore(&zone->lock, flags); 6348 } 6349 return NULL; 6350 } 6351 #endif /* CONFIG_CONTIG_ALLOC */ 6352 6353 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 6354 { 6355 unsigned long count = 0; 6356 6357 for (; nr_pages--; pfn++) { 6358 struct page *page = pfn_to_page(pfn); 6359 6360 count += page_count(page) != 1; 6361 __free_page(page); 6362 } 6363 WARN(count != 0, "%lu pages are still in use!\n", count); 6364 } 6365 EXPORT_SYMBOL(free_contig_range); 6366 6367 /* 6368 * Effectively disable pcplists for the zone by setting the high limit to 0 6369 * and draining all cpus. A concurrent page freeing on another CPU that's about 6370 * to put the page on pcplist will either finish before the drain and the page 6371 * will be drained, or observe the new high limit and skip the pcplist. 6372 * 6373 * Must be paired with a call to zone_pcp_enable(). 6374 */ 6375 void zone_pcp_disable(struct zone *zone) 6376 { 6377 mutex_lock(&pcp_batch_high_lock); 6378 __zone_set_pageset_high_and_batch(zone, 0, 1); 6379 __drain_all_pages(zone, true); 6380 } 6381 6382 void zone_pcp_enable(struct zone *zone) 6383 { 6384 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); 6385 mutex_unlock(&pcp_batch_high_lock); 6386 } 6387 6388 void zone_pcp_reset(struct zone *zone) 6389 { 6390 int cpu; 6391 struct per_cpu_zonestat *pzstats; 6392 6393 if (zone->per_cpu_pageset != &boot_pageset) { 6394 for_each_online_cpu(cpu) { 6395 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 6396 drain_zonestat(zone, pzstats); 6397 } 6398 free_percpu(zone->per_cpu_pageset); 6399 zone->per_cpu_pageset = &boot_pageset; 6400 if (zone->per_cpu_zonestats != &boot_zonestats) { 6401 free_percpu(zone->per_cpu_zonestats); 6402 zone->per_cpu_zonestats = &boot_zonestats; 6403 } 6404 } 6405 } 6406 6407 #ifdef CONFIG_MEMORY_HOTREMOVE 6408 /* 6409 * All pages in the range must be in a single zone, must not contain holes, 6410 * must span full sections, and must be isolated before calling this function. 6411 */ 6412 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 6413 { 6414 unsigned long pfn = start_pfn; 6415 struct page *page; 6416 struct zone *zone; 6417 unsigned int order; 6418 unsigned long flags; 6419 6420 offline_mem_sections(pfn, end_pfn); 6421 zone = page_zone(pfn_to_page(pfn)); 6422 spin_lock_irqsave(&zone->lock, flags); 6423 while (pfn < end_pfn) { 6424 page = pfn_to_page(pfn); 6425 /* 6426 * The HWPoisoned page may be not in buddy system, and 6427 * page_count() is not 0. 6428 */ 6429 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6430 pfn++; 6431 continue; 6432 } 6433 /* 6434 * At this point all remaining PageOffline() pages have a 6435 * reference count of 0 and can simply be skipped. 6436 */ 6437 if (PageOffline(page)) { 6438 BUG_ON(page_count(page)); 6439 BUG_ON(PageBuddy(page)); 6440 pfn++; 6441 continue; 6442 } 6443 6444 BUG_ON(page_count(page)); 6445 BUG_ON(!PageBuddy(page)); 6446 order = buddy_order(page); 6447 del_page_from_free_list(page, zone, order); 6448 pfn += (1 << order); 6449 } 6450 spin_unlock_irqrestore(&zone->lock, flags); 6451 } 6452 #endif 6453 6454 /* 6455 * This function returns a stable result only if called under zone lock. 
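 *
 * Without zone->lock it is only a best-effort check: buddy_order_unsafe()
 * is used below precisely because the page may be split or merged
 * concurrently, so lockless callers must treat the result as a hint rather
 * than a guarantee.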
6456 */ 6457 bool is_free_buddy_page(struct page *page) 6458 { 6459 unsigned long pfn = page_to_pfn(page); 6460 unsigned int order; 6461 6462 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6463 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6464 6465 if (PageBuddy(page_head) && 6466 buddy_order_unsafe(page_head) >= order) 6467 break; 6468 } 6469 6470 return order <= MAX_ORDER; 6471 } 6472 EXPORT_SYMBOL(is_free_buddy_page); 6473 6474 #ifdef CONFIG_MEMORY_FAILURE 6475 /* 6476 * Break down a higher-order page in sub-pages, and keep our target out of 6477 * buddy allocator. 6478 */ 6479 static void break_down_buddy_pages(struct zone *zone, struct page *page, 6480 struct page *target, int low, int high, 6481 int migratetype) 6482 { 6483 unsigned long size = 1 << high; 6484 struct page *current_buddy, *next_page; 6485 6486 while (high > low) { 6487 high--; 6488 size >>= 1; 6489 6490 if (target >= &page[size]) { 6491 next_page = page + size; 6492 current_buddy = page; 6493 } else { 6494 next_page = page; 6495 current_buddy = page + size; 6496 } 6497 page = next_page; 6498 6499 if (set_page_guard(zone, current_buddy, high, migratetype)) 6500 continue; 6501 6502 if (current_buddy != target) { 6503 add_to_free_list(current_buddy, zone, high, migratetype); 6504 set_buddy_order(current_buddy, high); 6505 } 6506 } 6507 } 6508 6509 /* 6510 * Take a page that will be marked as poisoned off the buddy allocator. 6511 */ 6512 bool take_page_off_buddy(struct page *page) 6513 { 6514 struct zone *zone = page_zone(page); 6515 unsigned long pfn = page_to_pfn(page); 6516 unsigned long flags; 6517 unsigned int order; 6518 bool ret = false; 6519 6520 spin_lock_irqsave(&zone->lock, flags); 6521 for (order = 0; order < NR_PAGE_ORDERS; order++) { 6522 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6523 int page_order = buddy_order(page_head); 6524 6525 if (PageBuddy(page_head) && page_order >= order) { 6526 unsigned long pfn_head = page_to_pfn(page_head); 6527 int migratetype = get_pfnblock_migratetype(page_head, 6528 pfn_head); 6529 6530 del_page_from_free_list(page_head, zone, page_order); 6531 break_down_buddy_pages(zone, page_head, page, 0, 6532 page_order, migratetype); 6533 SetPageHWPoisonTakenOff(page); 6534 if (!is_migrate_isolate(migratetype)) 6535 __mod_zone_freepage_state(zone, -1, migratetype); 6536 ret = true; 6537 break; 6538 } 6539 if (page_count(page_head) > 0) 6540 break; 6541 } 6542 spin_unlock_irqrestore(&zone->lock, flags); 6543 return ret; 6544 } 6545 6546 /* 6547 * Cancel takeoff done by take_page_off_buddy(). 
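 *
 * Used on the hwpoison unpoisoning path: it drops the reference that kept
 * the isolated page pinned and, once the count reaches zero, clears the
 * taken-off marker and hands the page back to the buddy allocator as an
 * order-0 page.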
6548 */ 6549 bool put_page_back_buddy(struct page *page) 6550 { 6551 struct zone *zone = page_zone(page); 6552 unsigned long pfn = page_to_pfn(page); 6553 unsigned long flags; 6554 int migratetype = get_pfnblock_migratetype(page, pfn); 6555 bool ret = false; 6556 6557 spin_lock_irqsave(&zone->lock, flags); 6558 if (put_page_testzero(page)) { 6559 ClearPageHWPoisonTakenOff(page); 6560 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 6561 if (TestClearPageHWPoison(page)) { 6562 ret = true; 6563 } 6564 } 6565 spin_unlock_irqrestore(&zone->lock, flags); 6566 6567 return ret; 6568 } 6569 #endif 6570 6571 #ifdef CONFIG_ZONE_DMA 6572 bool has_managed_dma(void) 6573 { 6574 struct pglist_data *pgdat; 6575 6576 for_each_online_pgdat(pgdat) { 6577 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 6578 6579 if (managed_zone(zone)) 6580 return true; 6581 } 6582 return false; 6583 } 6584 #endif /* CONFIG_ZONE_DMA */ 6585 6586 #ifdef CONFIG_UNACCEPTED_MEMORY 6587 6588 /* Counts number of zones with unaccepted pages. */ 6589 static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages); 6590 6591 static bool lazy_accept = true; 6592 6593 static int __init accept_memory_parse(char *p) 6594 { 6595 if (!strcmp(p, "lazy")) { 6596 lazy_accept = true; 6597 return 0; 6598 } else if (!strcmp(p, "eager")) { 6599 lazy_accept = false; 6600 return 0; 6601 } else { 6602 return -EINVAL; 6603 } 6604 } 6605 early_param("accept_memory", accept_memory_parse); 6606 6607 static bool page_contains_unaccepted(struct page *page, unsigned int order) 6608 { 6609 phys_addr_t start = page_to_phys(page); 6610 phys_addr_t end = start + (PAGE_SIZE << order); 6611 6612 return range_contains_unaccepted_memory(start, end); 6613 } 6614 6615 static void accept_page(struct page *page, unsigned int order) 6616 { 6617 phys_addr_t start = page_to_phys(page); 6618 6619 accept_memory(start, start + (PAGE_SIZE << order)); 6620 } 6621 6622 static bool try_to_accept_memory_one(struct zone *zone) 6623 { 6624 unsigned long flags; 6625 struct page *page; 6626 bool last; 6627 6628 spin_lock_irqsave(&zone->lock, flags); 6629 page = list_first_entry_or_null(&zone->unaccepted_pages, 6630 struct page, lru); 6631 if (!page) { 6632 spin_unlock_irqrestore(&zone->lock, flags); 6633 return false; 6634 } 6635 6636 list_del(&page->lru); 6637 last = list_empty(&zone->unaccepted_pages); 6638 6639 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6640 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); 6641 spin_unlock_irqrestore(&zone->lock, flags); 6642 6643 accept_page(page, MAX_ORDER); 6644 6645 __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL); 6646 6647 if (last) 6648 static_branch_dec(&zones_with_unaccepted_pages); 6649 6650 return true; 6651 } 6652 6653 static bool cond_accept_memory(struct zone *zone, unsigned int order) 6654 { 6655 long to_accept; 6656 bool ret = false; 6657 6658 if (!has_unaccepted_memory()) 6659 return false; 6660 6661 if (list_empty(&zone->unaccepted_pages)) 6662 return false; 6663 6664 /* How much to accept to get to high watermark? 
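 The target is the shortfall of
	 * usable free pages (NR_FREE_PAGES minus the watermark-unusable part and
	 * minus pages still carried as NR_UNACCEPTED) against the high watermark;
	 * the loop below then accepts MAX_ORDER_NR_PAGES at a time until that
	 * shortfall is covered or no unaccepted pages remain.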
*/ 6665 to_accept = high_wmark_pages(zone) - 6666 (zone_page_state(zone, NR_FREE_PAGES) - 6667 __zone_watermark_unusable_free(zone, order, 0) - 6668 zone_page_state(zone, NR_UNACCEPTED)); 6669 6670 while (to_accept > 0) { 6671 if (!try_to_accept_memory_one(zone)) 6672 break; 6673 ret = true; 6674 to_accept -= MAX_ORDER_NR_PAGES; 6675 } 6676 6677 return ret; 6678 } 6679 6680 static inline bool has_unaccepted_memory(void) 6681 { 6682 return static_branch_unlikely(&zones_with_unaccepted_pages); 6683 } 6684 6685 static bool __free_unaccepted(struct page *page) 6686 { 6687 struct zone *zone = page_zone(page); 6688 unsigned long flags; 6689 bool first = false; 6690 6691 if (!lazy_accept) 6692 return false; 6693 6694 spin_lock_irqsave(&zone->lock, flags); 6695 first = list_empty(&zone->unaccepted_pages); 6696 list_add_tail(&page->lru, &zone->unaccepted_pages); 6697 __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); 6698 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); 6699 spin_unlock_irqrestore(&zone->lock, flags); 6700 6701 if (first) 6702 static_branch_inc(&zones_with_unaccepted_pages); 6703 6704 return true; 6705 } 6706 6707 #else 6708 6709 static bool page_contains_unaccepted(struct page *page, unsigned int order) 6710 { 6711 return false; 6712 } 6713 6714 static void accept_page(struct page *page, unsigned int order) 6715 { 6716 } 6717 6718 static bool cond_accept_memory(struct zone *zone, unsigned int order) 6719 { 6720 return false; 6721 } 6722 6723 static inline bool has_unaccepted_memory(void) 6724 { 6725 return false; 6726 } 6727 6728 static bool __free_unaccepted(struct page *page) 6729 { 6730 BUILD_BUG(); 6731 return false; 6732 } 6733 6734 #endif /* CONFIG_UNACCEPTED_MEMORY */ 6735
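
/*
 * Illustrative usage sketch (not part of this file; assumes
 * CONFIG_CONTIG_ALLOC=y and a caller-chosen page count 'nr'): a user of
 * the contiguous allocator defined above might do
 *
 *	struct page *pages;
 *
 *	pages = alloc_contig_pages(nr, GFP_KERNEL | __GFP_NOWARN,
 *				   numa_node_id(), NULL);
 *	if (!pages)
 *		return -ENOMEM;
 *	...
 *	free_contig_range(page_to_pfn(pages), nr);
 *
 * __GFP_NOWARN suppresses the migration-failure page dump in
 * alloc_contig_range(), and if nr is a power of two the returned range is
 * aligned to nr pages, as documented at alloc_contig_pages() above.
 */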