// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/page_table_check.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>
#include <linux/delayacct.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"
#include "swap.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page at the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/*
 * Don't poison memory with KASAN (only for the tag-based modes).
 * During boot, all non-reserved memblock memory is exposed to page_alloc.
 * Poisoning all that memory lengthens boot time, especially on systems with
 * a large amount of RAM. This flag is used to skip that poisoning.
 * This is only done for the tag-based KASAN modes, as those are able to
 * detect memory corruptions with the memory tags assigned by default.
 * All memory allocated normally after boot gets poisoned as usual.
 */
#define FPI_SKIP_KASAN_POISON	((__force fpi_t)BIT(2))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_HIGH_FRACTION (8)

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * On SMP, spin_trylock is sufficient protection.
 * On PREEMPT_RT, spin_trylock is equivalent on both SMP and UP.
 */
#define pcp_trylock_prepare(flags)	do { } while (0)
#define pcp_trylock_finish(flag)	do { } while (0)
#else

/* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
#define pcp_trylock_prepare(flags)	local_irq_save(flags)
#define pcp_trylock_finish(flags)	local_irq_restore(flags)
#endif

/*
 * Locking a pcp requires a PCP lookup followed by a spinlock. To avoid
 * a migration causing the wrong PCP to be locked and remote memory being
 * potentially allocated, pin the task to the CPU for the lookup+lock.
 * preempt_disable is used on !RT because it is faster than migrate_disable.
 * migrate_disable is used on RT because otherwise RT spinlock usage is
 * interfered with and a high priority task cannot preempt the allocator.
 */
#ifndef CONFIG_PREEMPT_RT
#define pcpu_task_pin()		preempt_disable()
#define pcpu_task_unpin()	preempt_enable()
#else
#define pcpu_task_pin()		migrate_disable()
#define pcpu_task_unpin()	migrate_enable()
#endif

/*
 * Generic helper to look up a per-cpu variable with an embedded spinlock.
 * The return value should be used with the equivalent unlock helper.
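 *
 * Illustrative usage (a sketch, not part of the original comment; it mirrors
 * how the pcp wrappers defined below are used elsewhere in this file):
 *
 *	struct per_cpu_pages *pcp;
 *
 *	pcp = pcpu_spin_lock(struct per_cpu_pages, lock, zone->per_cpu_pageset);
 *	... operate on the local pcp lists ...
 *	pcpu_spin_unlock(lock, pcp);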
163 */ 164 #define pcpu_spin_lock(type, member, ptr) \ 165 ({ \ 166 type *_ret; \ 167 pcpu_task_pin(); \ 168 _ret = this_cpu_ptr(ptr); \ 169 spin_lock(&_ret->member); \ 170 _ret; \ 171 }) 172 173 #define pcpu_spin_lock_irqsave(type, member, ptr, flags) \ 174 ({ \ 175 type *_ret; \ 176 pcpu_task_pin(); \ 177 _ret = this_cpu_ptr(ptr); \ 178 spin_lock_irqsave(&_ret->member, flags); \ 179 _ret; \ 180 }) 181 182 #define pcpu_spin_trylock_irqsave(type, member, ptr, flags) \ 183 ({ \ 184 type *_ret; \ 185 pcpu_task_pin(); \ 186 _ret = this_cpu_ptr(ptr); \ 187 if (!spin_trylock_irqsave(&_ret->member, flags)) { \ 188 pcpu_task_unpin(); \ 189 _ret = NULL; \ 190 } \ 191 _ret; \ 192 }) 193 194 #define pcpu_spin_unlock(member, ptr) \ 195 ({ \ 196 spin_unlock(&ptr->member); \ 197 pcpu_task_unpin(); \ 198 }) 199 200 #define pcpu_spin_unlock_irqrestore(member, ptr, flags) \ 201 ({ \ 202 spin_unlock_irqrestore(&ptr->member, flags); \ 203 pcpu_task_unpin(); \ 204 }) 205 206 /* struct per_cpu_pages specific helpers. */ 207 #define pcp_spin_lock(ptr) \ 208 pcpu_spin_lock(struct per_cpu_pages, lock, ptr) 209 210 #define pcp_spin_lock_irqsave(ptr, flags) \ 211 pcpu_spin_lock_irqsave(struct per_cpu_pages, lock, ptr, flags) 212 213 #define pcp_spin_trylock_irqsave(ptr, flags) \ 214 pcpu_spin_trylock_irqsave(struct per_cpu_pages, lock, ptr, flags) 215 216 #define pcp_spin_unlock(ptr) \ 217 pcpu_spin_unlock(lock, ptr) 218 219 #define pcp_spin_unlock_irqrestore(ptr, flags) \ 220 pcpu_spin_unlock_irqrestore(lock, ptr, flags) 221 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 222 DEFINE_PER_CPU(int, numa_node); 223 EXPORT_PER_CPU_SYMBOL(numa_node); 224 #endif 225 226 DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key); 227 228 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 229 /* 230 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly. 231 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined. 232 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem() 233 * defined in <linux/topology.h>. 234 */ 235 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ 236 EXPORT_PER_CPU_SYMBOL(_numa_mem_); 237 #endif 238 239 static DEFINE_MUTEX(pcpu_drain_mutex); 240 241 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY 242 volatile unsigned long latent_entropy __latent_entropy; 243 EXPORT_SYMBOL(latent_entropy); 244 #endif 245 246 /* 247 * Array of node states. 
248 */ 249 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { 250 [N_POSSIBLE] = NODE_MASK_ALL, 251 [N_ONLINE] = { { [0] = 1UL } }, 252 #ifndef CONFIG_NUMA 253 [N_NORMAL_MEMORY] = { { [0] = 1UL } }, 254 #ifdef CONFIG_HIGHMEM 255 [N_HIGH_MEMORY] = { { [0] = 1UL } }, 256 #endif 257 [N_MEMORY] = { { [0] = 1UL } }, 258 [N_CPU] = { { [0] = 1UL } }, 259 #endif /* NUMA */ 260 }; 261 EXPORT_SYMBOL(node_states); 262 263 atomic_long_t _totalram_pages __read_mostly; 264 EXPORT_SYMBOL(_totalram_pages); 265 unsigned long totalreserve_pages __read_mostly; 266 unsigned long totalcma_pages __read_mostly; 267 268 int percpu_pagelist_high_fraction; 269 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; 270 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); 271 EXPORT_SYMBOL(init_on_alloc); 272 273 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 274 EXPORT_SYMBOL(init_on_free); 275 276 static bool _init_on_alloc_enabled_early __read_mostly 277 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); 278 static int __init early_init_on_alloc(char *buf) 279 { 280 281 return kstrtobool(buf, &_init_on_alloc_enabled_early); 282 } 283 early_param("init_on_alloc", early_init_on_alloc); 284 285 static bool _init_on_free_enabled_early __read_mostly 286 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); 287 static int __init early_init_on_free(char *buf) 288 { 289 return kstrtobool(buf, &_init_on_free_enabled_early); 290 } 291 early_param("init_on_free", early_init_on_free); 292 293 /* 294 * A cached value of the page's pageblock's migratetype, used when the page is 295 * put on a pcplist. Used to avoid the pageblock migratetype lookup when 296 * freeing from pcplists in most cases, at the cost of possibly becoming stale. 297 * Also the migratetype set in the page does not necessarily match the pcplist 298 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any 299 * other index - this ensures that it will be put on the correct CMA freelist. 300 */ 301 static inline int get_pcppage_migratetype(struct page *page) 302 { 303 return page->index; 304 } 305 306 static inline void set_pcppage_migratetype(struct page *page, int migratetype) 307 { 308 page->index = migratetype; 309 } 310 311 #ifdef CONFIG_PM_SLEEP 312 /* 313 * The following functions are used by the suspend/hibernate code to temporarily 314 * change gfp_allowed_mask in order to avoid using I/O during memory allocations 315 * while devices are suspended. To avoid races with the suspend/hibernate code, 316 * they should always be called with system_transition_mutex held 317 * (gfp_allowed_mask also should only be modified with system_transition_mutex 318 * held, unless the suspend/hibernate code is guaranteed not to run in parallel 319 * with that modification). 
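 *
 * Illustrative pairing (a sketch, not part of the original comment): the
 * suspend/hibernate core is expected to call, with system_transition_mutex
 * held,
 *
 *	pm_restrict_gfp_mask();		masks out __GFP_IO | __GFP_FS
 *	... devices suspended, no I/O-backed allocations ...
 *	pm_restore_gfp_mask();		restores the saved mask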
320 */ 321 322 static gfp_t saved_gfp_mask; 323 324 void pm_restore_gfp_mask(void) 325 { 326 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 327 if (saved_gfp_mask) { 328 gfp_allowed_mask = saved_gfp_mask; 329 saved_gfp_mask = 0; 330 } 331 } 332 333 void pm_restrict_gfp_mask(void) 334 { 335 WARN_ON(!mutex_is_locked(&system_transition_mutex)); 336 WARN_ON(saved_gfp_mask); 337 saved_gfp_mask = gfp_allowed_mask; 338 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); 339 } 340 341 bool pm_suspended_storage(void) 342 { 343 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) 344 return false; 345 return true; 346 } 347 #endif /* CONFIG_PM_SLEEP */ 348 349 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 350 unsigned int pageblock_order __read_mostly; 351 #endif 352 353 static void __free_pages_ok(struct page *page, unsigned int order, 354 fpi_t fpi_flags); 355 356 /* 357 * results with 256, 32 in the lowmem_reserve sysctl: 358 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high) 359 * 1G machine -> (16M dma, 784M normal, 224M high) 360 * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA 361 * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL 362 * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA 363 * 364 * TBD: should special case ZONE_DMA32 machines here - in those we normally 365 * don't need any ZONE_NORMAL reservation 366 */ 367 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { 368 #ifdef CONFIG_ZONE_DMA 369 [ZONE_DMA] = 256, 370 #endif 371 #ifdef CONFIG_ZONE_DMA32 372 [ZONE_DMA32] = 256, 373 #endif 374 [ZONE_NORMAL] = 32, 375 #ifdef CONFIG_HIGHMEM 376 [ZONE_HIGHMEM] = 0, 377 #endif 378 [ZONE_MOVABLE] = 0, 379 }; 380 381 static char * const zone_names[MAX_NR_ZONES] = { 382 #ifdef CONFIG_ZONE_DMA 383 "DMA", 384 #endif 385 #ifdef CONFIG_ZONE_DMA32 386 "DMA32", 387 #endif 388 "Normal", 389 #ifdef CONFIG_HIGHMEM 390 "HighMem", 391 #endif 392 "Movable", 393 #ifdef CONFIG_ZONE_DEVICE 394 "Device", 395 #endif 396 }; 397 398 const char * const migratetype_names[MIGRATE_TYPES] = { 399 "Unmovable", 400 "Movable", 401 "Reclaimable", 402 "HighAtomic", 403 #ifdef CONFIG_CMA 404 "CMA", 405 #endif 406 #ifdef CONFIG_MEMORY_ISOLATION 407 "Isolate", 408 #endif 409 }; 410 411 compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { 412 [NULL_COMPOUND_DTOR] = NULL, 413 [COMPOUND_PAGE_DTOR] = free_compound_page, 414 #ifdef CONFIG_HUGETLB_PAGE 415 [HUGETLB_PAGE_DTOR] = free_huge_page, 416 #endif 417 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 418 [TRANSHUGE_PAGE_DTOR] = free_transhuge_page, 419 #endif 420 }; 421 422 int min_free_kbytes = 1024; 423 int user_min_free_kbytes = -1; 424 int watermark_boost_factor __read_mostly = 15000; 425 int watermark_scale_factor = 10; 426 427 static unsigned long nr_kernel_pages __initdata; 428 static unsigned long nr_all_pages __initdata; 429 static unsigned long dma_reserve __initdata; 430 431 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; 432 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; 433 static unsigned long required_kernelcore __initdata; 434 static unsigned long required_kernelcore_percent __initdata; 435 static unsigned long required_movablecore __initdata; 436 static unsigned long required_movablecore_percent __initdata; 437 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; 438 bool mirrored_kernelcore __initdata_memblock; 439 440 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ 441 int 
movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

static inline bool deferred_pages_enabled(void)
{
	return static_branch_unlikely(&deferred_pages);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	if (early_page_ext_enabled())
		return false;
	/*
	 * prev_end_pfn is a static that caches the end of the previous zone.
	 * No need to protect it: this is called very early in boot, before
	 * smp_init().
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
		return true;
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
506 */ 507 nr_initialised++; 508 if ((nr_initialised > PAGES_PER_SECTION) && 509 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 510 NODE_DATA(nid)->first_deferred_pfn = pfn; 511 return true; 512 } 513 return false; 514 } 515 #else 516 static inline bool deferred_pages_enabled(void) 517 { 518 return false; 519 } 520 521 static inline bool early_page_uninitialised(unsigned long pfn) 522 { 523 return false; 524 } 525 526 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 527 { 528 return false; 529 } 530 #endif 531 532 /* Return a pointer to the bitmap storing bits affecting a block of pages */ 533 static inline unsigned long *get_pageblock_bitmap(const struct page *page, 534 unsigned long pfn) 535 { 536 #ifdef CONFIG_SPARSEMEM 537 return section_to_usemap(__pfn_to_section(pfn)); 538 #else 539 return page_zone(page)->pageblock_flags; 540 #endif /* CONFIG_SPARSEMEM */ 541 } 542 543 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) 544 { 545 #ifdef CONFIG_SPARSEMEM 546 pfn &= (PAGES_PER_SECTION-1); 547 #else 548 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); 549 #endif /* CONFIG_SPARSEMEM */ 550 return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS; 551 } 552 553 static __always_inline 554 unsigned long __get_pfnblock_flags_mask(const struct page *page, 555 unsigned long pfn, 556 unsigned long mask) 557 { 558 unsigned long *bitmap; 559 unsigned long bitidx, word_bitidx; 560 unsigned long word; 561 562 bitmap = get_pageblock_bitmap(page, pfn); 563 bitidx = pfn_to_bitidx(page, pfn); 564 word_bitidx = bitidx / BITS_PER_LONG; 565 bitidx &= (BITS_PER_LONG-1); 566 /* 567 * This races, without locks, with set_pfnblock_flags_mask(). Ensure 568 * a consistent read of the memory array, so that results, even though 569 * racy, are not corrupted. 
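	 *
	 * Illustrative arithmetic (a sketch, not part of the original comment),
	 * assuming pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4: a pfn at
	 * offset 0x2a00 into its section gives bitidx (0x2a00 >> 9) * 4 = 84,
	 * i.e. word_bitidx 1 with a bit offset of 20 on a 64-bit kernel.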
570 */ 571 word = READ_ONCE(bitmap[word_bitidx]); 572 return (word >> bitidx) & mask; 573 } 574 575 /** 576 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages 577 * @page: The page within the block of interest 578 * @pfn: The target page frame number 579 * @mask: mask of bits that the caller is interested in 580 * 581 * Return: pageblock_bits flags 582 */ 583 unsigned long get_pfnblock_flags_mask(const struct page *page, 584 unsigned long pfn, unsigned long mask) 585 { 586 return __get_pfnblock_flags_mask(page, pfn, mask); 587 } 588 589 static __always_inline int get_pfnblock_migratetype(const struct page *page, 590 unsigned long pfn) 591 { 592 return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); 593 } 594 595 /** 596 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages 597 * @page: The page within the block of interest 598 * @flags: The flags to set 599 * @pfn: The target page frame number 600 * @mask: mask of bits that the caller is interested in 601 */ 602 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, 603 unsigned long pfn, 604 unsigned long mask) 605 { 606 unsigned long *bitmap; 607 unsigned long bitidx, word_bitidx; 608 unsigned long word; 609 610 BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4); 611 BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits)); 612 613 bitmap = get_pageblock_bitmap(page, pfn); 614 bitidx = pfn_to_bitidx(page, pfn); 615 word_bitidx = bitidx / BITS_PER_LONG; 616 bitidx &= (BITS_PER_LONG-1); 617 618 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); 619 620 mask <<= bitidx; 621 flags <<= bitidx; 622 623 word = READ_ONCE(bitmap[word_bitidx]); 624 do { 625 } while (!try_cmpxchg(&bitmap[word_bitidx], &word, (word & ~mask) | flags)); 626 } 627 628 void set_pageblock_migratetype(struct page *page, int migratetype) 629 { 630 if (unlikely(page_group_by_mobility_disabled && 631 migratetype < MIGRATE_PCPTYPES)) 632 migratetype = MIGRATE_UNMOVABLE; 633 634 set_pfnblock_flags_mask(page, (unsigned long)migratetype, 635 page_to_pfn(page), MIGRATETYPE_MASK); 636 } 637 638 #ifdef CONFIG_DEBUG_VM 639 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) 640 { 641 int ret = 0; 642 unsigned seq; 643 unsigned long pfn = page_to_pfn(page); 644 unsigned long sp, start_pfn; 645 646 do { 647 seq = zone_span_seqbegin(zone); 648 start_pfn = zone->zone_start_pfn; 649 sp = zone->spanned_pages; 650 if (!zone_spans_pfn(zone, pfn)) 651 ret = 1; 652 } while (zone_span_seqretry(zone, seq)); 653 654 if (ret) 655 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", 656 pfn, zone_to_nid(zone), zone->name, 657 start_pfn, start_pfn + sp); 658 659 return ret; 660 } 661 662 static int page_is_consistent(struct zone *zone, struct page *page) 663 { 664 if (zone != page_zone(page)) 665 return 0; 666 667 return 1; 668 } 669 /* 670 * Temporary debugging check for pages not lying within a given zone. 
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page(page, reason);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static inline unsigned int order_to_pindex(int migratetype, int order)
{
	int base = order;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order > PAGE_ALLOC_COSTLY_ORDER) {
		VM_BUG_ON(order != pageblock_order);
		return NR_LOWORDER_PCP_LISTS;
	}
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return (MIGRATE_PCPTYPES * base) + migratetype;
}

static inline int pindex_to_order(unsigned int pindex)
{
	int order = pindex / MIGRATE_PCPTYPES;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pindex == NR_LOWORDER_PCP_LISTS)
		order = pageblock_order;
#else
	VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
#endif

	return order;
}

static inline bool pcp_allowed_order(unsigned int order)
{
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return true;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (order == pageblock_order)
		return true;
#endif
	return false;
}

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (pcp_allowed_order(order))		/* Via pcp? */
		free_unref_page(page, order);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits is a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
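 *
 * Illustrative example (not part of the original comment): an order-2
 * compound page spans four struct pages. The first has PG_head set; each
 * of the other three has ->compound_head set to the address of the head
 * page with bit 0 set, so PageTail() is true for them and compound_head()
 * resolves back to the head page.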
788 */ 789 790 void free_compound_page(struct page *page) 791 { 792 mem_cgroup_uncharge(page_folio(page)); 793 free_the_page(page, compound_order(page)); 794 } 795 796 static void prep_compound_head(struct page *page, unsigned int order) 797 { 798 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); 799 set_compound_order(page, order); 800 atomic_set(compound_mapcount_ptr(page), -1); 801 atomic_set(compound_pincount_ptr(page), 0); 802 } 803 804 static void prep_compound_tail(struct page *head, int tail_idx) 805 { 806 struct page *p = head + tail_idx; 807 808 p->mapping = TAIL_MAPPING; 809 set_compound_head(p, head); 810 } 811 812 void prep_compound_page(struct page *page, unsigned int order) 813 { 814 int i; 815 int nr_pages = 1 << order; 816 817 __SetPageHead(page); 818 for (i = 1; i < nr_pages; i++) 819 prep_compound_tail(page, i); 820 821 prep_compound_head(page, order); 822 } 823 824 void destroy_large_folio(struct folio *folio) 825 { 826 enum compound_dtor_id dtor = folio_page(folio, 1)->compound_dtor; 827 828 VM_BUG_ON_FOLIO(dtor >= NR_COMPOUND_DTORS, folio); 829 compound_page_dtors[dtor](&folio->page); 830 } 831 832 #ifdef CONFIG_DEBUG_PAGEALLOC 833 unsigned int _debug_guardpage_minorder; 834 835 bool _debug_pagealloc_enabled_early __read_mostly 836 = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); 837 EXPORT_SYMBOL(_debug_pagealloc_enabled_early); 838 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); 839 EXPORT_SYMBOL(_debug_pagealloc_enabled); 840 841 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); 842 843 static int __init early_debug_pagealloc(char *buf) 844 { 845 return kstrtobool(buf, &_debug_pagealloc_enabled_early); 846 } 847 early_param("debug_pagealloc", early_debug_pagealloc); 848 849 static int __init debug_guardpage_minorder_setup(char *buf) 850 { 851 unsigned long res; 852 853 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { 854 pr_err("Bad debug_guardpage_minorder value\n"); 855 return 0; 856 } 857 _debug_guardpage_minorder = res; 858 pr_info("Setting debug_guardpage_minorder to %lu\n", res); 859 return 0; 860 } 861 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); 862 863 static inline bool set_page_guard(struct zone *zone, struct page *page, 864 unsigned int order, int migratetype) 865 { 866 if (!debug_guardpage_enabled()) 867 return false; 868 869 if (order >= debug_guardpage_minorder()) 870 return false; 871 872 __SetPageGuard(page); 873 INIT_LIST_HEAD(&page->buddy_list); 874 set_page_private(page, order); 875 /* Guard pages are not available for any usage */ 876 if (!is_migrate_isolate(migratetype)) 877 __mod_zone_freepage_state(zone, -(1 << order), migratetype); 878 879 return true; 880 } 881 882 static inline void clear_page_guard(struct zone *zone, struct page *page, 883 unsigned int order, int migratetype) 884 { 885 if (!debug_guardpage_enabled()) 886 return; 887 888 __ClearPageGuard(page); 889 890 set_page_private(page, 0); 891 if (!is_migrate_isolate(migratetype)) 892 __mod_zone_freepage_state(zone, (1 << order), migratetype); 893 } 894 #else 895 static inline bool set_page_guard(struct zone *zone, struct page *page, 896 unsigned int order, int migratetype) { return false; } 897 static inline void clear_page_guard(struct zone *zone, struct page *page, 898 unsigned int order, int migratetype) {} 899 #endif 900 901 /* 902 * Enable static keys related to various memory debugging and hardening options. 903 * Some override others, and depend on early params that are evaluated in the 904 * order of appearance. 
So we need to first gather the full picture of what was 905 * enabled, and then make decisions. 906 */ 907 void __init init_mem_debugging_and_hardening(void) 908 { 909 bool page_poisoning_requested = false; 910 911 #ifdef CONFIG_PAGE_POISONING 912 /* 913 * Page poisoning is debug page alloc for some arches. If 914 * either of those options are enabled, enable poisoning. 915 */ 916 if (page_poisoning_enabled() || 917 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && 918 debug_pagealloc_enabled())) { 919 static_branch_enable(&_page_poisoning_enabled); 920 page_poisoning_requested = true; 921 } 922 #endif 923 924 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && 925 page_poisoning_requested) { 926 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " 927 "will take precedence over init_on_alloc and init_on_free\n"); 928 _init_on_alloc_enabled_early = false; 929 _init_on_free_enabled_early = false; 930 } 931 932 if (_init_on_alloc_enabled_early) 933 static_branch_enable(&init_on_alloc); 934 else 935 static_branch_disable(&init_on_alloc); 936 937 if (_init_on_free_enabled_early) 938 static_branch_enable(&init_on_free); 939 else 940 static_branch_disable(&init_on_free); 941 942 if (IS_ENABLED(CONFIG_KMSAN) && 943 (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) 944 pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); 945 946 #ifdef CONFIG_DEBUG_PAGEALLOC 947 if (!debug_pagealloc_enabled()) 948 return; 949 950 static_branch_enable(&_debug_pagealloc_enabled); 951 952 if (!debug_guardpage_minorder()) 953 return; 954 955 static_branch_enable(&_debug_guardpage_enabled); 956 #endif 957 } 958 959 static inline void set_buddy_order(struct page *page, unsigned int order) 960 { 961 set_page_private(page, order); 962 __SetPageBuddy(page); 963 } 964 965 #ifdef CONFIG_COMPACTION 966 static inline struct capture_control *task_capc(struct zone *zone) 967 { 968 struct capture_control *capc = current->capture_control; 969 970 return unlikely(capc) && 971 !(current->flags & PF_KTHREAD) && 972 !capc->page && 973 capc->cc->zone == zone ? capc : NULL; 974 } 975 976 static inline bool 977 compaction_capture(struct capture_control *capc, struct page *page, 978 int order, int migratetype) 979 { 980 if (!capc || order != capc->cc->order) 981 return false; 982 983 /* Do not accidentally pollute CMA or isolated regions*/ 984 if (is_migrate_cma(migratetype) || 985 is_migrate_isolate(migratetype)) 986 return false; 987 988 /* 989 * Do not let lower order allocations pollute a movable pageblock. 990 * This might let an unmovable request use a reclaimable pageblock 991 * and vice-versa but no more than normal fallback logic which can 992 * have trouble finding a high-order free page. 
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->buddy_list);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case that is
 * happening, add the free page to the tail of the list so it's less
 * likely to be used soon and more likely to be merged as a higher
 * order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	unsigned long higher_page_pfn;
	struct page *higher_page;

	if (order >= MAX_ORDER - 2)
		return false;

	higher_page_pfn = buddy_pfn & pfn;
	higher_page = page + (higher_page_pfn - pfn);

	return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1,
			NULL) != NULL;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
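 *
 * Illustrative arithmetic (a sketch, not part of the original comment):
 * at order o, the buddy of the block starting at pfn is the block starting
 * at pfn ^ (1 << o). For example, the order-0 buddy of pfn 12 is pfn 13;
 * if both are free they merge into the order-1 block starting at
 * combined_pfn = 12 & 13 = 12.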
1094 * At a high level, all that happens here is marking the table entry 1095 * at the bottom level available, and propagating the changes upward 1096 * as necessary, plus some accounting needed to play nicely with other 1097 * parts of the VM system. 1098 * At each level, we keep a list of pages, which are heads of continuous 1099 * free pages of length of (1 << order) and marked with PageBuddy. 1100 * Page's order is recorded in page_private(page) field. 1101 * So when we are allocating or freeing one, we can derive the state of the 1102 * other. That is, if we allocate a small block, and both were 1103 * free, the remainder of the region must be split into blocks. 1104 * If a block is freed, and its buddy is also free, then this 1105 * triggers coalescing into a block of larger size. 1106 * 1107 * -- nyc 1108 */ 1109 1110 static inline void __free_one_page(struct page *page, 1111 unsigned long pfn, 1112 struct zone *zone, unsigned int order, 1113 int migratetype, fpi_t fpi_flags) 1114 { 1115 struct capture_control *capc = task_capc(zone); 1116 unsigned long buddy_pfn = 0; 1117 unsigned long combined_pfn; 1118 struct page *buddy; 1119 bool to_tail; 1120 1121 VM_BUG_ON(!zone_is_initialized(zone)); 1122 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 1123 1124 VM_BUG_ON(migratetype == -1); 1125 if (likely(!is_migrate_isolate(migratetype))) 1126 __mod_zone_freepage_state(zone, 1 << order, migratetype); 1127 1128 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); 1129 VM_BUG_ON_PAGE(bad_range(zone, page), page); 1130 1131 while (order < MAX_ORDER - 1) { 1132 if (compaction_capture(capc, page, order, migratetype)) { 1133 __mod_zone_freepage_state(zone, -(1 << order), 1134 migratetype); 1135 return; 1136 } 1137 1138 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); 1139 if (!buddy) 1140 goto done_merging; 1141 1142 if (unlikely(order >= pageblock_order)) { 1143 /* 1144 * We want to prevent merge between freepages on pageblock 1145 * without fallbacks and normal pageblock. Without this, 1146 * pageblock isolation could cause incorrect freepage or CMA 1147 * accounting or HIGHATOMIC accounting. 1148 */ 1149 int buddy_mt = get_pageblock_migratetype(buddy); 1150 1151 if (migratetype != buddy_mt 1152 && (!migratetype_is_mergeable(migratetype) || 1153 !migratetype_is_mergeable(buddy_mt))) 1154 goto done_merging; 1155 } 1156 1157 /* 1158 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page, 1159 * merge with it and move up one order. 
1160 */ 1161 if (page_is_guard(buddy)) 1162 clear_page_guard(zone, buddy, order, migratetype); 1163 else 1164 del_page_from_free_list(buddy, zone, order); 1165 combined_pfn = buddy_pfn & pfn; 1166 page = page + (combined_pfn - pfn); 1167 pfn = combined_pfn; 1168 order++; 1169 } 1170 1171 done_merging: 1172 set_buddy_order(page, order); 1173 1174 if (fpi_flags & FPI_TO_TAIL) 1175 to_tail = true; 1176 else if (is_shuffle_order(order)) 1177 to_tail = shuffle_pick_tail(); 1178 else 1179 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); 1180 1181 if (to_tail) 1182 add_to_free_list_tail(page, zone, order, migratetype); 1183 else 1184 add_to_free_list(page, zone, order, migratetype); 1185 1186 /* Notify page reporting subsystem of freed page */ 1187 if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) 1188 page_reporting_notify_free(order); 1189 } 1190 1191 /** 1192 * split_free_page() -- split a free page at split_pfn_offset 1193 * @free_page: the original free page 1194 * @order: the order of the page 1195 * @split_pfn_offset: split offset within the page 1196 * 1197 * Return -ENOENT if the free page is changed, otherwise 0 1198 * 1199 * It is used when the free page crosses two pageblocks with different migratetypes 1200 * at split_pfn_offset within the page. The split free page will be put into 1201 * separate migratetype lists afterwards. Otherwise, the function achieves 1202 * nothing. 1203 */ 1204 int split_free_page(struct page *free_page, 1205 unsigned int order, unsigned long split_pfn_offset) 1206 { 1207 struct zone *zone = page_zone(free_page); 1208 unsigned long free_page_pfn = page_to_pfn(free_page); 1209 unsigned long pfn; 1210 unsigned long flags; 1211 int free_page_order; 1212 int mt; 1213 int ret = 0; 1214 1215 if (split_pfn_offset == 0) 1216 return ret; 1217 1218 spin_lock_irqsave(&zone->lock, flags); 1219 1220 if (!PageBuddy(free_page) || buddy_order(free_page) != order) { 1221 ret = -ENOENT; 1222 goto out; 1223 } 1224 1225 mt = get_pageblock_migratetype(free_page); 1226 if (likely(!is_migrate_isolate(mt))) 1227 __mod_zone_freepage_state(zone, -(1UL << order), mt); 1228 1229 del_page_from_free_list(free_page, zone, order); 1230 for (pfn = free_page_pfn; 1231 pfn < free_page_pfn + (1UL << order);) { 1232 int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn); 1233 1234 free_page_order = min_t(unsigned int, 1235 pfn ? __ffs(pfn) : order, 1236 __fls(split_pfn_offset)); 1237 __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order, 1238 mt, FPI_NONE); 1239 pfn += 1UL << free_page_order; 1240 split_pfn_offset -= (1UL << free_page_order); 1241 /* we have done the first part, now switch to second part */ 1242 if (split_pfn_offset == 0) 1243 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); 1244 } 1245 out: 1246 spin_unlock_irqrestore(&zone->lock, flags); 1247 return ret; 1248 } 1249 /* 1250 * A bad page could be due to a number of fields. Instead of multiple branches, 1251 * try and check multiple fields with one check. The caller must do a detailed 1252 * check if necessary. 
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			page->memcg_data |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->memcg_data))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void free_page_is_bad_report(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline bool free_page_is_bad(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return false;

	/* Something has gone sideways, find it */
	free_page_is_bad_report(page);
	return true;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

/*
 * Skip KASAN memory poisoning when either:
 *
 * 1. Deferred memory initialization has not yet completed,
 *    see the explanation below.
 * 2. Skipping poisoning is requested via FPI_SKIP_KASAN_POISON,
 *    see the comment next to it.
 * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
 *    see the comment next to it.
 *
 * Poisoning pages during deferred memory init will greatly lengthen the
 * process and cause problems in large memory systems as the deferred pages
 * initialization is done with interrupts disabled.
1373 * 1374 * Assuming that there will be no reference to those newly initialized 1375 * pages before they are ever allocated, this should have no effect on 1376 * KASAN memory tracking as the poison will be properly inserted at page 1377 * allocation time. The only corner case is when pages are allocated by 1378 * on-demand allocation and then freed again before the deferred pages 1379 * initialization is done, but this is not likely to happen. 1380 */ 1381 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) 1382 { 1383 return deferred_pages_enabled() || 1384 (!IS_ENABLED(CONFIG_KASAN_GENERIC) && 1385 (fpi_flags & FPI_SKIP_KASAN_POISON)) || 1386 PageSkipKASanPoison(page); 1387 } 1388 1389 static void kernel_init_pages(struct page *page, int numpages) 1390 { 1391 int i; 1392 1393 /* s390's use of memset() could override KASAN redzones. */ 1394 kasan_disable_current(); 1395 for (i = 0; i < numpages; i++) 1396 clear_highpage_kasan_tagged(page + i); 1397 kasan_enable_current(); 1398 } 1399 1400 static __always_inline bool free_pages_prepare(struct page *page, 1401 unsigned int order, bool check_free, fpi_t fpi_flags) 1402 { 1403 int bad = 0; 1404 bool init = want_init_on_free(); 1405 1406 VM_BUG_ON_PAGE(PageTail(page), page); 1407 1408 trace_mm_page_free(page, order); 1409 kmsan_free_page(page, order); 1410 1411 if (unlikely(PageHWPoison(page)) && !order) { 1412 /* 1413 * Do not let hwpoison pages hit pcplists/buddy 1414 * Untie memcg state and reset page's owner 1415 */ 1416 if (memcg_kmem_enabled() && PageMemcgKmem(page)) 1417 __memcg_kmem_uncharge_page(page, order); 1418 reset_page_owner(page, order); 1419 page_table_check_free(page, order); 1420 return false; 1421 } 1422 1423 /* 1424 * Check tail pages before head page information is cleared to 1425 * avoid checking PageCompound for order-0 pages. 1426 */ 1427 if (unlikely(order)) { 1428 bool compound = PageCompound(page); 1429 int i; 1430 1431 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); 1432 1433 if (compound) { 1434 ClearPageDoubleMap(page); 1435 ClearPageHasHWPoisoned(page); 1436 } 1437 for (i = 1; i < (1 << order); i++) { 1438 if (compound) 1439 bad += free_tail_pages_check(page, page + i); 1440 if (unlikely(free_page_is_bad(page + i))) { 1441 bad++; 1442 continue; 1443 } 1444 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1445 } 1446 } 1447 if (PageMappingFlags(page)) 1448 page->mapping = NULL; 1449 if (memcg_kmem_enabled() && PageMemcgKmem(page)) 1450 __memcg_kmem_uncharge_page(page, order); 1451 if (check_free && free_page_is_bad(page)) 1452 bad++; 1453 if (bad) 1454 return false; 1455 1456 page_cpupid_reset_last(page); 1457 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; 1458 reset_page_owner(page, order); 1459 page_table_check_free(page, order); 1460 1461 if (!PageHighMem(page)) { 1462 debug_check_no_locks_freed(page_address(page), 1463 PAGE_SIZE << order); 1464 debug_check_no_obj_freed(page_address(page), 1465 PAGE_SIZE << order); 1466 } 1467 1468 kernel_poison_pages(page, 1 << order); 1469 1470 /* 1471 * As memory initialization might be integrated into KASAN, 1472 * KASAN poisoning and memory initialization code must be 1473 * kept together to avoid discrepancies in behavior. 1474 * 1475 * With hardware tag-based KASAN, memory tags must be set before the 1476 * page becomes unavailable via debug_pagealloc or arch_free_page. 
	 */
	if (!should_skip_kasan_poison(page, fpi_flags)) {
		kasan_poison_pages(page, order, init);

		/* Memory is already initialized if KASAN did it internally. */
		if (kasan_has_integrated_init())
			init = false;
	}
	if (init)
		kernel_init_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible. s390
	 * does this. So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
 * moved from pcp lists to free lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	return free_pages_prepare(page, order, true, FPI_NONE);
}

/* return true if this page has an inappropriate state */
static bool bulkfree_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return free_page_is_bad(page);
	else
		return false;
}
#else
/*
 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
 * moving from pcp lists to free lists, in order to reduce overhead. With
 * debug_pagealloc enabled, they are also checked immediately when being freed
 * to the pcp lists.
 */
static bool free_pcp_prepare(struct page *page, unsigned int order)
{
	if (debug_pagealloc_enabled_static())
		return free_pages_prepare(page, order, true, FPI_NONE);
	else
		return free_pages_prepare(page, order, false, FPI_NONE);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_page_is_bad(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone.
 * count is the number of pages to free.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
			       struct per_cpu_pages *pcp,
			       int pindex)
{
	int min_pindex = 0;
	int max_pindex = NR_PCP_LISTS - 1;
	unsigned int order;
	bool isolated_pageblocks;
	struct page *page;

	/*
	 * Ensure a proper count is passed; otherwise the
	 * while (list_empty(list)) loop below would get stuck.
	 */
	count = min(pcp->count, count);

	/* Ensure requested pindex is drained first. */
	pindex = pindex - 1;

	/* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */
	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count > 0) {
		struct list_head *list;
		int nr_pages;

		/* Remove pages from lists in a round-robin fashion.
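		 *
		 * Illustrative behaviour (a sketch, not part of the original
		 * comment): if the caller asked for pindex 3, that list is
		 * drained first, then the scan advances to 4, 5, ... and
		 * wraps back to min_pindex, narrowing [min_pindex, max_pindex]
		 * as empty lists are encountered, until count pages are freed.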
*/ 1573 do { 1574 if (++pindex > max_pindex) 1575 pindex = min_pindex; 1576 list = &pcp->lists[pindex]; 1577 if (!list_empty(list)) 1578 break; 1579 1580 if (pindex == max_pindex) 1581 max_pindex--; 1582 if (pindex == min_pindex) 1583 min_pindex++; 1584 } while (1); 1585 1586 order = pindex_to_order(pindex); 1587 nr_pages = 1 << order; 1588 do { 1589 int mt; 1590 1591 page = list_last_entry(list, struct page, pcp_list); 1592 mt = get_pcppage_migratetype(page); 1593 1594 /* must delete to avoid corrupting pcp list */ 1595 list_del(&page->pcp_list); 1596 count -= nr_pages; 1597 pcp->count -= nr_pages; 1598 1599 if (bulkfree_pcp_prepare(page)) 1600 continue; 1601 1602 /* MIGRATE_ISOLATE page should not go to pcplists */ 1603 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); 1604 /* Pageblock could have been isolated meanwhile */ 1605 if (unlikely(isolated_pageblocks)) 1606 mt = get_pageblock_migratetype(page); 1607 1608 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); 1609 trace_mm_page_pcpu_drain(page, order, mt); 1610 } while (count > 0 && !list_empty(list)); 1611 } 1612 1613 spin_unlock(&zone->lock); 1614 } 1615 1616 static void free_one_page(struct zone *zone, 1617 struct page *page, unsigned long pfn, 1618 unsigned int order, 1619 int migratetype, fpi_t fpi_flags) 1620 { 1621 unsigned long flags; 1622 1623 spin_lock_irqsave(&zone->lock, flags); 1624 if (unlikely(has_isolate_pageblock(zone) || 1625 is_migrate_isolate(migratetype))) { 1626 migratetype = get_pfnblock_migratetype(page, pfn); 1627 } 1628 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1629 spin_unlock_irqrestore(&zone->lock, flags); 1630 } 1631 1632 static void __meminit __init_single_page(struct page *page, unsigned long pfn, 1633 unsigned long zone, int nid) 1634 { 1635 mm_zero_struct_page(page); 1636 set_page_links(page, zone, nid, pfn); 1637 init_page_count(page); 1638 page_mapcount_reset(page); 1639 page_cpupid_reset_last(page); 1640 page_kasan_tag_reset(page); 1641 1642 INIT_LIST_HEAD(&page->lru); 1643 #ifdef WANT_PAGE_VIRTUAL 1644 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 1645 if (!is_highmem_idx(zone)) 1646 set_page_address(page, __va(pfn << PAGE_SHIFT)); 1647 #endif 1648 } 1649 1650 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1651 static void __meminit init_reserved_page(unsigned long pfn) 1652 { 1653 pg_data_t *pgdat; 1654 int nid, zid; 1655 1656 if (!early_page_uninitialised(pfn)) 1657 return; 1658 1659 nid = early_pfn_to_nid(pfn); 1660 pgdat = NODE_DATA(nid); 1661 1662 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 1663 struct zone *zone = &pgdat->node_zones[zid]; 1664 1665 if (zone_spans_pfn(zone, pfn)) 1666 break; 1667 } 1668 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); 1669 } 1670 #else 1671 static inline void init_reserved_page(unsigned long pfn) 1672 { 1673 } 1674 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1675 1676 /* 1677 * Initialised pages do not have PageReserved set. This function is 1678 * called for each range allocated by the bootmem allocator and 1679 * marks the pages PageReserved. The remaining valid pages are later 1680 * sent to the buddy page allocator. 
1681 */ 1682 void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) 1683 { 1684 unsigned long start_pfn = PFN_DOWN(start); 1685 unsigned long end_pfn = PFN_UP(end); 1686 1687 for (; start_pfn < end_pfn; start_pfn++) { 1688 if (pfn_valid(start_pfn)) { 1689 struct page *page = pfn_to_page(start_pfn); 1690 1691 init_reserved_page(start_pfn); 1692 1693 /* Avoid false-positive PageTail() */ 1694 INIT_LIST_HEAD(&page->lru); 1695 1696 /* 1697 * no need for atomic set_bit because the struct 1698 * page is not visible yet so nobody should 1699 * access it yet. 1700 */ 1701 __SetPageReserved(page); 1702 } 1703 } 1704 } 1705 1706 static void __free_pages_ok(struct page *page, unsigned int order, 1707 fpi_t fpi_flags) 1708 { 1709 unsigned long flags; 1710 int migratetype; 1711 unsigned long pfn = page_to_pfn(page); 1712 struct zone *zone = page_zone(page); 1713 1714 if (!free_pages_prepare(page, order, true, fpi_flags)) 1715 return; 1716 1717 migratetype = get_pfnblock_migratetype(page, pfn); 1718 1719 spin_lock_irqsave(&zone->lock, flags); 1720 if (unlikely(has_isolate_pageblock(zone) || 1721 is_migrate_isolate(migratetype))) { 1722 migratetype = get_pfnblock_migratetype(page, pfn); 1723 } 1724 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); 1725 spin_unlock_irqrestore(&zone->lock, flags); 1726 1727 __count_vm_events(PGFREE, 1 << order); 1728 } 1729 1730 void __free_pages_core(struct page *page, unsigned int order) 1731 { 1732 unsigned int nr_pages = 1 << order; 1733 struct page *p = page; 1734 unsigned int loop; 1735 1736 /* 1737 * When initializing the memmap, __init_single_page() sets the refcount 1738 * of all pages to 1 ("allocated"/"not free"). We have to set the 1739 * refcount of all involved pages to 0. 1740 */ 1741 prefetchw(p); 1742 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { 1743 prefetchw(p + 1); 1744 __ClearPageReserved(p); 1745 set_page_count(p, 0); 1746 } 1747 __ClearPageReserved(p); 1748 set_page_count(p, 0); 1749 1750 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); 1751 1752 /* 1753 * Bypass PCP and place fresh pages right to the tail, primarily 1754 * relevant for memory onlining. 1755 */ 1756 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON); 1757 } 1758 1759 #ifdef CONFIG_NUMA 1760 1761 /* 1762 * During memory init memblocks map pfns to nids. The search is expensive and 1763 * this caches recent lookups. The implementation of __early_pfn_to_nid 1764 * treats start/end as pfns. 1765 */ 1766 struct mminit_pfnnid_cache { 1767 unsigned long last_start; 1768 unsigned long last_end; 1769 int last_nid; 1770 }; 1771 1772 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 1773 1774 /* 1775 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif /* CONFIG_NUMA */

void __init memblock_free_pages(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	if (!kmsan_memblock_free_pages(page, order)) {
		/* KMSAN will take care of these pages. */
		return;
	}
	__free_pages_core(page, order);
}

/*
 * Check that the whole pageblock (or a subset of it) given by the interval
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the free or migration scanner of compaction.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that not all pages within a zone's range of pages
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
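 *
 * Illustrative example (not part of the original comment): for a pageblock
 * covering pfns [0x1000, 0x1200), only pfn 0x1000 and pfn 0x11ff are
 * examined; if both are valid, the first is online, and both report the
 * same zone id, the whole block is taken to lie within that zone.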
1837 */ 1838 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, 1839 unsigned long end_pfn, struct zone *zone) 1840 { 1841 struct page *start_page; 1842 struct page *end_page; 1843 1844 /* end_pfn is one past the range we are checking */ 1845 end_pfn--; 1846 1847 if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) 1848 return NULL; 1849 1850 start_page = pfn_to_online_page(start_pfn); 1851 if (!start_page) 1852 return NULL; 1853 1854 if (page_zone(start_page) != zone) 1855 return NULL; 1856 1857 end_page = pfn_to_page(end_pfn); 1858 1859 /* This gives a shorter code than deriving page_zone(end_page) */ 1860 if (page_zone_id(start_page) != page_zone_id(end_page)) 1861 return NULL; 1862 1863 return start_page; 1864 } 1865 1866 void set_zone_contiguous(struct zone *zone) 1867 { 1868 unsigned long block_start_pfn = zone->zone_start_pfn; 1869 unsigned long block_end_pfn; 1870 1871 block_end_pfn = pageblock_end_pfn(block_start_pfn); 1872 for (; block_start_pfn < zone_end_pfn(zone); 1873 block_start_pfn = block_end_pfn, 1874 block_end_pfn += pageblock_nr_pages) { 1875 1876 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); 1877 1878 if (!__pageblock_pfn_to_page(block_start_pfn, 1879 block_end_pfn, zone)) 1880 return; 1881 cond_resched(); 1882 } 1883 1884 /* We confirm that there is no hole */ 1885 zone->contiguous = true; 1886 } 1887 1888 void clear_zone_contiguous(struct zone *zone) 1889 { 1890 zone->contiguous = false; 1891 } 1892 1893 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1894 static void __init deferred_free_range(unsigned long pfn, 1895 unsigned long nr_pages) 1896 { 1897 struct page *page; 1898 unsigned long i; 1899 1900 if (!nr_pages) 1901 return; 1902 1903 page = pfn_to_page(pfn); 1904 1905 /* Free a large naturally-aligned chunk if possible */ 1906 if (nr_pages == pageblock_nr_pages && pageblock_aligned(pfn)) { 1907 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1908 __free_pages_core(page, pageblock_order); 1909 return; 1910 } 1911 1912 for (i = 0; i < nr_pages; i++, page++, pfn++) { 1913 if (pageblock_aligned(pfn)) 1914 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1915 __free_pages_core(page, 0); 1916 } 1917 } 1918 1919 /* Completion tracking for deferred_init_memmap() threads */ 1920 static atomic_t pgdat_init_n_undone __initdata; 1921 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); 1922 1923 static inline void __init pgdat_init_report_one_done(void) 1924 { 1925 if (atomic_dec_and_test(&pgdat_init_n_undone)) 1926 complete(&pgdat_init_all_done_comp); 1927 } 1928 1929 /* 1930 * Returns true if page needs to be initialized or freed to buddy allocator. 1931 * 1932 * We check if a current large page is valid by only checking the validity 1933 * of the head pfn. 1934 */ 1935 static inline bool __init deferred_pfn_valid(unsigned long pfn) 1936 { 1937 if (pageblock_aligned(pfn) && !pfn_valid(pfn)) 1938 return false; 1939 return true; 1940 } 1941 1942 /* 1943 * Free pages to buddy allocator. Try to free aligned pages in 1944 * pageblock_nr_pages sizes. 
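 *
 * For example, with 4K pages and pageblock_order 9 (pageblock_nr_pages ==
 * 512), a fully valid, naturally aligned run of pfns is handed to
 * deferred_free_range() in whole 512-page chunks, while a run cut short by
 * an invalid pfn is flushed as individual order-0 pages.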
1945 */ 1946 static void __init deferred_free_pages(unsigned long pfn, 1947 unsigned long end_pfn) 1948 { 1949 unsigned long nr_free = 0; 1950 1951 for (; pfn < end_pfn; pfn++) { 1952 if (!deferred_pfn_valid(pfn)) { 1953 deferred_free_range(pfn - nr_free, nr_free); 1954 nr_free = 0; 1955 } else if (pageblock_aligned(pfn)) { 1956 deferred_free_range(pfn - nr_free, nr_free); 1957 nr_free = 1; 1958 } else { 1959 nr_free++; 1960 } 1961 } 1962 /* Free the last block of pages to allocator */ 1963 deferred_free_range(pfn - nr_free, nr_free); 1964 } 1965 1966 /* 1967 * Initialize struct pages. We minimize pfn page lookups and scheduler checks 1968 * by performing it only once every pageblock_nr_pages. 1969 * Return number of pages initialized. 1970 */ 1971 static unsigned long __init deferred_init_pages(struct zone *zone, 1972 unsigned long pfn, 1973 unsigned long end_pfn) 1974 { 1975 int nid = zone_to_nid(zone); 1976 unsigned long nr_pages = 0; 1977 int zid = zone_idx(zone); 1978 struct page *page = NULL; 1979 1980 for (; pfn < end_pfn; pfn++) { 1981 if (!deferred_pfn_valid(pfn)) { 1982 page = NULL; 1983 continue; 1984 } else if (!page || pageblock_aligned(pfn)) { 1985 page = pfn_to_page(pfn); 1986 } else { 1987 page++; 1988 } 1989 __init_single_page(page, pfn, zid, nid); 1990 nr_pages++; 1991 } 1992 return (nr_pages); 1993 } 1994 1995 /* 1996 * This function is meant to pre-load the iterator for the zone init. 1997 * Specifically it walks through the ranges until we are caught up to the 1998 * first_init_pfn value and exits there. If we never encounter the value we 1999 * return false indicating there are no valid ranges left. 2000 */ 2001 static bool __init 2002 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, 2003 unsigned long *spfn, unsigned long *epfn, 2004 unsigned long first_init_pfn) 2005 { 2006 u64 j; 2007 2008 /* 2009 * Start out by walking through the ranges in this zone that have 2010 * already been initialized. We don't need to do anything with them 2011 * so we just need to flush them out of the system. 2012 */ 2013 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { 2014 if (*epfn <= first_init_pfn) 2015 continue; 2016 if (*spfn < first_init_pfn) 2017 *spfn = first_init_pfn; 2018 *i = j; 2019 return true; 2020 } 2021 2022 return false; 2023 } 2024 2025 /* 2026 * Initialize and free pages. We do it in two loops: first we initialize 2027 * struct page, then free to buddy allocator, because while we are 2028 * freeing pages we can access pages that are ahead (computing buddy 2029 * page in __free_one_page()). 2030 * 2031 * In order to try and keep some memory in the cache we have the loop 2032 * broken along max page order boundaries. This way we will not cause 2033 * any issues with the buddy page computation. 
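 *
 * For example, when *start_pfn sits at the beginning of a MAX_ORDER block
 * (1024 pfns, i.e. 4MB, with 4K pages and the default MAX_ORDER of 11),
 * one call initialises the struct pages of that block only and then frees
 * the same pfn window, before the caller moves on to the next block.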
2034 */ 2035 static unsigned long __init 2036 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, 2037 unsigned long *end_pfn) 2038 { 2039 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); 2040 unsigned long spfn = *start_pfn, epfn = *end_pfn; 2041 unsigned long nr_pages = 0; 2042 u64 j = *i; 2043 2044 /* First we loop through and initialize the page values */ 2045 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { 2046 unsigned long t; 2047 2048 if (mo_pfn <= *start_pfn) 2049 break; 2050 2051 t = min(mo_pfn, *end_pfn); 2052 nr_pages += deferred_init_pages(zone, *start_pfn, t); 2053 2054 if (mo_pfn < *end_pfn) { 2055 *start_pfn = mo_pfn; 2056 break; 2057 } 2058 } 2059 2060 /* Reset values and now loop through freeing pages as needed */ 2061 swap(j, *i); 2062 2063 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { 2064 unsigned long t; 2065 2066 if (mo_pfn <= spfn) 2067 break; 2068 2069 t = min(mo_pfn, epfn); 2070 deferred_free_pages(spfn, t); 2071 2072 if (mo_pfn <= epfn) 2073 break; 2074 } 2075 2076 return nr_pages; 2077 } 2078 2079 static void __init 2080 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 2081 void *arg) 2082 { 2083 unsigned long spfn, epfn; 2084 struct zone *zone = arg; 2085 u64 i; 2086 2087 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); 2088 2089 /* 2090 * Initialize and free pages in MAX_ORDER sized increments so that we 2091 * can avoid introducing any issues with the buddy allocator. 2092 */ 2093 while (spfn < end_pfn) { 2094 deferred_init_maxorder(&i, zone, &spfn, &epfn); 2095 cond_resched(); 2096 } 2097 } 2098 2099 /* An arch may override for more concurrency. */ 2100 __weak int __init 2101 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 2102 { 2103 return 1; 2104 } 2105 2106 /* Initialise remaining memory on a node */ 2107 static int __init deferred_init_memmap(void *data) 2108 { 2109 pg_data_t *pgdat = data; 2110 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2111 unsigned long spfn = 0, epfn = 0; 2112 unsigned long first_init_pfn, flags; 2113 unsigned long start = jiffies; 2114 struct zone *zone; 2115 int zid, max_threads; 2116 u64 i; 2117 2118 /* Bind memory initialisation thread to a local node if possible */ 2119 if (!cpumask_empty(cpumask)) 2120 set_cpus_allowed_ptr(current, cpumask); 2121 2122 pgdat_resize_lock(pgdat, &flags); 2123 first_init_pfn = pgdat->first_deferred_pfn; 2124 if (first_init_pfn == ULONG_MAX) { 2125 pgdat_resize_unlock(pgdat, &flags); 2126 pgdat_init_report_one_done(); 2127 return 0; 2128 } 2129 2130 /* Sanity check boundaries */ 2131 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 2132 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 2133 pgdat->first_deferred_pfn = ULONG_MAX; 2134 2135 /* 2136 * Once we unlock here, the zone cannot be grown anymore, thus if an 2137 * interrupt thread must allocate this early in boot, zone must be 2138 * pre-grown prior to start of deferred page initialization. 
2139 */ 2140 pgdat_resize_unlock(pgdat, &flags); 2141 2142 /* Only the highest zone is deferred so find it */ 2143 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 2144 zone = pgdat->node_zones + zid; 2145 if (first_init_pfn < zone_end_pfn(zone)) 2146 break; 2147 } 2148 2149 /* If the zone is empty somebody else may have cleared out the zone */ 2150 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2151 first_init_pfn)) 2152 goto zone_empty; 2153 2154 max_threads = deferred_page_init_max_threads(cpumask); 2155 2156 while (spfn < epfn) { 2157 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); 2158 struct padata_mt_job job = { 2159 .thread_fn = deferred_init_memmap_chunk, 2160 .fn_arg = zone, 2161 .start = spfn, 2162 .size = epfn_align - spfn, 2163 .align = PAGES_PER_SECTION, 2164 .min_chunk = PAGES_PER_SECTION, 2165 .max_threads = max_threads, 2166 }; 2167 2168 padata_do_multithreaded(&job); 2169 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2170 epfn_align); 2171 } 2172 zone_empty: 2173 /* Sanity check that the next zone really is unpopulated */ 2174 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); 2175 2176 pr_info("node %d deferred pages initialised in %ums\n", 2177 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 2178 2179 pgdat_init_report_one_done(); 2180 return 0; 2181 } 2182 2183 /* 2184 * If this zone has deferred pages, try to grow it by initializing enough 2185 * deferred pages to satisfy the allocation specified by order, rounded up to 2186 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2187 * of SECTION_SIZE bytes by initializing struct pages in increments of 2188 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2189 * 2190 * Return true when zone was grown, otherwise return false. We return true even 2191 * when we grow less than requested, to let the caller decide if there are 2192 * enough pages to satisfy the allocation. 2193 * 2194 * Note: We use noinline because this function is needed only during boot, and 2195 * it is called from a __ref function _deferred_grow_zone. This way we are 2196 * making sure that it is not inlined into permanent text section. 2197 */ 2198 static noinline bool __init 2199 deferred_grow_zone(struct zone *zone, unsigned int order) 2200 { 2201 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); 2202 pg_data_t *pgdat = zone->zone_pgdat; 2203 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2204 unsigned long spfn, epfn, flags; 2205 unsigned long nr_pages = 0; 2206 u64 i; 2207 2208 /* Only the last zone may have deferred pages */ 2209 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2210 return false; 2211 2212 pgdat_resize_lock(pgdat, &flags); 2213 2214 /* 2215 * If someone grew this zone while we were waiting for spinlock, return 2216 * true, as there might be enough pages already. 2217 */ 2218 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2219 pgdat_resize_unlock(pgdat, &flags); 2220 return true; 2221 } 2222 2223 /* If the zone is empty somebody else may have cleared out the zone */ 2224 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2225 first_deferred_pfn)) { 2226 pgdat->first_deferred_pfn = ULONG_MAX; 2227 pgdat_resize_unlock(pgdat, &flags); 2228 /* Retry only once. */ 2229 return first_deferred_pfn != ULONG_MAX; 2230 } 2231 2232 /* 2233 * Initialize and free pages in MAX_ORDER sized increments so 2234 * that we can avoid introducing any issues with the buddy 2235 * allocator. 
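 *
 * For example, an order-3 request has nr_pages_needed rounded up to a full
 * section (PAGES_PER_SECTION pages, i.e. 128MB with 4K pages on x86-64),
 * and the loop below only stops once a section boundary has been crossed
 * and at least that many pages have been initialised.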
2236 */ 2237 while (spfn < epfn) { 2238 /* update our first deferred PFN for this section */ 2239 first_deferred_pfn = spfn; 2240 2241 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); 2242 touch_nmi_watchdog(); 2243 2244 /* We should only stop along section boundaries */ 2245 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) 2246 continue; 2247 2248 /* If our quota has been met we can stop here */ 2249 if (nr_pages >= nr_pages_needed) 2250 break; 2251 } 2252 2253 pgdat->first_deferred_pfn = spfn; 2254 pgdat_resize_unlock(pgdat, &flags); 2255 2256 return nr_pages > 0; 2257 } 2258 2259 /* 2260 * deferred_grow_zone() is __init, but it is called from 2261 * get_page_from_freelist() during early boot until deferred_pages permanently 2262 * disables this call. This is why we have refdata wrapper to avoid warning, 2263 * and to ensure that the function body gets unloaded. 2264 */ 2265 static bool __ref 2266 _deferred_grow_zone(struct zone *zone, unsigned int order) 2267 { 2268 return deferred_grow_zone(zone, order); 2269 } 2270 2271 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2272 2273 void __init page_alloc_init_late(void) 2274 { 2275 struct zone *zone; 2276 int nid; 2277 2278 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2279 2280 /* There will be num_node_state(N_MEMORY) threads */ 2281 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2282 for_each_node_state(nid, N_MEMORY) { 2283 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2284 } 2285 2286 /* Block until all are initialised */ 2287 wait_for_completion(&pgdat_init_all_done_comp); 2288 2289 /* 2290 * We initialized the rest of the deferred pages. Permanently disable 2291 * on-demand struct page initialization. 2292 */ 2293 static_branch_disable(&deferred_pages); 2294 2295 /* Reinit limits that are based on free pages after the kernel is up */ 2296 files_maxfiles_init(); 2297 #endif 2298 2299 buffer_init(); 2300 2301 /* Discard memblock private memory */ 2302 memblock_discard(); 2303 2304 for_each_node_state(nid, N_MEMORY) 2305 shuffle_free_memory(NODE_DATA(nid)); 2306 2307 for_each_populated_zone(zone) 2308 set_zone_contiguous(zone); 2309 } 2310 2311 #ifdef CONFIG_CMA 2312 /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ 2313 void __init init_cma_reserved_pageblock(struct page *page) 2314 { 2315 unsigned i = pageblock_nr_pages; 2316 struct page *p = page; 2317 2318 do { 2319 __ClearPageReserved(p); 2320 set_page_count(p, 0); 2321 } while (++p, --i); 2322 2323 set_pageblock_migratetype(page, MIGRATE_CMA); 2324 set_page_refcounted(page); 2325 __free_pages(page, pageblock_order); 2326 2327 adjust_managed_page_count(page, pageblock_nr_pages); 2328 page_zone(page)->cma_pages += pageblock_nr_pages; 2329 } 2330 #endif 2331 2332 /* 2333 * The order of subdivision here is critical for the IO subsystem. 2334 * Please do not alter this order without good reasons and regression 2335 * testing. Specifically, as large blocks of memory are subdivided, 2336 * the order in which smaller blocks are delivered depends on the order 2337 * they're subdivided in this function. This is the primary factor 2338 * influencing the order in which pages are delivered to the IO 2339 * subsystem according to empirical testing, and this is also justified 2340 * by considering the behavior of a buddy system containing a single 2341 * large block of memory acted on by a series of small allocations. 2342 * This behavior is a critical factor in sglist merging's success. 
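 *
 * For example, carving an order-0 page out of a free order-3 block puts
 * the remaining 7 pages back on the free lists as one order-2, one order-1
 * and one order-0 buddy.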
2343 * 2344 * -- nyc 2345 */ 2346 static inline void expand(struct zone *zone, struct page *page, 2347 int low, int high, int migratetype) 2348 { 2349 unsigned long size = 1 << high; 2350 2351 while (high > low) { 2352 high--; 2353 size >>= 1; 2354 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); 2355 2356 /* 2357 * Mark as guard pages (or page), that will allow to 2358 * merge back to allocator when buddy will be freed. 2359 * Corresponding page table entries will not be touched, 2360 * pages will stay not present in virtual address space 2361 */ 2362 if (set_page_guard(zone, &page[size], high, migratetype)) 2363 continue; 2364 2365 add_to_free_list(&page[size], zone, high, migratetype); 2366 set_buddy_order(&page[size], high); 2367 } 2368 } 2369 2370 static void check_new_page_bad(struct page *page) 2371 { 2372 if (unlikely(page->flags & __PG_HWPOISON)) { 2373 /* Don't complain about hwpoisoned pages */ 2374 page_mapcount_reset(page); /* remove PageBuddy */ 2375 return; 2376 } 2377 2378 bad_page(page, 2379 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); 2380 } 2381 2382 /* 2383 * This page is about to be returned from the page allocator 2384 */ 2385 static inline int check_new_page(struct page *page) 2386 { 2387 if (likely(page_expected_state(page, 2388 PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))) 2389 return 0; 2390 2391 check_new_page_bad(page); 2392 return 1; 2393 } 2394 2395 static bool check_new_pages(struct page *page, unsigned int order) 2396 { 2397 int i; 2398 for (i = 0; i < (1 << order); i++) { 2399 struct page *p = page + i; 2400 2401 if (unlikely(check_new_page(p))) 2402 return true; 2403 } 2404 2405 return false; 2406 } 2407 2408 #ifdef CONFIG_DEBUG_VM 2409 /* 2410 * With DEBUG_VM enabled, order-0 pages are checked for expected state when 2411 * being allocated from pcp lists. With debug_pagealloc also enabled, they are 2412 * also checked when pcp lists are refilled from the free lists. 2413 */ 2414 static inline bool check_pcp_refill(struct page *page, unsigned int order) 2415 { 2416 if (debug_pagealloc_enabled_static()) 2417 return check_new_pages(page, order); 2418 else 2419 return false; 2420 } 2421 2422 static inline bool check_new_pcp(struct page *page, unsigned int order) 2423 { 2424 return check_new_pages(page, order); 2425 } 2426 #else 2427 /* 2428 * With DEBUG_VM disabled, free order-0 pages are checked for expected state 2429 * when pcp lists are being refilled from the free lists. With debug_pagealloc 2430 * enabled, they are also checked when being allocated from the pcp lists. 2431 */ 2432 static inline bool check_pcp_refill(struct page *page, unsigned int order) 2433 { 2434 return check_new_pages(page, order); 2435 } 2436 static inline bool check_new_pcp(struct page *page, unsigned int order) 2437 { 2438 if (debug_pagealloc_enabled_static()) 2439 return check_new_pages(page, order); 2440 else 2441 return false; 2442 } 2443 #endif /* CONFIG_DEBUG_VM */ 2444 2445 static inline bool should_skip_kasan_unpoison(gfp_t flags) 2446 { 2447 /* Don't skip if a software KASAN mode is enabled. */ 2448 if (IS_ENABLED(CONFIG_KASAN_GENERIC) || 2449 IS_ENABLED(CONFIG_KASAN_SW_TAGS)) 2450 return false; 2451 2452 /* Skip, if hardware tag-based KASAN is not enabled. */ 2453 if (!kasan_hw_tags_enabled()) 2454 return true; 2455 2456 /* 2457 * With hardware tag-based KASAN enabled, skip if this has been 2458 * requested via __GFP_SKIP_KASAN_UNPOISON. 
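 *
 * In short: generic and software tag-based KASAN never skip unpoisoning,
 * kernels without hardware tag-based KASAN enabled always skip it, and
 * hardware tag-based KASAN skips it only when the caller passed
 * __GFP_SKIP_KASAN_UNPOISON.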
2459 */ 2460 return flags & __GFP_SKIP_KASAN_UNPOISON; 2461 } 2462 2463 static inline bool should_skip_init(gfp_t flags) 2464 { 2465 /* Don't skip, if hardware tag-based KASAN is not enabled. */ 2466 if (!kasan_hw_tags_enabled()) 2467 return false; 2468 2469 /* For hardware tag-based KASAN, skip if requested. */ 2470 return (flags & __GFP_SKIP_ZERO); 2471 } 2472 2473 inline void post_alloc_hook(struct page *page, unsigned int order, 2474 gfp_t gfp_flags) 2475 { 2476 bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) && 2477 !should_skip_init(gfp_flags); 2478 bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS); 2479 int i; 2480 2481 set_page_private(page, 0); 2482 set_page_refcounted(page); 2483 2484 arch_alloc_page(page, order); 2485 debug_pagealloc_map_pages(page, 1 << order); 2486 2487 /* 2488 * Page unpoisoning must happen before memory initialization. 2489 * Otherwise, the poison pattern will be overwritten for __GFP_ZERO 2490 * allocations and the page unpoisoning code will complain. 2491 */ 2492 kernel_unpoison_pages(page, 1 << order); 2493 2494 /* 2495 * As memory initialization might be integrated into KASAN, 2496 * KASAN unpoisoning and memory initialization code must be 2497 * kept together to avoid discrepancies in behavior. 2498 */ 2499 2500 /* 2501 * If memory tags should be zeroed (which happens only when memory 2502 * should be initialized as well). 2503 */ 2504 if (init_tags) { 2505 /* Initialize both memory and tags. */ 2506 for (i = 0; i != 1 << order; ++i) 2507 tag_clear_highpage(page + i); 2508 2509 /* Note that memory is already initialized by the loop above. */ 2510 init = false; 2511 } 2512 if (!should_skip_kasan_unpoison(gfp_flags)) { 2513 /* Unpoison shadow memory or set memory tags. */ 2514 kasan_unpoison_pages(page, order, init); 2515 2516 /* Note that memory is already initialized by KASAN. */ 2517 if (kasan_has_integrated_init()) 2518 init = false; 2519 } else { 2520 /* Ensure page_address() dereferencing does not fault. */ 2521 for (i = 0; i != 1 << order; ++i) 2522 page_kasan_tag_reset(page + i); 2523 } 2524 /* If memory is still not initialized, do it now. */ 2525 if (init) 2526 kernel_init_pages(page, 1 << order); 2527 /* Propagate __GFP_SKIP_KASAN_POISON to page flags. */ 2528 if (kasan_hw_tags_enabled() && (gfp_flags & __GFP_SKIP_KASAN_POISON)) 2529 SetPageSkipKASanPoison(page); 2530 2531 set_page_owner(page, order, gfp_flags); 2532 page_table_check_alloc(page, order); 2533 } 2534 2535 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, 2536 unsigned int alloc_flags) 2537 { 2538 post_alloc_hook(page, order, gfp_flags); 2539 2540 if (order && (gfp_flags & __GFP_COMP)) 2541 prep_compound_page(page, order); 2542 2543 /* 2544 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to 2545 * allocate the page. The expectation is that the caller is taking 2546 * steps that will free more memory. The caller should avoid the page 2547 * being used for !PFMEMALLOC purposes.
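 *
 * For example, the network stack checks page_is_pfmemalloc() on receive
 * buffers and drops packets for sockets that are not themselves helping to
 * reclaim memory, so ordinary traffic does not consume the reserves.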
2548 */ 2549 if (alloc_flags & ALLOC_NO_WATERMARKS) 2550 set_page_pfmemalloc(page); 2551 else 2552 clear_page_pfmemalloc(page); 2553 } 2554 2555 /* 2556 * Go through the free lists for the given migratetype and remove 2557 * the smallest available page from the freelists 2558 */ 2559 static __always_inline 2560 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, 2561 int migratetype) 2562 { 2563 unsigned int current_order; 2564 struct free_area *area; 2565 struct page *page; 2566 2567 /* Find a page of the appropriate size in the preferred list */ 2568 for (current_order = order; current_order < MAX_ORDER; ++current_order) { 2569 area = &(zone->free_area[current_order]); 2570 page = get_page_from_free_area(area, migratetype); 2571 if (!page) 2572 continue; 2573 del_page_from_free_list(page, zone, current_order); 2574 expand(zone, page, order, current_order, migratetype); 2575 set_pcppage_migratetype(page, migratetype); 2576 trace_mm_page_alloc_zone_locked(page, order, migratetype, 2577 pcp_allowed_order(order) && 2578 migratetype < MIGRATE_PCPTYPES); 2579 return page; 2580 } 2581 2582 return NULL; 2583 } 2584 2585 2586 /* 2587 * This array describes the order in which free lists are fallen back on 2588 * when the free lists for the desired migratetype are depleted 2589 * 2590 * The other migratetypes do not have fallbacks. 2591 */ 2592 static int fallbacks[MIGRATE_TYPES][3] = { 2593 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2594 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES }, 2595 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES }, 2596 }; 2597 2598 #ifdef CONFIG_CMA 2599 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2600 unsigned int order) 2601 { 2602 return __rmqueue_smallest(zone, order, MIGRATE_CMA); 2603 } 2604 #else 2605 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, 2606 unsigned int order) { return NULL; } 2607 #endif 2608 2609 /* 2610 * Move the free pages in a range to the freelist tail of the requested type. 2611 * Note that start_pfn and end_pfn are not aligned on a pageblock 2612 * boundary. If alignment is required, use move_freepages_block() 2613 */ 2614 static int move_freepages(struct zone *zone, 2615 unsigned long start_pfn, unsigned long end_pfn, 2616 int migratetype, int *num_movable) 2617 { 2618 struct page *page; 2619 unsigned long pfn; 2620 unsigned int order; 2621 int pages_moved = 0; 2622 2623 for (pfn = start_pfn; pfn <= end_pfn;) { 2624 page = pfn_to_page(pfn); 2625 if (!PageBuddy(page)) { 2626 /* 2627 * We assume that pages that could be isolated for 2628 * migration are movable. But we don't actually try 2629 * isolating, as that would be expensive.
2630 */ 2631 if (num_movable && 2632 (PageLRU(page) || __PageMovable(page))) 2633 (*num_movable)++; 2634 pfn++; 2635 continue; 2636 } 2637 2638 /* Make sure we are not inadvertently changing nodes */ 2639 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); 2640 VM_BUG_ON_PAGE(page_zone(page) != zone, page); 2641 2642 order = buddy_order(page); 2643 move_to_free_list(page, zone, order, migratetype); 2644 pfn += 1 << order; 2645 pages_moved += 1 << order; 2646 } 2647 2648 return pages_moved; 2649 } 2650 2651 int move_freepages_block(struct zone *zone, struct page *page, 2652 int migratetype, int *num_movable) 2653 { 2654 unsigned long start_pfn, end_pfn, pfn; 2655 2656 if (num_movable) 2657 *num_movable = 0; 2658 2659 pfn = page_to_pfn(page); 2660 start_pfn = pageblock_start_pfn(pfn); 2661 end_pfn = pageblock_end_pfn(pfn) - 1; 2662 2663 /* Do not cross zone boundaries */ 2664 if (!zone_spans_pfn(zone, start_pfn)) 2665 start_pfn = pfn; 2666 if (!zone_spans_pfn(zone, end_pfn)) 2667 return 0; 2668 2669 return move_freepages(zone, start_pfn, end_pfn, migratetype, 2670 num_movable); 2671 } 2672 2673 static void change_pageblock_range(struct page *pageblock_page, 2674 int start_order, int migratetype) 2675 { 2676 int nr_pageblocks = 1 << (start_order - pageblock_order); 2677 2678 while (nr_pageblocks--) { 2679 set_pageblock_migratetype(pageblock_page, migratetype); 2680 pageblock_page += pageblock_nr_pages; 2681 } 2682 } 2683 2684 /* 2685 * When we are falling back to another migratetype during allocation, try to 2686 * steal extra free pages from the same pageblocks to satisfy further 2687 * allocations, instead of polluting multiple pageblocks. 2688 * 2689 * If we are stealing a relatively large buddy page, it is likely there will 2690 * be more free pages in the pageblock, so try to steal them all. For 2691 * reclaimable and unmovable allocations, we steal regardless of page size, 2692 * as fragmentation caused by those allocations polluting movable pageblocks 2693 * is worse than movable allocations stealing from unmovable and reclaimable 2694 * pageblocks. 2695 */ 2696 static bool can_steal_fallback(unsigned int order, int start_mt) 2697 { 2698 /* 2699 * Leaving this order check is intended, although there is 2700 * relaxed order check in next check. The reason is that 2701 * we can actually steal whole pageblock if this condition met, 2702 * but, below check doesn't guarantee it and that is just heuristic 2703 * so could be changed anytime. 2704 */ 2705 if (order >= pageblock_order) 2706 return true; 2707 2708 if (order >= pageblock_order / 2 || 2709 start_mt == MIGRATE_RECLAIMABLE || 2710 start_mt == MIGRATE_UNMOVABLE || 2711 page_group_by_mobility_disabled) 2712 return true; 2713 2714 return false; 2715 } 2716 2717 static inline bool boost_watermark(struct zone *zone) 2718 { 2719 unsigned long max_boost; 2720 2721 if (!watermark_boost_factor) 2722 return false; 2723 /* 2724 * Don't bother in zones that are unlikely to produce results. 2725 * On small machines, including kdump capture kernels running 2726 * in a small area, boosting the watermark can cause an out of 2727 * memory situation immediately. 2728 */ 2729 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) 2730 return false; 2731 2732 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], 2733 watermark_boost_factor, 10000); 2734 2735 /* 2736 * high watermark may be uninitialised if fragmentation occurs 2737 * very early in boot so do not boost. 
We do not fall 2738 * through and boost by pageblock_nr_pages as failing 2739 * allocations that early means that reclaim is not going 2740 * to help and it may even be impossible to reclaim the 2741 * boosted watermark resulting in a hang. 2742 */ 2743 if (!max_boost) 2744 return false; 2745 2746 max_boost = max(pageblock_nr_pages, max_boost); 2747 2748 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, 2749 max_boost); 2750 2751 return true; 2752 } 2753 2754 /* 2755 * This function implements actual steal behaviour. If order is large enough, 2756 * we can steal whole pageblock. If not, we first move freepages in this 2757 * pageblock to our migratetype and determine how many already-allocated pages 2758 * are there in the pageblock with a compatible migratetype. If at least half 2759 * of pages are free or compatible, we can change migratetype of the pageblock 2760 * itself, so pages freed in the future will be put on the correct free list. 2761 */ 2762 static void steal_suitable_fallback(struct zone *zone, struct page *page, 2763 unsigned int alloc_flags, int start_type, bool whole_block) 2764 { 2765 unsigned int current_order = buddy_order(page); 2766 int free_pages, movable_pages, alike_pages; 2767 int old_block_type; 2768 2769 old_block_type = get_pageblock_migratetype(page); 2770 2771 /* 2772 * This can happen due to races and we want to prevent broken 2773 * highatomic accounting. 2774 */ 2775 if (is_migrate_highatomic(old_block_type)) 2776 goto single_page; 2777 2778 /* Take ownership for orders >= pageblock_order */ 2779 if (current_order >= pageblock_order) { 2780 change_pageblock_range(page, current_order, start_type); 2781 goto single_page; 2782 } 2783 2784 /* 2785 * Boost watermarks to increase reclaim pressure to reduce the 2786 * likelihood of future fallbacks. Wake kswapd now as the node 2787 * may be balanced overall and kswapd will not wake naturally. 2788 */ 2789 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) 2790 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 2791 2792 /* We are not allowed to try stealing from the whole block */ 2793 if (!whole_block) 2794 goto single_page; 2795 2796 free_pages = move_freepages_block(zone, page, start_type, 2797 &movable_pages); 2798 /* 2799 * Determine how many pages are compatible with our allocation. 2800 * For movable allocation, it's the number of movable pages which 2801 * we just obtained. For other types it's a bit more tricky. 2802 */ 2803 if (start_type == MIGRATE_MOVABLE) { 2804 alike_pages = movable_pages; 2805 } else { 2806 /* 2807 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2808 * to MOVABLE pageblock, consider all non-movable pages as 2809 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2810 * vice versa, be conservative since we can't distinguish the 2811 * exact migratetype of non-movable pages. 2812 */ 2813 if (old_block_type == MIGRATE_MOVABLE) 2814 alike_pages = pageblock_nr_pages 2815 - (free_pages + movable_pages); 2816 else 2817 alike_pages = 0; 2818 } 2819 2820 /* moving whole block can fail due to zone boundary conditions */ 2821 if (!free_pages) 2822 goto single_page; 2823 2824 /* 2825 * If a sufficient number of pages in the block are either free or of 2826 * comparable migratability as our allocation, claim the whole block. 
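 *
 * "Sufficient" means at least half of the pageblock here: e.g. with 4K
 * pages and pageblock_order 9, at least 256 of the 512 pages must be free
 * or alike before the pageblock's migratetype is changed.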
2827 */ 2828 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2829 page_group_by_mobility_disabled) 2830 set_pageblock_migratetype(page, start_type); 2831 2832 return; 2833 2834 single_page: 2835 move_to_free_list(page, zone, current_order, start_type); 2836 } 2837 2838 /* 2839 * Check whether there is a suitable fallback freepage with requested order. 2840 * If only_stealable is true, this function returns fallback_mt only if 2841 * we can steal other freepages all together. This would help to reduce 2842 * fragmentation due to mixed migratetype pages in one pageblock. 2843 */ 2844 int find_suitable_fallback(struct free_area *area, unsigned int order, 2845 int migratetype, bool only_stealable, bool *can_steal) 2846 { 2847 int i; 2848 int fallback_mt; 2849 2850 if (area->nr_free == 0) 2851 return -1; 2852 2853 *can_steal = false; 2854 for (i = 0;; i++) { 2855 fallback_mt = fallbacks[migratetype][i]; 2856 if (fallback_mt == MIGRATE_TYPES) 2857 break; 2858 2859 if (free_area_empty(area, fallback_mt)) 2860 continue; 2861 2862 if (can_steal_fallback(order, migratetype)) 2863 *can_steal = true; 2864 2865 if (!only_stealable) 2866 return fallback_mt; 2867 2868 if (*can_steal) 2869 return fallback_mt; 2870 } 2871 2872 return -1; 2873 } 2874 2875 /* 2876 * Reserve a pageblock for exclusive use of high-order atomic allocations if 2877 * there are no empty page blocks that contain a page with a suitable order 2878 */ 2879 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2880 unsigned int alloc_order) 2881 { 2882 int mt; 2883 unsigned long max_managed, flags; 2884 2885 /* 2886 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2887 * Check is race-prone but harmless. 2888 */ 2889 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; 2890 if (zone->nr_reserved_highatomic >= max_managed) 2891 return; 2892 2893 spin_lock_irqsave(&zone->lock, flags); 2894 2895 /* Recheck the nr_reserved_highatomic limit under the lock */ 2896 if (zone->nr_reserved_highatomic >= max_managed) 2897 goto out_unlock; 2898 2899 /* Yoink! */ 2900 mt = get_pageblock_migratetype(page); 2901 /* Only reserve normal pageblocks (i.e., they can merge with others) */ 2902 if (migratetype_is_mergeable(mt)) { 2903 zone->nr_reserved_highatomic += pageblock_nr_pages; 2904 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2905 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 2906 } 2907 2908 out_unlock: 2909 spin_unlock_irqrestore(&zone->lock, flags); 2910 } 2911 2912 /* 2913 * Used when an allocation is about to fail under memory pressure. This 2914 * potentially hurts the reliability of high-order allocations when under 2915 * intense memory pressure but failed atomic allocations should be easier 2916 * to recover from than an OOM. 2917 * 2918 * If @force is true, try to unreserve a pageblock even though highatomic 2919 * pageblock is exhausted. 2920 */ 2921 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2922 bool force) 2923 { 2924 struct zonelist *zonelist = ac->zonelist; 2925 unsigned long flags; 2926 struct zoneref *z; 2927 struct zone *zone; 2928 struct page *page; 2929 int order; 2930 bool ret; 2931 2932 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, 2933 ac->nodemask) { 2934 /* 2935 * Preserve at least one pageblock unless memory pressure 2936 * is really high. 
2937 */ 2938 if (!force && zone->nr_reserved_highatomic <= 2939 pageblock_nr_pages) 2940 continue; 2941 2942 spin_lock_irqsave(&zone->lock, flags); 2943 for (order = 0; order < MAX_ORDER; order++) { 2944 struct free_area *area = &(zone->free_area[order]); 2945 2946 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); 2947 if (!page) 2948 continue; 2949 2950 /* 2951 * In page freeing path, migratetype change is racy so 2952 * we can encounter several free pages in a pageblock 2953 * in this loop although we changed the pageblock type 2954 * from highatomic to ac->migratetype. So we should 2955 * adjust the count once. 2956 */ 2957 if (is_migrate_highatomic_page(page)) { 2958 /* 2959 * It should never happen but changes to 2960 * locking could inadvertently allow a per-cpu 2961 * drain to add pages to MIGRATE_HIGHATOMIC 2962 * while unreserving so be safe and watch for 2963 * underflows. 2964 */ 2965 zone->nr_reserved_highatomic -= min( 2966 pageblock_nr_pages, 2967 zone->nr_reserved_highatomic); 2968 } 2969 2970 /* 2971 * Convert to ac->migratetype and avoid the normal 2972 * pageblock stealing heuristics. Minimally, the caller 2973 * is doing the work and needs the pages. More 2974 * importantly, if the block was always converted to 2975 * MIGRATE_UNMOVABLE or another type then the number 2976 * of pageblocks that cannot be completely freed 2977 * may increase. 2978 */ 2979 set_pageblock_migratetype(page, ac->migratetype); 2980 ret = move_freepages_block(zone, page, ac->migratetype, 2981 NULL); 2982 if (ret) { 2983 spin_unlock_irqrestore(&zone->lock, flags); 2984 return ret; 2985 } 2986 } 2987 spin_unlock_irqrestore(&zone->lock, flags); 2988 } 2989 2990 return false; 2991 } 2992 2993 /* 2994 * Try finding a free buddy page on the fallback list and put it on the free 2995 * list of requested migratetype, possibly along with other pages from the same 2996 * block, depending on fragmentation avoidance heuristics. Returns true if 2997 * fallback was found so that __rmqueue_smallest() can grab it. 2998 * 2999 * The use of signed ints for order and current_order is a deliberate 3000 * deviation from the rest of this file, to make the for loop 3001 * condition simpler. 3002 */ 3003 static __always_inline bool 3004 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, 3005 unsigned int alloc_flags) 3006 { 3007 struct free_area *area; 3008 int current_order; 3009 int min_order = order; 3010 struct page *page; 3011 int fallback_mt; 3012 bool can_steal; 3013 3014 /* 3015 * Do not steal pages from freelists belonging to other pageblocks 3016 * i.e. orders < pageblock_order. If there are no local zones free, 3017 * the zonelists will be reiterated without ALLOC_NOFRAGMENT. 3018 */ 3019 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) 3020 min_order = pageblock_order; 3021 3022 /* 3023 * Find the largest available free page in the other list. This roughly 3024 * approximates finding the pageblock with the most free pages, which 3025 * would be too costly to do exactly. 3026 */ 3027 for (current_order = MAX_ORDER - 1; current_order >= min_order; 3028 --current_order) { 3029 area = &(zone->free_area[current_order]); 3030 fallback_mt = find_suitable_fallback(area, current_order, 3031 start_migratetype, false, &can_steal); 3032 if (fallback_mt == -1) 3033 continue; 3034 3035 /* 3036 * We cannot steal all free pages from the pageblock and the 3037 * requested migratetype is movable.
In that case it's better to 3038 * steal and split the smallest available page instead of the 3039 * largest available page, because even if the next movable 3040 * allocation falls back into a different pageblock than this 3041 * one, it won't cause permanent fragmentation. 3042 */ 3043 if (!can_steal && start_migratetype == MIGRATE_MOVABLE 3044 && current_order > order) 3045 goto find_smallest; 3046 3047 goto do_steal; 3048 } 3049 3050 return false; 3051 3052 find_smallest: 3053 for (current_order = order; current_order < MAX_ORDER; 3054 current_order++) { 3055 area = &(zone->free_area[current_order]); 3056 fallback_mt = find_suitable_fallback(area, current_order, 3057 start_migratetype, false, &can_steal); 3058 if (fallback_mt != -1) 3059 break; 3060 } 3061 3062 /* 3063 * This should not happen - we already found a suitable fallback 3064 * when looking for the largest page. 3065 */ 3066 VM_BUG_ON(current_order == MAX_ORDER); 3067 3068 do_steal: 3069 page = get_page_from_free_area(area, fallback_mt); 3070 3071 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, 3072 can_steal); 3073 3074 trace_mm_page_alloc_extfrag(page, order, current_order, 3075 start_migratetype, fallback_mt); 3076 3077 return true; 3078 3079 } 3080 3081 /* 3082 * Do the hard work of removing an element from the buddy allocator. 3083 * Call me with the zone->lock already held. 3084 */ 3085 static __always_inline struct page * 3086 __rmqueue(struct zone *zone, unsigned int order, int migratetype, 3087 unsigned int alloc_flags) 3088 { 3089 struct page *page; 3090 3091 if (IS_ENABLED(CONFIG_CMA)) { 3092 /* 3093 * Balance movable allocations between regular and CMA areas by 3094 * allocating from CMA when over half of the zone's free memory 3095 * is in the CMA area. 3096 */ 3097 if (alloc_flags & ALLOC_CMA && 3098 zone_page_state(zone, NR_FREE_CMA_PAGES) > 3099 zone_page_state(zone, NR_FREE_PAGES) / 2) { 3100 page = __rmqueue_cma_fallback(zone, order); 3101 if (page) 3102 return page; 3103 } 3104 } 3105 retry: 3106 page = __rmqueue_smallest(zone, order, migratetype); 3107 if (unlikely(!page)) { 3108 if (alloc_flags & ALLOC_CMA) 3109 page = __rmqueue_cma_fallback(zone, order); 3110 3111 if (!page && __rmqueue_fallback(zone, order, migratetype, 3112 alloc_flags)) 3113 goto retry; 3114 } 3115 return page; 3116 } 3117 3118 /* 3119 * Obtain a specified number of elements from the buddy allocator, all under 3120 * a single hold of the lock, for efficiency. Add them to the supplied list. 3121 * Returns the number of new pages which were placed at *list. 3122 */ 3123 static int rmqueue_bulk(struct zone *zone, unsigned int order, 3124 unsigned long count, struct list_head *list, 3125 int migratetype, unsigned int alloc_flags) 3126 { 3127 int i, allocated = 0; 3128 3129 /* Caller must hold IRQ-safe pcp->lock so IRQs are disabled. */ 3130 spin_lock(&zone->lock); 3131 for (i = 0; i < count; ++i) { 3132 struct page *page = __rmqueue(zone, order, migratetype, 3133 alloc_flags); 3134 if (unlikely(page == NULL)) 3135 break; 3136 3137 if (unlikely(check_pcp_refill(page, order))) 3138 continue; 3139 3140 /* 3141 * Split buddy pages returned by expand() are received here in 3142 * physical page order. The page is added to the tail of 3143 * caller's list. From the callers perspective, the linked list 3144 * is ordered by page number under some conditions. This is 3145 * useful for IO devices that can forward direction from the 3146 * head, thus also in the physical page order. 
This is useful 3147 * for IO devices that can merge IO requests if the physical 3148 * pages are ordered properly. 3149 */ 3150 list_add_tail(&page->pcp_list, list); 3151 allocated++; 3152 if (is_migrate_cma(get_pcppage_migratetype(page))) 3153 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 3154 -(1 << order)); 3155 } 3156 3157 /* 3158 * i pages were removed from the buddy list even if some leak due 3159 * to check_pcp_refill failing so adjust NR_FREE_PAGES based 3160 * on i. Do not confuse with 'allocated' which is the number of 3161 * pages added to the pcp list. 3162 */ 3163 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 3164 spin_unlock(&zone->lock); 3165 return allocated; 3166 } 3167 3168 #ifdef CONFIG_NUMA 3169 /* 3170 * Called from the vmstat counter updater to drain pagesets of this 3171 * currently executing processor on remote nodes after they have 3172 * expired. 3173 */ 3174 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 3175 { 3176 int to_drain, batch; 3177 3178 batch = READ_ONCE(pcp->batch); 3179 to_drain = min(pcp->count, batch); 3180 if (to_drain > 0) { 3181 unsigned long flags; 3182 3183 /* 3184 * free_pcppages_bulk expects IRQs disabled for zone->lock 3185 * so even though pcp->lock is not intended to be IRQ-safe, 3186 * it's needed in this context. 3187 */ 3188 spin_lock_irqsave(&pcp->lock, flags); 3189 free_pcppages_bulk(zone, to_drain, pcp, 0); 3190 spin_unlock_irqrestore(&pcp->lock, flags); 3191 } 3192 } 3193 #endif 3194 3195 /* 3196 * Drain pcplists of the indicated processor and zone. 3197 */ 3198 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 3199 { 3200 struct per_cpu_pages *pcp; 3201 3202 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 3203 if (pcp->count) { 3204 unsigned long flags; 3205 3206 /* See drain_zone_pages on why this is disabling IRQs */ 3207 spin_lock_irqsave(&pcp->lock, flags); 3208 free_pcppages_bulk(zone, pcp->count, pcp, 0); 3209 spin_unlock_irqrestore(&pcp->lock, flags); 3210 } 3211 } 3212 3213 /* 3214 * Drain pcplists of all zones on the indicated processor. 3215 */ 3216 static void drain_pages(unsigned int cpu) 3217 { 3218 struct zone *zone; 3219 3220 for_each_populated_zone(zone) { 3221 drain_pages_zone(cpu, zone); 3222 } 3223 } 3224 3225 /* 3226 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 3227 */ 3228 void drain_local_pages(struct zone *zone) 3229 { 3230 int cpu = smp_processor_id(); 3231 3232 if (zone) 3233 drain_pages_zone(cpu, zone); 3234 else 3235 drain_pages(cpu); 3236 } 3237 3238 /* 3239 * The implementation of drain_all_pages(), exposing an extra parameter to 3240 * drain on all cpus. 3241 * 3242 * drain_all_pages() is optimized to only execute on cpus where pcplists are 3243 * not empty. The check for non-emptiness can however race with a free to 3244 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers 3245 * that need the guarantee that every CPU has drained can disable the 3246 * optimizing racy check. 3247 */ 3248 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) 3249 { 3250 int cpu; 3251 3252 /* 3253 * Allocate in the BSS so we won't require allocation in 3254 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 3255 */ 3256 static cpumask_t cpus_with_pcps; 3257 3258 /* 3259 * Do not drain if one is already in progress unless it's specific to 3260 * a zone. Such callers are primarily CMA and memory hotplug and need 3261 * the drain to be complete when the call returns. 
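 *
 * For example, memory offlining and CMA allocation drain a specific zone
 * and must see empty pcplists on return, so when the trylock fails they
 * sleep on the mutex until the in-flight drain finishes instead of
 * returning early.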
3262 */ 3263 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 3264 if (!zone) 3265 return; 3266 mutex_lock(&pcpu_drain_mutex); 3267 } 3268 3269 /* 3270 * We don't care about racing with CPU hotplug event 3271 * as offline notification will cause the notified 3272 * cpu to drain that CPU pcps and on_each_cpu_mask 3273 * disables preemption as part of its processing 3274 */ 3275 for_each_online_cpu(cpu) { 3276 struct per_cpu_pages *pcp; 3277 struct zone *z; 3278 bool has_pcps = false; 3279 3280 if (force_all_cpus) { 3281 /* 3282 * The pcp.count check is racy, some callers need a 3283 * guarantee that no cpu is missed. 3284 */ 3285 has_pcps = true; 3286 } else if (zone) { 3287 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 3288 if (pcp->count) 3289 has_pcps = true; 3290 } else { 3291 for_each_populated_zone(z) { 3292 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); 3293 if (pcp->count) { 3294 has_pcps = true; 3295 break; 3296 } 3297 } 3298 } 3299 3300 if (has_pcps) 3301 cpumask_set_cpu(cpu, &cpus_with_pcps); 3302 else 3303 cpumask_clear_cpu(cpu, &cpus_with_pcps); 3304 } 3305 3306 for_each_cpu(cpu, &cpus_with_pcps) { 3307 if (zone) 3308 drain_pages_zone(cpu, zone); 3309 else 3310 drain_pages(cpu); 3311 } 3312 3313 mutex_unlock(&pcpu_drain_mutex); 3314 } 3315 3316 /* 3317 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 3318 * 3319 * When zone parameter is non-NULL, spill just the single zone's pages. 3320 */ 3321 void drain_all_pages(struct zone *zone) 3322 { 3323 __drain_all_pages(zone, false); 3324 } 3325 3326 #ifdef CONFIG_HIBERNATION 3327 3328 /* 3329 * Touch the watchdog for every WD_PAGE_COUNT pages. 3330 */ 3331 #define WD_PAGE_COUNT (128*1024) 3332 3333 void mark_free_pages(struct zone *zone) 3334 { 3335 unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; 3336 unsigned long flags; 3337 unsigned int order, t; 3338 struct page *page; 3339 3340 if (zone_is_empty(zone)) 3341 return; 3342 3343 spin_lock_irqsave(&zone->lock, flags); 3344 3345 max_zone_pfn = zone_end_pfn(zone); 3346 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 3347 if (pfn_valid(pfn)) { 3348 page = pfn_to_page(pfn); 3349 3350 if (!--page_count) { 3351 touch_nmi_watchdog(); 3352 page_count = WD_PAGE_COUNT; 3353 } 3354 3355 if (page_zone(page) != zone) 3356 continue; 3357 3358 if (!swsusp_page_is_forbidden(page)) 3359 swsusp_unset_page_free(page); 3360 } 3361 3362 for_each_migratetype_order(order, t) { 3363 list_for_each_entry(page, 3364 &zone->free_area[order].free_list[t], buddy_list) { 3365 unsigned long i; 3366 3367 pfn = page_to_pfn(page); 3368 for (i = 0; i < (1UL << order); i++) { 3369 if (!--page_count) { 3370 touch_nmi_watchdog(); 3371 page_count = WD_PAGE_COUNT; 3372 } 3373 swsusp_set_page_free(pfn_to_page(pfn + i)); 3374 } 3375 } 3376 } 3377 spin_unlock_irqrestore(&zone->lock, flags); 3378 } 3379 #endif /* CONFIG_PM */ 3380 3381 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, 3382 unsigned int order) 3383 { 3384 int migratetype; 3385 3386 if (!free_pcp_prepare(page, order)) 3387 return false; 3388 3389 migratetype = get_pfnblock_migratetype(page, pfn); 3390 set_pcppage_migratetype(page, migratetype); 3391 return true; 3392 } 3393 3394 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, int batch, 3395 bool free_high) 3396 { 3397 int min_nr_free, max_nr_free; 3398 3399 /* Free everything if batch freeing high-order pages. 
*/ 3400 if (unlikely(free_high)) 3401 return pcp->count; 3402 3403 /* Check for PCP disabled or boot pageset */ 3404 if (unlikely(high < batch)) 3405 return 1; 3406 3407 /* Leave at least pcp->batch pages on the list */ 3408 min_nr_free = batch; 3409 max_nr_free = high - batch; 3410 3411 /* 3412 * Double the number of pages freed each time there is subsequent 3413 * freeing of pages without any allocation. 3414 */ 3415 batch <<= pcp->free_factor; 3416 if (batch < max_nr_free) 3417 pcp->free_factor++; 3418 batch = clamp(batch, min_nr_free, max_nr_free); 3419 3420 return batch; 3421 } 3422 3423 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, 3424 bool free_high) 3425 { 3426 int high = READ_ONCE(pcp->high); 3427 3428 if (unlikely(!high || free_high)) 3429 return 0; 3430 3431 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) 3432 return high; 3433 3434 /* 3435 * If reclaim is active, limit the number of pages that can be 3436 * stored on pcp lists 3437 */ 3438 return min(READ_ONCE(pcp->batch) << 2, high); 3439 } 3440 3441 static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, 3442 struct page *page, int migratetype, 3443 unsigned int order) 3444 { 3445 int high; 3446 int pindex; 3447 bool free_high; 3448 3449 __count_vm_event(PGFREE); 3450 pindex = order_to_pindex(migratetype, order); 3451 list_add(&page->pcp_list, &pcp->lists[pindex]); 3452 pcp->count += 1 << order; 3453 3454 /* 3455 * As high-order pages other than THP's stored on PCP can contribute 3456 * to fragmentation, limit the number stored when PCP is heavily 3457 * freeing without allocation. The remainder after bulk freeing 3458 * stops will be drained from vmstat refresh context. 3459 */ 3460 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); 3461 3462 high = nr_pcp_high(pcp, zone, free_high); 3463 if (pcp->count >= high) { 3464 int batch = READ_ONCE(pcp->batch); 3465 3466 free_pcppages_bulk(zone, nr_pcp_free(pcp, high, batch, free_high), pcp, pindex); 3467 } 3468 } 3469 3470 /* 3471 * Free a pcp page 3472 */ 3473 void free_unref_page(struct page *page, unsigned int order) 3474 { 3475 unsigned long flags; 3476 unsigned long __maybe_unused UP_flags; 3477 struct per_cpu_pages *pcp; 3478 struct zone *zone; 3479 unsigned long pfn = page_to_pfn(page); 3480 int migratetype; 3481 3482 if (!free_unref_page_prepare(page, pfn, order)) 3483 return; 3484 3485 /* 3486 * We only track unmovable, reclaimable and movable on pcp lists. 3487 * Place ISOLATE pages on the isolated list because they are being 3488 * offlined but treat HIGHATOMIC as movable pages so we can get those 3489 * areas back if necessary. 
Otherwise, we may have to free 3490 * excessively into the page allocator 3491 */ 3492 migratetype = get_pcppage_migratetype(page); 3493 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { 3494 if (unlikely(is_migrate_isolate(migratetype))) { 3495 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); 3496 return; 3497 } 3498 migratetype = MIGRATE_MOVABLE; 3499 } 3500 3501 zone = page_zone(page); 3502 pcp_trylock_prepare(UP_flags); 3503 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); 3504 if (pcp) { 3505 free_unref_page_commit(zone, pcp, page, migratetype, order); 3506 pcp_spin_unlock_irqrestore(pcp, flags); 3507 } else { 3508 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); 3509 } 3510 pcp_trylock_finish(UP_flags); 3511 } 3512 3513 /* 3514 * Free a list of 0-order pages 3515 */ 3516 void free_unref_page_list(struct list_head *list) 3517 { 3518 struct page *page, *next; 3519 struct per_cpu_pages *pcp = NULL; 3520 struct zone *locked_zone = NULL; 3521 unsigned long flags; 3522 int batch_count = 0; 3523 int migratetype; 3524 3525 /* Prepare pages for freeing */ 3526 list_for_each_entry_safe(page, next, list, lru) { 3527 unsigned long pfn = page_to_pfn(page); 3528 if (!free_unref_page_prepare(page, pfn, 0)) { 3529 list_del(&page->lru); 3530 continue; 3531 } 3532 3533 /* 3534 * Free isolated pages directly to the allocator, see 3535 * comment in free_unref_page. 3536 */ 3537 migratetype = get_pcppage_migratetype(page); 3538 if (unlikely(is_migrate_isolate(migratetype))) { 3539 list_del(&page->lru); 3540 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); 3541 continue; 3542 } 3543 } 3544 3545 list_for_each_entry_safe(page, next, list, lru) { 3546 struct zone *zone = page_zone(page); 3547 3548 /* Different zone, different pcp lock. */ 3549 if (zone != locked_zone) { 3550 if (pcp) 3551 pcp_spin_unlock_irqrestore(pcp, flags); 3552 3553 locked_zone = zone; 3554 pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags); 3555 } 3556 3557 /* 3558 * Non-isolated types over MIGRATE_PCPTYPES get added 3559 * to the MIGRATE_MOVABLE pcp list. 3560 */ 3561 migratetype = get_pcppage_migratetype(page); 3562 if (unlikely(migratetype >= MIGRATE_PCPTYPES)) 3563 migratetype = MIGRATE_MOVABLE; 3564 3565 trace_mm_page_free_batched(page); 3566 free_unref_page_commit(zone, pcp, page, migratetype, 0); 3567 3568 /* 3569 * Guard against excessive IRQ disabled times when we get 3570 * a large list of pages to free. 3571 */ 3572 if (++batch_count == SWAP_CLUSTER_MAX) { 3573 pcp_spin_unlock_irqrestore(pcp, flags); 3574 batch_count = 0; 3575 pcp = pcp_spin_lock_irqsave(locked_zone->per_cpu_pageset, flags); 3576 } 3577 } 3578 3579 if (pcp) 3580 pcp_spin_unlock_irqrestore(pcp, flags); 3581 } 3582 3583 /* 3584 * split_page takes a non-compound higher-order page, and splits it into 3585 * n (1<<order) sub-pages: page[0..n] 3586 * Each sub-page must be freed individually. 3587 * 3588 * Note: this is probably too low level an operation for use in drivers. 3589 * Please consult with lkml before using this in your driver. 
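 *
 * An illustrative use (error handling omitted); afterwards each of p,
 * p + 1, p + 2 and p + 3 can be freed on its own:
 *
 *	struct page *p = alloc_pages(GFP_KERNEL, 2);
 *
 *	split_page(p, 2);
 *	__free_page(p + 3);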
3590 */ 3591 void split_page(struct page *page, unsigned int order) 3592 { 3593 int i; 3594 3595 VM_BUG_ON_PAGE(PageCompound(page), page); 3596 VM_BUG_ON_PAGE(!page_count(page), page); 3597 3598 for (i = 1; i < (1 << order); i++) 3599 set_page_refcounted(page + i); 3600 split_page_owner(page, 1 << order); 3601 split_page_memcg(page, 1 << order); 3602 } 3603 EXPORT_SYMBOL_GPL(split_page); 3604 3605 int __isolate_free_page(struct page *page, unsigned int order) 3606 { 3607 struct zone *zone = page_zone(page); 3608 int mt = get_pageblock_migratetype(page); 3609 3610 if (!is_migrate_isolate(mt)) { 3611 unsigned long watermark; 3612 /* 3613 * Obey watermarks as if the page was being allocated. We can 3614 * emulate a high-order watermark check with a raised order-0 3615 * watermark, because we already know our high-order page 3616 * exists. 3617 */ 3618 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); 3619 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 3620 return 0; 3621 3622 __mod_zone_freepage_state(zone, -(1UL << order), mt); 3623 } 3624 3625 del_page_from_free_list(page, zone, order); 3626 3627 /* 3628 * Set the pageblock if the isolated page is at least half of a 3629 * pageblock 3630 */ 3631 if (order >= pageblock_order - 1) { 3632 struct page *endpage = page + (1 << order) - 1; 3633 for (; page < endpage; page += pageblock_nr_pages) { 3634 int mt = get_pageblock_migratetype(page); 3635 /* 3636 * Only change normal pageblocks (i.e., they can merge 3637 * with others) 3638 */ 3639 if (migratetype_is_mergeable(mt)) 3640 set_pageblock_migratetype(page, 3641 MIGRATE_MOVABLE); 3642 } 3643 } 3644 3645 return 1UL << order; 3646 } 3647 3648 /** 3649 * __putback_isolated_page - Return a now-isolated page back where we got it 3650 * @page: Page that was isolated 3651 * @order: Order of the isolated page 3652 * @mt: The page's pageblock's migratetype 3653 * 3654 * This function is meant to return a page pulled from the free lists via 3655 * __isolate_free_page back to the free lists they were pulled from. 3656 */ 3657 void __putback_isolated_page(struct page *page, unsigned int order, int mt) 3658 { 3659 struct zone *zone = page_zone(page); 3660 3661 /* zone lock should be held when this function is called */ 3662 lockdep_assert_held(&zone->lock); 3663 3664 /* Return isolated page to tail of freelist. 
*/ 3665 __free_one_page(page, page_to_pfn(page), zone, order, mt, 3666 FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL); 3667 } 3668 3669 /* 3670 * Update NUMA hit/miss statistics 3671 */ 3672 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, 3673 long nr_account) 3674 { 3675 #ifdef CONFIG_NUMA 3676 enum numa_stat_item local_stat = NUMA_LOCAL; 3677 3678 /* skip numa counters update if numa stats is disabled */ 3679 if (!static_branch_likely(&vm_numa_stat_key)) 3680 return; 3681 3682 if (zone_to_nid(z) != numa_node_id()) 3683 local_stat = NUMA_OTHER; 3684 3685 if (zone_to_nid(z) == zone_to_nid(preferred_zone)) 3686 __count_numa_events(z, NUMA_HIT, nr_account); 3687 else { 3688 __count_numa_events(z, NUMA_MISS, nr_account); 3689 __count_numa_events(preferred_zone, NUMA_FOREIGN, nr_account); 3690 } 3691 __count_numa_events(z, local_stat, nr_account); 3692 #endif 3693 } 3694 3695 static __always_inline 3696 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, 3697 unsigned int order, unsigned int alloc_flags, 3698 int migratetype) 3699 { 3700 struct page *page; 3701 unsigned long flags; 3702 3703 do { 3704 page = NULL; 3705 spin_lock_irqsave(&zone->lock, flags); 3706 /* 3707 * order-0 request can reach here when the pcplist is skipped 3708 * due to non-CMA allocation context. HIGHATOMIC area is 3709 * reserved for high-order atomic allocation, so order-0 3710 * request should skip it. 3711 */ 3712 if (order > 0 && alloc_flags & ALLOC_HARDER) 3713 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 3714 if (!page) { 3715 page = __rmqueue(zone, order, migratetype, alloc_flags); 3716 if (!page) { 3717 spin_unlock_irqrestore(&zone->lock, flags); 3718 return NULL; 3719 } 3720 } 3721 __mod_zone_freepage_state(zone, -(1 << order), 3722 get_pcppage_migratetype(page)); 3723 spin_unlock_irqrestore(&zone->lock, flags); 3724 } while (check_new_pages(page, order)); 3725 3726 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3727 zone_statistics(preferred_zone, zone, 1); 3728 3729 return page; 3730 } 3731 3732 /* Remove page from the per-cpu list, caller must protect the list */ 3733 static inline 3734 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, 3735 int migratetype, 3736 unsigned int alloc_flags, 3737 struct per_cpu_pages *pcp, 3738 struct list_head *list) 3739 { 3740 struct page *page; 3741 3742 do { 3743 if (list_empty(list)) { 3744 int batch = READ_ONCE(pcp->batch); 3745 int alloced; 3746 3747 /* 3748 * Scale batch relative to order if batch implies 3749 * free pages can be stored on the PCP. Batch can 3750 * be 1 for small zones or for boot pagesets which 3751 * should never store free pages as the pages may 3752 * belong to arbitrary zones. 
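 *
 * Worked example with illustrative numbers: for an order-3 request and
 * a pcp batch of 63, the refill below asks rmqueue_bulk() for
 * max(63 >> 3, 2) == 7 order-3 pages, i.e. 56 base pages are accounted
 * in pcp->count. With batch == 1 the scaling is skipped and the single
 * page that is pulled gets handed straight out rather than parked on
 * the pcp.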
3753 */ 3754 if (batch > 1) 3755 batch = max(batch >> order, 2); 3756 alloced = rmqueue_bulk(zone, order, 3757 batch, list, 3758 migratetype, alloc_flags); 3759 3760 pcp->count += alloced << order; 3761 if (unlikely(list_empty(list))) 3762 return NULL; 3763 } 3764 3765 page = list_first_entry(list, struct page, pcp_list); 3766 list_del(&page->pcp_list); 3767 pcp->count -= 1 << order; 3768 } while (check_new_pcp(page, order)); 3769 3770 return page; 3771 } 3772 3773 /* Lock and remove page from the per-cpu list */ 3774 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 3775 struct zone *zone, unsigned int order, 3776 int migratetype, unsigned int alloc_flags) 3777 { 3778 struct per_cpu_pages *pcp; 3779 struct list_head *list; 3780 struct page *page; 3781 unsigned long flags; 3782 unsigned long __maybe_unused UP_flags; 3783 3784 /* 3785 * spin_trylock may fail due to a parallel drain. In the future, the 3786 * trylock will also protect against IRQ reentrancy. 3787 */ 3788 pcp_trylock_prepare(UP_flags); 3789 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); 3790 if (!pcp) { 3791 pcp_trylock_finish(UP_flags); 3792 return NULL; 3793 } 3794 3795 /* 3796 * On allocation, reduce the number of pages that are batch freed. 3797 * See nr_pcp_free() where free_factor is increased for subsequent 3798 * frees. 3799 */ 3800 pcp->free_factor >>= 1; 3801 list = &pcp->lists[order_to_pindex(migratetype, order)]; 3802 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); 3803 pcp_spin_unlock_irqrestore(pcp, flags); 3804 pcp_trylock_finish(UP_flags); 3805 if (page) { 3806 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); 3807 zone_statistics(preferred_zone, zone, 1); 3808 } 3809 return page; 3810 } 3811 3812 /* 3813 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 3814 */ 3815 3816 /* 3817 * Do not instrument rmqueue() with KMSAN. This function may call 3818 * __msan_poison_alloca() through a call to set_pfnblock_flags_mask(). 3819 * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it 3820 * may call rmqueue() again, which will result in a deadlock. 3821 */ 3822 __no_sanitize_memory 3823 static inline 3824 struct page *rmqueue(struct zone *preferred_zone, 3825 struct zone *zone, unsigned int order, 3826 gfp_t gfp_flags, unsigned int alloc_flags, 3827 int migratetype) 3828 { 3829 struct page *page; 3830 3831 /* 3832 * We most definitely don't want callers attempting to 3833 * allocate greater than order-1 page units with __GFP_NOFAIL. 3834 */ 3835 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 3836 3837 if (likely(pcp_allowed_order(order))) { 3838 /* 3839 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and 3840 * we need to skip it when CMA area isn't allowed. 
3841 */ 3842 if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || 3843 migratetype != MIGRATE_MOVABLE) { 3844 page = rmqueue_pcplist(preferred_zone, zone, order, 3845 migratetype, alloc_flags); 3846 if (likely(page)) 3847 goto out; 3848 } 3849 } 3850 3851 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, 3852 migratetype); 3853 3854 out: 3855 /* Separate test+clear to avoid unnecessary atomics */ 3856 if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { 3857 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); 3858 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); 3859 } 3860 3861 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 3862 return page; 3863 } 3864 3865 #ifdef CONFIG_FAIL_PAGE_ALLOC 3866 3867 static struct { 3868 struct fault_attr attr; 3869 3870 bool ignore_gfp_highmem; 3871 bool ignore_gfp_reclaim; 3872 u32 min_order; 3873 } fail_page_alloc = { 3874 .attr = FAULT_ATTR_INITIALIZER, 3875 .ignore_gfp_reclaim = true, 3876 .ignore_gfp_highmem = true, 3877 .min_order = 1, 3878 }; 3879 3880 static int __init setup_fail_page_alloc(char *str) 3881 { 3882 return setup_fault_attr(&fail_page_alloc.attr, str); 3883 } 3884 __setup("fail_page_alloc=", setup_fail_page_alloc); 3885 3886 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3887 { 3888 if (order < fail_page_alloc.min_order) 3889 return false; 3890 if (gfp_mask & __GFP_NOFAIL) 3891 return false; 3892 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 3893 return false; 3894 if (fail_page_alloc.ignore_gfp_reclaim && 3895 (gfp_mask & __GFP_DIRECT_RECLAIM)) 3896 return false; 3897 3898 if (gfp_mask & __GFP_NOWARN) 3899 fail_page_alloc.attr.no_warn = true; 3900 3901 return should_fail(&fail_page_alloc.attr, 1 << order); 3902 } 3903 3904 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 3905 3906 static int __init fail_page_alloc_debugfs(void) 3907 { 3908 umode_t mode = S_IFREG | 0600; 3909 struct dentry *dir; 3910 3911 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 3912 &fail_page_alloc.attr); 3913 3914 debugfs_create_bool("ignore-gfp-wait", mode, dir, 3915 &fail_page_alloc.ignore_gfp_reclaim); 3916 debugfs_create_bool("ignore-gfp-highmem", mode, dir, 3917 &fail_page_alloc.ignore_gfp_highmem); 3918 debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); 3919 3920 return 0; 3921 } 3922 3923 late_initcall(fail_page_alloc_debugfs); 3924 3925 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 3926 3927 #else /* CONFIG_FAIL_PAGE_ALLOC */ 3928 3929 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3930 { 3931 return false; 3932 } 3933 3934 #endif /* CONFIG_FAIL_PAGE_ALLOC */ 3935 3936 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 3937 { 3938 return __should_fail_alloc_page(gfp_mask, order); 3939 } 3940 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE); 3941 3942 static inline long __zone_watermark_unusable_free(struct zone *z, 3943 unsigned int order, unsigned int alloc_flags) 3944 { 3945 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); 3946 long unusable_free = (1 << order) - 1; 3947 3948 /* 3949 * If the caller does not have rights to ALLOC_HARDER then subtract 3950 * the high-atomic reserves. This will over-estimate the size of the 3951 * atomic reserve but it avoids a search. 
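 *
 * Worked example with made-up numbers: an order-2 request without
 * ALLOC_HARDER/ALLOC_OOM and without ALLOC_CMA, on a zone holding 128
 * highatomic reserved pages and 256 free CMA pages, is charged
 * (1 << 2) - 1 + 128 + 256 = 387 pages of unusable free memory before
 * the watermark comparison.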
3952 */ 3953 if (likely(!alloc_harder)) 3954 unusable_free += z->nr_reserved_highatomic; 3955 3956 #ifdef CONFIG_CMA 3957 /* If allocation can't use CMA areas don't use free CMA pages */ 3958 if (!(alloc_flags & ALLOC_CMA)) 3959 unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES); 3960 #endif 3961 3962 return unusable_free; 3963 } 3964 3965 /* 3966 * Return true if free base pages are above 'mark'. For high-order checks it 3967 * will return true of the order-0 watermark is reached and there is at least 3968 * one free page of a suitable size. Checking now avoids taking the zone lock 3969 * to check in the allocation paths if no pages are free. 3970 */ 3971 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 3972 int highest_zoneidx, unsigned int alloc_flags, 3973 long free_pages) 3974 { 3975 long min = mark; 3976 int o; 3977 const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM)); 3978 3979 /* free_pages may go negative - that's OK */ 3980 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); 3981 3982 if (alloc_flags & ALLOC_HIGH) 3983 min -= min / 2; 3984 3985 if (unlikely(alloc_harder)) { 3986 /* 3987 * OOM victims can try even harder than normal ALLOC_HARDER 3988 * users on the grounds that it's definitely going to be in 3989 * the exit path shortly and free memory. Any allocation it 3990 * makes during the free path will be small and short-lived. 3991 */ 3992 if (alloc_flags & ALLOC_OOM) 3993 min -= min / 2; 3994 else 3995 min -= min / 4; 3996 } 3997 3998 /* 3999 * Check watermarks for an order-0 allocation request. If these 4000 * are not met, then a high-order request also cannot go ahead 4001 * even if a suitable page happened to be free. 4002 */ 4003 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) 4004 return false; 4005 4006 /* If this is an order-0 request then the watermark is fine */ 4007 if (!order) 4008 return true; 4009 4010 /* For a high-order request, check at least one suitable page is free */ 4011 for (o = order; o < MAX_ORDER; o++) { 4012 struct free_area *area = &z->free_area[o]; 4013 int mt; 4014 4015 if (!area->nr_free) 4016 continue; 4017 4018 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 4019 if (!free_area_empty(area, mt)) 4020 return true; 4021 } 4022 4023 #ifdef CONFIG_CMA 4024 if ((alloc_flags & ALLOC_CMA) && 4025 !free_area_empty(area, MIGRATE_CMA)) { 4026 return true; 4027 } 4028 #endif 4029 if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC)) 4030 return true; 4031 } 4032 return false; 4033 } 4034 4035 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 4036 int highest_zoneidx, unsigned int alloc_flags) 4037 { 4038 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 4039 zone_page_state(z, NR_FREE_PAGES)); 4040 } 4041 4042 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 4043 unsigned long mark, int highest_zoneidx, 4044 unsigned int alloc_flags, gfp_t gfp_mask) 4045 { 4046 long free_pages; 4047 4048 free_pages = zone_page_state(z, NR_FREE_PAGES); 4049 4050 /* 4051 * Fast check for order-0 only. If this fails then the reserves 4052 * need to be calculated. 4053 */ 4054 if (!order) { 4055 long usable_free; 4056 long reserved; 4057 4058 usable_free = free_pages; 4059 reserved = __zone_watermark_unusable_free(z, 0, alloc_flags); 4060 4061 /* reserved may over estimate high-atomic reserves. 
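 * For example, with free_pages == 1000, reserved == 100, a watermark of
 * 768 and no applicable lowmem_reserve, usable_free becomes 900 and the
 * fast path succeeds without falling back to the full
 * __zone_watermark_ok() check below.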
*/ 4062 usable_free -= min(usable_free, reserved); 4063 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) 4064 return true; 4065 } 4066 4067 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, 4068 free_pages)) 4069 return true; 4070 /* 4071 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations 4072 * when checking the min watermark. The min watermark is the 4073 * point where boosting is ignored so that kswapd is woken up 4074 * when below the low watermark. 4075 */ 4076 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost 4077 && ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) { 4078 mark = z->_watermark[WMARK_MIN]; 4079 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 4080 alloc_flags, free_pages); 4081 } 4082 4083 return false; 4084 } 4085 4086 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 4087 unsigned long mark, int highest_zoneidx) 4088 { 4089 long free_pages = zone_page_state(z, NR_FREE_PAGES); 4090 4091 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 4092 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 4093 4094 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, 4095 free_pages); 4096 } 4097 4098 #ifdef CONFIG_NUMA 4099 int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE; 4100 4101 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 4102 { 4103 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 4104 node_reclaim_distance; 4105 } 4106 #else /* CONFIG_NUMA */ 4107 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 4108 { 4109 return true; 4110 } 4111 #endif /* CONFIG_NUMA */ 4112 4113 /* 4114 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid 4115 * fragmentation is subtle. If the preferred zone was HIGHMEM then 4116 * premature use of a lower zone may cause lowmem pressure problems that 4117 * are worse than fragmentation. If the next zone is ZONE_DMA then it is 4118 * probably too small. It only makes sense to spread allocations to avoid 4119 * fragmentation between the Normal and DMA32 zones. 4120 */ 4121 static inline unsigned int 4122 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) 4123 { 4124 unsigned int alloc_flags; 4125 4126 /* 4127 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4128 * to save a branch. 4129 */ 4130 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); 4131 4132 #ifdef CONFIG_ZONE_DMA32 4133 if (!zone) 4134 return alloc_flags; 4135 4136 if (zone_idx(zone) != ZONE_NORMAL) 4137 return alloc_flags; 4138 4139 /* 4140 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and 4141 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume 4142 * on UMA that if Normal is populated then so is DMA32. 4143 */ 4144 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); 4145 if (nr_online_nodes > 1 && !populated_zone(--zone)) 4146 return alloc_flags; 4147 4148 alloc_flags |= ALLOC_NOFRAGMENT; 4149 #endif /* CONFIG_ZONE_DMA32 */ 4150 return alloc_flags; 4151 } 4152 4153 /* Must be called after current_gfp_context() which can change gfp_mask */ 4154 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, 4155 unsigned int alloc_flags) 4156 { 4157 #ifdef CONFIG_CMA 4158 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) 4159 alloc_flags |= ALLOC_CMA; 4160 #endif 4161 return alloc_flags; 4162 } 4163 4164 /* 4165 * get_page_from_freelist goes through the zonelist trying to allocate 4166 * a page. 
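 *
 * In outline: walk the zones in preference order, skipping any the cpuset
 * or the per-node dirty limit rules out; check the watermark with
 * zone_watermark_fast(), possibly trying node_reclaim() first; then hand
 * the zone to rmqueue(). If the whole walk fails while ALLOC_NOFRAGMENT
 * is set, clear it and retry once without the fragmentation restriction.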
4167 */ 4168 static struct page * 4169 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 4170 const struct alloc_context *ac) 4171 { 4172 struct zoneref *z; 4173 struct zone *zone; 4174 struct pglist_data *last_pgdat = NULL; 4175 bool last_pgdat_dirty_ok = false; 4176 bool no_fallback; 4177 4178 retry: 4179 /* 4180 * Scan zonelist, looking for a zone with enough free. 4181 * See also __cpuset_node_allowed() comment in kernel/cgroup/cpuset.c. 4182 */ 4183 no_fallback = alloc_flags & ALLOC_NOFRAGMENT; 4184 z = ac->preferred_zoneref; 4185 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, 4186 ac->nodemask) { 4187 struct page *page; 4188 unsigned long mark; 4189 4190 if (cpusets_enabled() && 4191 (alloc_flags & ALLOC_CPUSET) && 4192 !__cpuset_zone_allowed(zone, gfp_mask)) 4193 continue; 4194 /* 4195 * When allocating a page cache page for writing, we 4196 * want to get it from a node that is within its dirty 4197 * limit, such that no single node holds more than its 4198 * proportional share of globally allowed dirty pages. 4199 * The dirty limits take into account the node's 4200 * lowmem reserves and high watermark so that kswapd 4201 * should be able to balance it without having to 4202 * write pages from its LRU list. 4203 * 4204 * XXX: For now, allow allocations to potentially 4205 * exceed the per-node dirty limit in the slowpath 4206 * (spread_dirty_pages unset) before going into reclaim, 4207 * which is important when on a NUMA setup the allowed 4208 * nodes are together not big enough to reach the 4209 * global limit. The proper fix for these situations 4210 * will require awareness of nodes in the 4211 * dirty-throttling and the flusher threads. 4212 */ 4213 if (ac->spread_dirty_pages) { 4214 if (last_pgdat != zone->zone_pgdat) { 4215 last_pgdat = zone->zone_pgdat; 4216 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); 4217 } 4218 4219 if (!last_pgdat_dirty_ok) 4220 continue; 4221 } 4222 4223 if (no_fallback && nr_online_nodes > 1 && 4224 zone != ac->preferred_zoneref->zone) { 4225 int local_nid; 4226 4227 /* 4228 * If moving to a remote node, retry but allow 4229 * fragmenting fallbacks. Locality is more important 4230 * than fragmentation avoidance. 4231 */ 4232 local_nid = zone_to_nid(ac->preferred_zoneref->zone); 4233 if (zone_to_nid(zone) != local_nid) { 4234 alloc_flags &= ~ALLOC_NOFRAGMENT; 4235 goto retry; 4236 } 4237 } 4238 4239 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 4240 if (!zone_watermark_fast(zone, order, mark, 4241 ac->highest_zoneidx, alloc_flags, 4242 gfp_mask)) { 4243 int ret; 4244 4245 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 4246 /* 4247 * Watermark failed for this zone, but see if we can 4248 * grow this zone if it contains deferred pages. 
4249 */ 4250 if (static_branch_unlikely(&deferred_pages)) { 4251 if (_deferred_grow_zone(zone, order)) 4252 goto try_this_zone; 4253 } 4254 #endif 4255 /* Checked here to keep the fast path fast */ 4256 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 4257 if (alloc_flags & ALLOC_NO_WATERMARKS) 4258 goto try_this_zone; 4259 4260 if (!node_reclaim_enabled() || 4261 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 4262 continue; 4263 4264 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 4265 switch (ret) { 4266 case NODE_RECLAIM_NOSCAN: 4267 /* did not scan */ 4268 continue; 4269 case NODE_RECLAIM_FULL: 4270 /* scanned but unreclaimable */ 4271 continue; 4272 default: 4273 /* did we reclaim enough */ 4274 if (zone_watermark_ok(zone, order, mark, 4275 ac->highest_zoneidx, alloc_flags)) 4276 goto try_this_zone; 4277 4278 continue; 4279 } 4280 } 4281 4282 try_this_zone: 4283 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 4284 gfp_mask, alloc_flags, ac->migratetype); 4285 if (page) { 4286 prep_new_page(page, order, gfp_mask, alloc_flags); 4287 4288 /* 4289 * If this is a high-order atomic allocation then check 4290 * if the pageblock should be reserved for the future 4291 */ 4292 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 4293 reserve_highatomic_pageblock(page, zone, order); 4294 4295 return page; 4296 } else { 4297 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 4298 /* Try again if zone has deferred pages */ 4299 if (static_branch_unlikely(&deferred_pages)) { 4300 if (_deferred_grow_zone(zone, order)) 4301 goto try_this_zone; 4302 } 4303 #endif 4304 } 4305 } 4306 4307 /* 4308 * It's possible on a UMA machine to get through all zones that are 4309 * fragmented. If avoiding fragmentation, reset and try again. 4310 */ 4311 if (no_fallback) { 4312 alloc_flags &= ~ALLOC_NOFRAGMENT; 4313 goto retry; 4314 } 4315 4316 return NULL; 4317 } 4318 4319 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 4320 { 4321 unsigned int filter = SHOW_MEM_FILTER_NODES; 4322 4323 /* 4324 * This documents exceptions given to allocations in certain 4325 * contexts that are allowed to allocate outside current's set 4326 * of allowed nodes. 4327 */ 4328 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4329 if (tsk_is_oom_victim(current) || 4330 (current->flags & (PF_MEMALLOC | PF_EXITING))) 4331 filter &= ~SHOW_MEM_FILTER_NODES; 4332 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 4333 filter &= ~SHOW_MEM_FILTER_NODES; 4334 4335 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); 4336 } 4337 4338 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
4339 { 4340 struct va_format vaf; 4341 va_list args; 4342 static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1); 4343 4344 if ((gfp_mask & __GFP_NOWARN) || 4345 !__ratelimit(&nopage_rs) || 4346 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) 4347 return; 4348 4349 va_start(args, fmt); 4350 vaf.fmt = fmt; 4351 vaf.va = &args; 4352 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", 4353 current->comm, &vaf, gfp_mask, &gfp_mask, 4354 nodemask_pr_args(nodemask)); 4355 va_end(args); 4356 4357 cpuset_print_current_mems_allowed(); 4358 pr_cont("\n"); 4359 dump_stack(); 4360 warn_alloc_show_mem(gfp_mask, nodemask); 4361 } 4362 4363 static inline struct page * 4364 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 4365 unsigned int alloc_flags, 4366 const struct alloc_context *ac) 4367 { 4368 struct page *page; 4369 4370 page = get_page_from_freelist(gfp_mask, order, 4371 alloc_flags|ALLOC_CPUSET, ac); 4372 /* 4373 * fallback to ignore cpuset restriction if our nodes 4374 * are depleted 4375 */ 4376 if (!page) 4377 page = get_page_from_freelist(gfp_mask, order, 4378 alloc_flags, ac); 4379 4380 return page; 4381 } 4382 4383 static inline struct page * 4384 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 4385 const struct alloc_context *ac, unsigned long *did_some_progress) 4386 { 4387 struct oom_control oc = { 4388 .zonelist = ac->zonelist, 4389 .nodemask = ac->nodemask, 4390 .memcg = NULL, 4391 .gfp_mask = gfp_mask, 4392 .order = order, 4393 }; 4394 struct page *page; 4395 4396 *did_some_progress = 0; 4397 4398 /* 4399 * Acquire the oom lock. If that fails, somebody else is 4400 * making progress for us. 4401 */ 4402 if (!mutex_trylock(&oom_lock)) { 4403 *did_some_progress = 1; 4404 schedule_timeout_uninterruptible(1); 4405 return NULL; 4406 } 4407 4408 /* 4409 * Go through the zonelist yet one more time, keep very high watermark 4410 * here, this is only to catch a parallel oom killing, we must fail if 4411 * we're still under heavy pressure. But make sure that this reclaim 4412 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY 4413 * allocation which will never fail due to oom_lock already held. 4414 */ 4415 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & 4416 ~__GFP_DIRECT_RECLAIM, order, 4417 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 4418 if (page) 4419 goto out; 4420 4421 /* Coredumps can quickly deplete all memory reserves */ 4422 if (current->flags & PF_DUMPCORE) 4423 goto out; 4424 /* The OOM killer will not help higher order allocs */ 4425 if (order > PAGE_ALLOC_COSTLY_ORDER) 4426 goto out; 4427 /* 4428 * We have already exhausted all our reclaim opportunities without any 4429 * success so it is time to admit defeat. We will skip the OOM killer 4430 * because it is very likely that the caller has a more reasonable 4431 * fallback than shooting a random task. 4432 * 4433 * The OOM killer may not free memory on a specific node. 4434 */ 4435 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) 4436 goto out; 4437 /* The OOM killer does not needlessly kill tasks for lowmem */ 4438 if (ac->highest_zoneidx < ZONE_NORMAL) 4439 goto out; 4440 if (pm_suspended_storage()) 4441 goto out; 4442 /* 4443 * XXX: GFP_NOFS allocations should rather fail than rely on 4444 * other request to make a forward progress. 4445 * We are in an unfortunate situation where out_of_memory cannot 4446 * do much for this context but let's try it to at least get 4447 * access to memory reserved if the current task is killed (see 4448 * out_of_memory). 
Once filesystems are ready to handle allocation 4449 * failures more gracefully we should just bail out here. 4450 */ 4451 4452 /* Exhausted what can be done so it's blame time */ 4453 if (out_of_memory(&oc) || 4454 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { 4455 *did_some_progress = 1; 4456 4457 /* 4458 * Help non-failing allocations by giving them access to memory 4459 * reserves 4460 */ 4461 if (gfp_mask & __GFP_NOFAIL) 4462 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 4463 ALLOC_NO_WATERMARKS, ac); 4464 } 4465 out: 4466 mutex_unlock(&oom_lock); 4467 return page; 4468 } 4469 4470 /* 4471 * Maximum number of compaction retries with a progress before OOM 4472 * killer is consider as the only way to move forward. 4473 */ 4474 #define MAX_COMPACT_RETRIES 16 4475 4476 #ifdef CONFIG_COMPACTION 4477 /* Try memory compaction for high-order allocations before reclaim */ 4478 static struct page * 4479 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4480 unsigned int alloc_flags, const struct alloc_context *ac, 4481 enum compact_priority prio, enum compact_result *compact_result) 4482 { 4483 struct page *page = NULL; 4484 unsigned long pflags; 4485 unsigned int noreclaim_flag; 4486 4487 if (!order) 4488 return NULL; 4489 4490 psi_memstall_enter(&pflags); 4491 delayacct_compact_start(); 4492 noreclaim_flag = memalloc_noreclaim_save(); 4493 4494 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 4495 prio, &page); 4496 4497 memalloc_noreclaim_restore(noreclaim_flag); 4498 psi_memstall_leave(&pflags); 4499 delayacct_compact_end(); 4500 4501 if (*compact_result == COMPACT_SKIPPED) 4502 return NULL; 4503 /* 4504 * At least in one zone compaction wasn't deferred or skipped, so let's 4505 * count a compaction stall 4506 */ 4507 count_vm_event(COMPACTSTALL); 4508 4509 /* Prep a captured page if available */ 4510 if (page) 4511 prep_new_page(page, order, gfp_mask, alloc_flags); 4512 4513 /* Try get a page from the freelist if available */ 4514 if (!page) 4515 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4516 4517 if (page) { 4518 struct zone *zone = page_zone(page); 4519 4520 zone->compact_blockskip_flush = false; 4521 compaction_defer_reset(zone, order, true); 4522 count_vm_event(COMPACTSUCCESS); 4523 return page; 4524 } 4525 4526 /* 4527 * It's bad if compaction run occurs and fails. The most likely reason 4528 * is that pages exist, but not enough to satisfy watermarks. 
4529 */ 4530 count_vm_event(COMPACTFAIL); 4531 4532 cond_resched(); 4533 4534 return NULL; 4535 } 4536 4537 static inline bool 4538 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 4539 enum compact_result compact_result, 4540 enum compact_priority *compact_priority, 4541 int *compaction_retries) 4542 { 4543 int max_retries = MAX_COMPACT_RETRIES; 4544 int min_priority; 4545 bool ret = false; 4546 int retries = *compaction_retries; 4547 enum compact_priority priority = *compact_priority; 4548 4549 if (!order) 4550 return false; 4551 4552 if (fatal_signal_pending(current)) 4553 return false; 4554 4555 if (compaction_made_progress(compact_result)) 4556 (*compaction_retries)++; 4557 4558 /* 4559 * compaction considers all the zone as desperately out of memory 4560 * so it doesn't really make much sense to retry except when the 4561 * failure could be caused by insufficient priority 4562 */ 4563 if (compaction_failed(compact_result)) 4564 goto check_priority; 4565 4566 /* 4567 * compaction was skipped because there are not enough order-0 pages 4568 * to work with, so we retry only if it looks like reclaim can help. 4569 */ 4570 if (compaction_needs_reclaim(compact_result)) { 4571 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 4572 goto out; 4573 } 4574 4575 /* 4576 * make sure the compaction wasn't deferred or didn't bail out early 4577 * due to locks contention before we declare that we should give up. 4578 * But the next retry should use a higher priority if allowed, so 4579 * we don't just keep bailing out endlessly. 4580 */ 4581 if (compaction_withdrawn(compact_result)) { 4582 goto check_priority; 4583 } 4584 4585 /* 4586 * !costly requests are much more important than __GFP_RETRY_MAYFAIL 4587 * costly ones because they are de facto nofail and invoke OOM 4588 * killer to move on while costly can fail and users are ready 4589 * to cope with that. 1/4 retries is rather arbitrary but we 4590 * would need much more detailed feedback from compaction to 4591 * make a better decision. 4592 */ 4593 if (order > PAGE_ALLOC_COSTLY_ORDER) 4594 max_retries /= 4; 4595 if (*compaction_retries <= max_retries) { 4596 ret = true; 4597 goto out; 4598 } 4599 4600 /* 4601 * Make sure there are attempts at the highest priority if we exhausted 4602 * all retries or failed at the lower priorities. 4603 */ 4604 check_priority: 4605 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
4606 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 4607 4608 if (*compact_priority > min_priority) { 4609 (*compact_priority)--; 4610 *compaction_retries = 0; 4611 ret = true; 4612 } 4613 out: 4614 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 4615 return ret; 4616 } 4617 #else 4618 static inline struct page * 4619 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 4620 unsigned int alloc_flags, const struct alloc_context *ac, 4621 enum compact_priority prio, enum compact_result *compact_result) 4622 { 4623 *compact_result = COMPACT_SKIPPED; 4624 return NULL; 4625 } 4626 4627 static inline bool 4628 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 4629 enum compact_result compact_result, 4630 enum compact_priority *compact_priority, 4631 int *compaction_retries) 4632 { 4633 struct zone *zone; 4634 struct zoneref *z; 4635 4636 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 4637 return false; 4638 4639 /* 4640 * There are setups with compaction disabled which would prefer to loop 4641 * inside the allocator rather than hit the oom killer prematurely. 4642 * Let's give them a good hope and keep retrying while the order-0 4643 * watermarks are OK. 4644 */ 4645 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4646 ac->highest_zoneidx, ac->nodemask) { 4647 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 4648 ac->highest_zoneidx, alloc_flags)) 4649 return true; 4650 } 4651 return false; 4652 } 4653 #endif /* CONFIG_COMPACTION */ 4654 4655 #ifdef CONFIG_LOCKDEP 4656 static struct lockdep_map __fs_reclaim_map = 4657 STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map); 4658 4659 static bool __need_reclaim(gfp_t gfp_mask) 4660 { 4661 /* no reclaim without waiting on it */ 4662 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) 4663 return false; 4664 4665 /* this guy won't enter reclaim */ 4666 if (current->flags & PF_MEMALLOC) 4667 return false; 4668 4669 if (gfp_mask & __GFP_NOLOCKDEP) 4670 return false; 4671 4672 return true; 4673 } 4674 4675 void __fs_reclaim_acquire(unsigned long ip) 4676 { 4677 lock_acquire_exclusive(&__fs_reclaim_map, 0, 0, NULL, ip); 4678 } 4679 4680 void __fs_reclaim_release(unsigned long ip) 4681 { 4682 lock_release(&__fs_reclaim_map, ip); 4683 } 4684 4685 void fs_reclaim_acquire(gfp_t gfp_mask) 4686 { 4687 gfp_mask = current_gfp_context(gfp_mask); 4688 4689 if (__need_reclaim(gfp_mask)) { 4690 if (gfp_mask & __GFP_FS) 4691 __fs_reclaim_acquire(_RET_IP_); 4692 4693 #ifdef CONFIG_MMU_NOTIFIER 4694 lock_map_acquire(&__mmu_notifier_invalidate_range_start_map); 4695 lock_map_release(&__mmu_notifier_invalidate_range_start_map); 4696 #endif 4697 4698 } 4699 } 4700 EXPORT_SYMBOL_GPL(fs_reclaim_acquire); 4701 4702 void fs_reclaim_release(gfp_t gfp_mask) 4703 { 4704 gfp_mask = current_gfp_context(gfp_mask); 4705 4706 if (__need_reclaim(gfp_mask)) { 4707 if (gfp_mask & __GFP_FS) 4708 __fs_reclaim_release(_RET_IP_); 4709 } 4710 } 4711 EXPORT_SYMBOL_GPL(fs_reclaim_release); 4712 #endif 4713 4714 /* 4715 * Zonelists may change due to hotplug during allocation. Detect when zonelists 4716 * have been rebuilt so allocation retries. Reader side does not lock and 4717 * retries the allocation if zonelist changes. Writer side is protected by the 4718 * embedded spin_lock. 
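 *
 * The reader side follows the usual seqcount pattern, as used by
 * __alloc_pages_slowpath() below:
 *
 *	zonelist_iter_cookie = zonelist_iter_begin();
 *	... one or more allocation attempts ...
 *	if (check_retry_zonelist(zonelist_iter_cookie))
 *		goto restart;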
4719 */ 4720 static DEFINE_SEQLOCK(zonelist_update_seq); 4721 4722 static unsigned int zonelist_iter_begin(void) 4723 { 4724 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4725 return read_seqbegin(&zonelist_update_seq); 4726 4727 return 0; 4728 } 4729 4730 static unsigned int check_retry_zonelist(unsigned int seq) 4731 { 4732 if (IS_ENABLED(CONFIG_MEMORY_HOTREMOVE)) 4733 return read_seqretry(&zonelist_update_seq, seq); 4734 4735 return seq; 4736 } 4737 4738 /* Perform direct synchronous page reclaim */ 4739 static unsigned long 4740 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 4741 const struct alloc_context *ac) 4742 { 4743 unsigned int noreclaim_flag; 4744 unsigned long progress; 4745 4746 cond_resched(); 4747 4748 /* We now go into synchronous reclaim */ 4749 cpuset_memory_pressure_bump(); 4750 fs_reclaim_acquire(gfp_mask); 4751 noreclaim_flag = memalloc_noreclaim_save(); 4752 4753 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 4754 ac->nodemask); 4755 4756 memalloc_noreclaim_restore(noreclaim_flag); 4757 fs_reclaim_release(gfp_mask); 4758 4759 cond_resched(); 4760 4761 return progress; 4762 } 4763 4764 /* The really slow allocator path where we enter direct reclaim */ 4765 static inline struct page * 4766 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 4767 unsigned int alloc_flags, const struct alloc_context *ac, 4768 unsigned long *did_some_progress) 4769 { 4770 struct page *page = NULL; 4771 unsigned long pflags; 4772 bool drained = false; 4773 4774 psi_memstall_enter(&pflags); 4775 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 4776 if (unlikely(!(*did_some_progress))) 4777 goto out; 4778 4779 retry: 4780 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 4781 4782 /* 4783 * If an allocation failed after direct reclaim, it could be because 4784 * pages are pinned on the per-cpu lists or in high alloc reserves. 4785 * Shrink them and try again 4786 */ 4787 if (!page && !drained) { 4788 unreserve_highatomic_pageblock(ac, false); 4789 drain_all_pages(NULL); 4790 drained = true; 4791 goto retry; 4792 } 4793 out: 4794 psi_memstall_leave(&pflags); 4795 4796 return page; 4797 } 4798 4799 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, 4800 const struct alloc_context *ac) 4801 { 4802 struct zoneref *z; 4803 struct zone *zone; 4804 pg_data_t *last_pgdat = NULL; 4805 enum zone_type highest_zoneidx = ac->highest_zoneidx; 4806 4807 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, 4808 ac->nodemask) { 4809 if (!managed_zone(zone)) 4810 continue; 4811 if (last_pgdat != zone->zone_pgdat) { 4812 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); 4813 last_pgdat = zone->zone_pgdat; 4814 } 4815 } 4816 } 4817 4818 static inline unsigned int 4819 gfp_to_alloc_flags(gfp_t gfp_mask) 4820 { 4821 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 4822 4823 /* 4824 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH 4825 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD 4826 * to save two branches. 4827 */ 4828 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 4829 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); 4830 4831 /* 4832 * The caller may dip into page reserves a bit more if the caller 4833 * cannot run direct reclaim, or if the caller has realtime scheduling 4834 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 4835 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 
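 *
 * For instance, a plain GFP_ATOMIC request (__GFP_HIGH | __GFP_ATOMIC |
 * __GFP_KSWAPD_RECLAIM) ends up with ALLOC_WMARK_MIN | ALLOC_HIGH |
 * ALLOC_KSWAPD | ALLOC_HARDER, and has ALLOC_CPUSET cleared below.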
4836 */ 4837 alloc_flags |= (__force int) 4838 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); 4839 4840 if (gfp_mask & __GFP_ATOMIC) { 4841 /* 4842 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 4843 * if it can't schedule. 4844 */ 4845 if (!(gfp_mask & __GFP_NOMEMALLOC)) 4846 alloc_flags |= ALLOC_HARDER; 4847 /* 4848 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 4849 * comment for __cpuset_node_allowed(). 4850 */ 4851 alloc_flags &= ~ALLOC_CPUSET; 4852 } else if (unlikely(rt_task(current)) && in_task()) 4853 alloc_flags |= ALLOC_HARDER; 4854 4855 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); 4856 4857 return alloc_flags; 4858 } 4859 4860 static bool oom_reserves_allowed(struct task_struct *tsk) 4861 { 4862 if (!tsk_is_oom_victim(tsk)) 4863 return false; 4864 4865 /* 4866 * !MMU doesn't have oom reaper so give access to memory reserves 4867 * only to the thread with TIF_MEMDIE set 4868 */ 4869 if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE)) 4870 return false; 4871 4872 return true; 4873 } 4874 4875 /* 4876 * Distinguish requests which really need access to full memory 4877 * reserves from oom victims which can live with a portion of it 4878 */ 4879 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) 4880 { 4881 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 4882 return 0; 4883 if (gfp_mask & __GFP_MEMALLOC) 4884 return ALLOC_NO_WATERMARKS; 4885 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 4886 return ALLOC_NO_WATERMARKS; 4887 if (!in_interrupt()) { 4888 if (current->flags & PF_MEMALLOC) 4889 return ALLOC_NO_WATERMARKS; 4890 else if (oom_reserves_allowed(current)) 4891 return ALLOC_OOM; 4892 } 4893 4894 return 0; 4895 } 4896 4897 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 4898 { 4899 return !!__gfp_pfmemalloc_flags(gfp_mask); 4900 } 4901 4902 /* 4903 * Checks whether it makes sense to retry the reclaim to make a forward progress 4904 * for the given allocation request. 4905 * 4906 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 4907 * without success, or when we couldn't even meet the watermark if we 4908 * reclaimed all remaining pages on the LRU lists. 4909 * 4910 * Returns true if a retry is viable or false to enter the oom path. 4911 */ 4912 static inline bool 4913 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 4914 struct alloc_context *ac, int alloc_flags, 4915 bool did_some_progress, int *no_progress_loops) 4916 { 4917 struct zone *zone; 4918 struct zoneref *z; 4919 bool ret = false; 4920 4921 /* 4922 * Costly allocations might have made a progress but this doesn't mean 4923 * their order will become available due to high fragmentation so 4924 * always increment the no progress counter for them 4925 */ 4926 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 4927 *no_progress_loops = 0; 4928 else 4929 (*no_progress_loops)++; 4930 4931 /* 4932 * Make sure we converge to OOM if we cannot make any progress 4933 * several times in the row. 4934 */ 4935 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 4936 /* Before OOM, exhaust highatomic_reserve */ 4937 return unreserve_highatomic_pageblock(ac, true); 4938 } 4939 4940 /* 4941 * Keep reclaiming pages while there is a chance this will lead 4942 * somewhere. If none of the target zones can satisfy our allocation 4943 * request even if all reclaimable pages are considered then we are 4944 * screwed and have to go OOM. 
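 *
 * As a concrete, made-up example: a zone with 500 free pages and 10000
 * reclaimable pages cannot satisfy a request whose min watermark is
 * 12000; even the optimistic available == 10500 fails
 * __zone_watermark_ok(), so that zone does not justify another retry.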
4945 */ 4946 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 4947 ac->highest_zoneidx, ac->nodemask) { 4948 unsigned long available; 4949 unsigned long reclaimable; 4950 unsigned long min_wmark = min_wmark_pages(zone); 4951 bool wmark; 4952 4953 available = reclaimable = zone_reclaimable_pages(zone); 4954 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 4955 4956 /* 4957 * Would the allocation succeed if we reclaimed all 4958 * reclaimable pages? 4959 */ 4960 wmark = __zone_watermark_ok(zone, order, min_wmark, 4961 ac->highest_zoneidx, alloc_flags, available); 4962 trace_reclaim_retry_zone(z, order, reclaimable, 4963 available, min_wmark, *no_progress_loops, wmark); 4964 if (wmark) { 4965 ret = true; 4966 break; 4967 } 4968 } 4969 4970 /* 4971 * Memory allocation/reclaim might be called from a WQ context and the 4972 * current implementation of the WQ concurrency control doesn't 4973 * recognize that a particular WQ is congested if the worker thread is 4974 * looping without ever sleeping. Therefore we have to do a short sleep 4975 * here rather than calling cond_resched(). 4976 */ 4977 if (current->flags & PF_WQ_WORKER) 4978 schedule_timeout_uninterruptible(1); 4979 else 4980 cond_resched(); 4981 return ret; 4982 } 4983 4984 static inline bool 4985 check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac) 4986 { 4987 /* 4988 * It's possible that cpuset's mems_allowed and the nodemask from 4989 * mempolicy don't intersect. This should be normally dealt with by 4990 * policy_nodemask(), but it's possible to race with cpuset update in 4991 * such a way the check therein was true, and then it became false 4992 * before we got our cpuset_mems_cookie here. 4993 * This assumes that for all allocations, ac->nodemask can come only 4994 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored 4995 * when it does not intersect with the cpuset restrictions) or the 4996 * caller can deal with a violated nodemask. 4997 */ 4998 if (cpusets_enabled() && ac->nodemask && 4999 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { 5000 ac->nodemask = NULL; 5001 return true; 5002 } 5003 5004 /* 5005 * When updating a task's mems_allowed or mempolicy nodemask, it is 5006 * possible to race with parallel threads in such a way that our 5007 * allocation can fail while the mask is being updated. If we are about 5008 * to fail, check if the cpuset changed during allocation and if so, 5009 * retry. 5010 */ 5011 if (read_mems_allowed_retry(cpuset_mems_cookie)) 5012 return true; 5013 5014 return false; 5015 } 5016 5017 static inline struct page * 5018 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 5019 struct alloc_context *ac) 5020 { 5021 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 5022 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 5023 struct page *page = NULL; 5024 unsigned int alloc_flags; 5025 unsigned long did_some_progress; 5026 enum compact_priority compact_priority; 5027 enum compact_result compact_result; 5028 int compaction_retries; 5029 int no_progress_loops; 5030 unsigned int cpuset_mems_cookie; 5031 unsigned int zonelist_iter_cookie; 5032 int reserve_flags; 5033 5034 /* 5035 * We also sanity check to catch abuse of atomic reserves being used by 5036 * callers that are not in atomic context. 
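 * For example, a caller combining __GFP_ATOMIC with a reclaiming mask such
 * as GFP_KERNEL (which carries __GFP_DIRECT_RECLAIM) trips the warning
 * below and has __GFP_ATOMIC stripped before we continue.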
5037 */ 5038 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 5039 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 5040 gfp_mask &= ~__GFP_ATOMIC; 5041 5042 restart: 5043 compaction_retries = 0; 5044 no_progress_loops = 0; 5045 compact_priority = DEF_COMPACT_PRIORITY; 5046 cpuset_mems_cookie = read_mems_allowed_begin(); 5047 zonelist_iter_cookie = zonelist_iter_begin(); 5048 5049 /* 5050 * The fast path uses conservative alloc_flags to succeed only until 5051 * kswapd needs to be woken up, and to avoid the cost of setting up 5052 * alloc_flags precisely. So we do that now. 5053 */ 5054 alloc_flags = gfp_to_alloc_flags(gfp_mask); 5055 5056 /* 5057 * We need to recalculate the starting point for the zonelist iterator 5058 * because we might have used different nodemask in the fast path, or 5059 * there was a cpuset modification and we are retrying - otherwise we 5060 * could end up iterating over non-eligible zones endlessly. 5061 */ 5062 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5063 ac->highest_zoneidx, ac->nodemask); 5064 if (!ac->preferred_zoneref->zone) 5065 goto nopage; 5066 5067 /* 5068 * Check for insane configurations where the cpuset doesn't contain 5069 * any suitable zone to satisfy the request - e.g. non-movable 5070 * GFP_HIGHUSER allocations from MOVABLE nodes only. 5071 */ 5072 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { 5073 struct zoneref *z = first_zones_zonelist(ac->zonelist, 5074 ac->highest_zoneidx, 5075 &cpuset_current_mems_allowed); 5076 if (!z->zone) 5077 goto nopage; 5078 } 5079 5080 if (alloc_flags & ALLOC_KSWAPD) 5081 wake_all_kswapds(order, gfp_mask, ac); 5082 5083 /* 5084 * The adjusted alloc_flags might result in immediate success, so try 5085 * that first 5086 */ 5087 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 5088 if (page) 5089 goto got_pg; 5090 5091 /* 5092 * For costly allocations, try direct compaction first, as it's likely 5093 * that we have enough base pages and don't need to reclaim. For non- 5094 * movable high-order allocations, do that as well, as compaction will 5095 * try prevent permanent fragmentation by migrating from blocks of the 5096 * same migratetype. 5097 * Don't try this for allocations that are allowed to ignore 5098 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 5099 */ 5100 if (can_direct_reclaim && 5101 (costly_order || 5102 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 5103 && !gfp_pfmemalloc_allowed(gfp_mask)) { 5104 page = __alloc_pages_direct_compact(gfp_mask, order, 5105 alloc_flags, ac, 5106 INIT_COMPACT_PRIORITY, 5107 &compact_result); 5108 if (page) 5109 goto got_pg; 5110 5111 /* 5112 * Checks for costly allocations with __GFP_NORETRY, which 5113 * includes some THP page fault allocations 5114 */ 5115 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 5116 /* 5117 * If allocating entire pageblock(s) and compaction 5118 * failed because all zones are below low watermarks 5119 * or is prohibited because it recently failed at this 5120 * order, fail immediately unless the allocator has 5121 * requested compaction and reclaim retry. 5122 * 5123 * Reclaim is 5124 * - potentially very expensive because zones are far 5125 * below their low watermarks or this is part of very 5126 * bursty high order allocations, 5127 * - not guaranteed to help because isolate_freepages() 5128 * may not iterate over freed pages as part of its 5129 * linear scan, and 5130 * - unlikely to make entire pageblocks free on its 5131 * own. 
5132 */ 5133 if (compact_result == COMPACT_SKIPPED || 5134 compact_result == COMPACT_DEFERRED) 5135 goto nopage; 5136 5137 /* 5138 * Looks like reclaim/compaction is worth trying, but 5139 * sync compaction could be very expensive, so keep 5140 * using async compaction. 5141 */ 5142 compact_priority = INIT_COMPACT_PRIORITY; 5143 } 5144 } 5145 5146 retry: 5147 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 5148 if (alloc_flags & ALLOC_KSWAPD) 5149 wake_all_kswapds(order, gfp_mask, ac); 5150 5151 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); 5152 if (reserve_flags) 5153 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | 5154 (alloc_flags & ALLOC_KSWAPD); 5155 5156 /* 5157 * Reset the nodemask and zonelist iterators if memory policies can be 5158 * ignored. These allocations are high priority and system rather than 5159 * user oriented. 5160 */ 5161 if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) { 5162 ac->nodemask = NULL; 5163 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5164 ac->highest_zoneidx, ac->nodemask); 5165 } 5166 5167 /* Attempt with potentially adjusted zonelist and alloc_flags */ 5168 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 5169 if (page) 5170 goto got_pg; 5171 5172 /* Caller is not willing to reclaim, we can't balance anything */ 5173 if (!can_direct_reclaim) 5174 goto nopage; 5175 5176 /* Avoid recursion of direct reclaim */ 5177 if (current->flags & PF_MEMALLOC) 5178 goto nopage; 5179 5180 /* Try direct reclaim and then allocating */ 5181 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 5182 &did_some_progress); 5183 if (page) 5184 goto got_pg; 5185 5186 /* Try direct compaction and then allocating */ 5187 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 5188 compact_priority, &compact_result); 5189 if (page) 5190 goto got_pg; 5191 5192 /* Do not loop if specifically requested */ 5193 if (gfp_mask & __GFP_NORETRY) 5194 goto nopage; 5195 5196 /* 5197 * Do not retry costly high order allocations unless they are 5198 * __GFP_RETRY_MAYFAIL 5199 */ 5200 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) 5201 goto nopage; 5202 5203 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 5204 did_some_progress > 0, &no_progress_loops)) 5205 goto retry; 5206 5207 /* 5208 * It doesn't make any sense to retry for the compaction if the order-0 5209 * reclaim is not able to make any progress because the current 5210 * implementation of the compaction depends on the sufficient amount 5211 * of free memory (see __compaction_suitable) 5212 */ 5213 if (did_some_progress > 0 && 5214 should_compact_retry(ac, order, alloc_flags, 5215 compact_result, &compact_priority, 5216 &compaction_retries)) 5217 goto retry; 5218 5219 5220 /* 5221 * Deal with possible cpuset update races or zonelist updates to avoid 5222 * a unnecessary OOM kill. 
5223 */ 5224 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 5225 check_retry_zonelist(zonelist_iter_cookie)) 5226 goto restart; 5227 5228 /* Reclaim has failed us, start killing things */ 5229 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 5230 if (page) 5231 goto got_pg; 5232 5233 /* Avoid allocations with no watermarks from looping endlessly */ 5234 if (tsk_is_oom_victim(current) && 5235 (alloc_flags & ALLOC_OOM || 5236 (gfp_mask & __GFP_NOMEMALLOC))) 5237 goto nopage; 5238 5239 /* Retry as long as the OOM killer is making progress */ 5240 if (did_some_progress) { 5241 no_progress_loops = 0; 5242 goto retry; 5243 } 5244 5245 nopage: 5246 /* 5247 * Deal with possible cpuset update races or zonelist updates to avoid 5248 * a unnecessary OOM kill. 5249 */ 5250 if (check_retry_cpuset(cpuset_mems_cookie, ac) || 5251 check_retry_zonelist(zonelist_iter_cookie)) 5252 goto restart; 5253 5254 /* 5255 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 5256 * we always retry 5257 */ 5258 if (gfp_mask & __GFP_NOFAIL) { 5259 /* 5260 * All existing users of the __GFP_NOFAIL are blockable, so warn 5261 * of any new users that actually require GFP_NOWAIT 5262 */ 5263 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask)) 5264 goto fail; 5265 5266 /* 5267 * PF_MEMALLOC request from this context is rather bizarre 5268 * because we cannot reclaim anything and only can loop waiting 5269 * for somebody to do a work for us 5270 */ 5271 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask); 5272 5273 /* 5274 * non failing costly orders are a hard requirement which we 5275 * are not prepared for much so let's warn about these users 5276 * so that we can identify them and convert them to something 5277 * else. 5278 */ 5279 WARN_ON_ONCE_GFP(costly_order, gfp_mask); 5280 5281 /* 5282 * Help non-failing allocations by giving them access to memory 5283 * reserves but do not use ALLOC_NO_WATERMARKS because this 5284 * could deplete whole memory reserves which would just make 5285 * the situation worse 5286 */ 5287 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); 5288 if (page) 5289 goto got_pg; 5290 5291 cond_resched(); 5292 goto retry; 5293 } 5294 fail: 5295 warn_alloc(gfp_mask, ac->nodemask, 5296 "page allocation failure: order:%u", order); 5297 got_pg: 5298 return page; 5299 } 5300 5301 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 5302 int preferred_nid, nodemask_t *nodemask, 5303 struct alloc_context *ac, gfp_t *alloc_gfp, 5304 unsigned int *alloc_flags) 5305 { 5306 ac->highest_zoneidx = gfp_zone(gfp_mask); 5307 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); 5308 ac->nodemask = nodemask; 5309 ac->migratetype = gfp_migratetype(gfp_mask); 5310 5311 if (cpusets_enabled()) { 5312 *alloc_gfp |= __GFP_HARDWALL; 5313 /* 5314 * When we are in the interrupt context, it is irrelevant 5315 * to the current task context. It means that any node ok. 
5316 */ 5317 if (in_task() && !ac->nodemask) 5318 ac->nodemask = &cpuset_current_mems_allowed; 5319 else 5320 *alloc_flags |= ALLOC_CPUSET; 5321 } 5322 5323 might_alloc(gfp_mask); 5324 5325 if (should_fail_alloc_page(gfp_mask, order)) 5326 return false; 5327 5328 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); 5329 5330 /* Dirty zone balancing only done in the fast path */ 5331 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 5332 5333 /* 5334 * The preferred zone is used for statistics but crucially it is 5335 * also used as the starting point for the zonelist iterator. It 5336 * may get reset for allocations that ignore memory policies. 5337 */ 5338 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 5339 ac->highest_zoneidx, ac->nodemask); 5340 5341 return true; 5342 } 5343 5344 /* 5345 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array 5346 * @gfp: GFP flags for the allocation 5347 * @preferred_nid: The preferred NUMA node ID to allocate from 5348 * @nodemask: Set of nodes to allocate from, may be NULL 5349 * @nr_pages: The number of pages desired on the list or array 5350 * @page_list: Optional list to store the allocated pages 5351 * @page_array: Optional array to store the pages 5352 * 5353 * This is a batched version of the page allocator that attempts to 5354 * allocate nr_pages quickly. Pages are added to page_list if page_list 5355 * is not NULL, otherwise it is assumed that the page_array is valid. 5356 * 5357 * For lists, nr_pages is the number of pages that should be allocated. 5358 * 5359 * For arrays, only NULL elements are populated with pages and nr_pages 5360 * is the maximum number of pages that will be stored in the array. 5361 * 5362 * Returns the number of pages on the list or array. 5363 */ 5364 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, 5365 nodemask_t *nodemask, int nr_pages, 5366 struct list_head *page_list, 5367 struct page **page_array) 5368 { 5369 struct page *page; 5370 unsigned long flags; 5371 unsigned long __maybe_unused UP_flags; 5372 struct zone *zone; 5373 struct zoneref *z; 5374 struct per_cpu_pages *pcp; 5375 struct list_head *pcp_list; 5376 struct alloc_context ac; 5377 gfp_t alloc_gfp; 5378 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5379 int nr_populated = 0, nr_account = 0; 5380 5381 /* 5382 * Skip populated array elements to determine if any pages need 5383 * to be allocated before disabling IRQs. 5384 */ 5385 while (page_array && nr_populated < nr_pages && page_array[nr_populated]) 5386 nr_populated++; 5387 5388 /* No pages requested? */ 5389 if (unlikely(nr_pages <= 0)) 5390 goto out; 5391 5392 /* Already populated array? */ 5393 if (unlikely(page_array && nr_pages - nr_populated == 0)) 5394 goto out; 5395 5396 /* Bulk allocator does not support memcg accounting. */ 5397 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT)) 5398 goto failed; 5399 5400 /* Use the single page allocator for one page. */ 5401 if (nr_pages - nr_populated == 1) 5402 goto failed; 5403 5404 #ifdef CONFIG_PAGE_OWNER 5405 /* 5406 * PAGE_OWNER may recurse into the allocator to allocate space to 5407 * save the stack with pagesets.lock held. Releasing/reacquiring 5408 * removes much of the performance benefit of bulk allocation so 5409 * force the caller to allocate one page at a time as it'll have 5410 * similar performance to added complexity to the bulk allocator. 
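 *
 * For reference, a typical array-based caller of this interface looks
 * roughly like the sketch below (most users go through the
 * alloc_pages_bulk_array() style wrappers rather than calling it directly):
 *
 *	struct page *pages[8] = { NULL };
 *	unsigned long nr;
 *
 *	nr = __alloc_pages_bulk(GFP_KERNEL, numa_mem_id(), NULL, 8,
 *				NULL, pages);
 *
 * On return, slots 0 .. nr-1 are populated and the remainder stay NULL.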
5411 */ 5412 if (static_branch_unlikely(&page_owner_inited)) 5413 goto failed; 5414 #endif 5415 5416 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ 5417 gfp &= gfp_allowed_mask; 5418 alloc_gfp = gfp; 5419 if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) 5420 goto out; 5421 gfp = alloc_gfp; 5422 5423 /* Find an allowed local zone that meets the low watermark. */ 5424 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 5425 unsigned long mark; 5426 5427 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && 5428 !__cpuset_zone_allowed(zone, gfp)) { 5429 continue; 5430 } 5431 5432 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && 5433 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { 5434 goto failed; 5435 } 5436 5437 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; 5438 if (zone_watermark_fast(zone, 0, mark, 5439 zonelist_zone_idx(ac.preferred_zoneref), 5440 alloc_flags, gfp)) { 5441 break; 5442 } 5443 } 5444 5445 /* 5446 * If there are no allowed local zones that meets the watermarks then 5447 * try to allocate a single page and reclaim if necessary. 5448 */ 5449 if (unlikely(!zone)) 5450 goto failed; 5451 5452 /* Is a parallel drain in progress? */ 5453 pcp_trylock_prepare(UP_flags); 5454 pcp = pcp_spin_trylock_irqsave(zone->per_cpu_pageset, flags); 5455 if (!pcp) 5456 goto failed_irq; 5457 5458 /* Attempt the batch allocation */ 5459 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; 5460 while (nr_populated < nr_pages) { 5461 5462 /* Skip existing pages */ 5463 if (page_array && page_array[nr_populated]) { 5464 nr_populated++; 5465 continue; 5466 } 5467 5468 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, 5469 pcp, pcp_list); 5470 if (unlikely(!page)) { 5471 /* Try and allocate at least one page */ 5472 if (!nr_account) { 5473 pcp_spin_unlock_irqrestore(pcp, flags); 5474 goto failed_irq; 5475 } 5476 break; 5477 } 5478 nr_account++; 5479 5480 prep_new_page(page, 0, gfp, 0); 5481 if (page_list) 5482 list_add(&page->lru, page_list); 5483 else 5484 page_array[nr_populated] = page; 5485 nr_populated++; 5486 } 5487 5488 pcp_spin_unlock_irqrestore(pcp, flags); 5489 pcp_trylock_finish(UP_flags); 5490 5491 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); 5492 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); 5493 5494 out: 5495 return nr_populated; 5496 5497 failed_irq: 5498 pcp_trylock_finish(UP_flags); 5499 5500 failed: 5501 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); 5502 if (page) { 5503 if (page_list) 5504 list_add(&page->lru, page_list); 5505 else 5506 page_array[nr_populated] = page; 5507 nr_populated++; 5508 } 5509 5510 goto out; 5511 } 5512 EXPORT_SYMBOL_GPL(__alloc_pages_bulk); 5513 5514 /* 5515 * This is the 'heart' of the zoned buddy allocator. 5516 */ 5517 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, 5518 nodemask_t *nodemask) 5519 { 5520 struct page *page; 5521 unsigned int alloc_flags = ALLOC_WMARK_LOW; 5522 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ 5523 struct alloc_context ac = { }; 5524 5525 /* 5526 * There are several places where we assume that the order value is sane 5527 * so bail out early if the request is out of bound. 5528 */ 5529 if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp)) 5530 return NULL; 5531 5532 gfp &= gfp_allowed_mask; 5533 /* 5534 * Apply scoped allocation constraints. 
This is mainly about GFP_NOFS 5535 * resp. GFP_NOIO which has to be inherited for all allocation requests 5536 * from a particular context which has been marked by 5537 * memalloc_no{fs,io}_{save,restore}. And PF_MEMALLOC_PIN which ensures 5538 * movable zones are not used during allocation. 5539 */ 5540 gfp = current_gfp_context(gfp); 5541 alloc_gfp = gfp; 5542 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, 5543 &alloc_gfp, &alloc_flags)) 5544 return NULL; 5545 5546 /* 5547 * Forbid the first pass from falling back to types that fragment 5548 * memory until all local zones are considered. 5549 */ 5550 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); 5551 5552 /* First allocation attempt */ 5553 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); 5554 if (likely(page)) 5555 goto out; 5556 5557 alloc_gfp = gfp; 5558 ac.spread_dirty_pages = false; 5559 5560 /* 5561 * Restore the original nodemask if it was potentially replaced with 5562 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 5563 */ 5564 ac.nodemask = nodemask; 5565 5566 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); 5567 5568 out: 5569 if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) && page && 5570 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { 5571 __free_pages(page, order); 5572 page = NULL; 5573 } 5574 5575 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); 5576 kmsan_alloc_page(page, order, alloc_gfp); 5577 5578 return page; 5579 } 5580 EXPORT_SYMBOL(__alloc_pages); 5581 5582 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, 5583 nodemask_t *nodemask) 5584 { 5585 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, 5586 preferred_nid, nodemask); 5587 5588 if (page && order > 1) 5589 prep_transhuge_page(page); 5590 return (struct folio *)page; 5591 } 5592 EXPORT_SYMBOL(__folio_alloc); 5593 5594 /* 5595 * Common helper functions. Never use with __GFP_HIGHMEM because the returned 5596 * address cannot represent highmem pages. Use alloc_pages and then kmap if 5597 * you need to access high mem. 5598 */ 5599 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 5600 { 5601 struct page *page; 5602 5603 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); 5604 if (!page) 5605 return 0; 5606 return (unsigned long) page_address(page); 5607 } 5608 EXPORT_SYMBOL(__get_free_pages); 5609 5610 unsigned long get_zeroed_page(gfp_t gfp_mask) 5611 { 5612 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 5613 } 5614 EXPORT_SYMBOL(get_zeroed_page); 5615 5616 /** 5617 * __free_pages - Free pages allocated with alloc_pages(). 5618 * @page: The page pointer returned from alloc_pages(). 5619 * @order: The order of the allocation. 5620 * 5621 * This function can free multi-page allocations that are not compound 5622 * pages. It does not check that the @order passed in matches that of 5623 * the allocation, so it is easy to leak memory. Freeing more memory 5624 * than was allocated will probably emit a warning. 5625 * 5626 * If the last reference to this page is speculative, it will be released 5627 * by put_page() which only frees the first page of a non-compound 5628 * allocation. To prevent the remaining pages from being leaked, we free 5629 * the subsequent pages here. If you want to use the page's reference 5630 * count to decide when to free the allocation, you should allocate a 5631 * compound page, and use put_page() instead of __free_pages(). 
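 *
 * A minimal usage sketch (illustrative only, not taken from an in-tree
 * caller); note that the order passed to __free_pages() must match the
 * one used for the allocation:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		void *addr = page_address(page);
 *
 *		... use the four contiguous pages starting at addr ...
 *		__free_pages(page, 2);
 *	}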
5632 * 5633 * Context: May be called in interrupt context or while holding a normal 5634 * spinlock, but not in NMI context or while holding a raw spinlock. 5635 */ 5636 void __free_pages(struct page *page, unsigned int order) 5637 { 5638 if (put_page_testzero(page)) 5639 free_the_page(page, order); 5640 else if (!PageHead(page)) 5641 while (order-- > 0) 5642 free_the_page(page + (1 << order), order); 5643 } 5644 EXPORT_SYMBOL(__free_pages); 5645 5646 void free_pages(unsigned long addr, unsigned int order) 5647 { 5648 if (addr != 0) { 5649 VM_BUG_ON(!virt_addr_valid((void *)addr)); 5650 __free_pages(virt_to_page((void *)addr), order); 5651 } 5652 } 5653 5654 EXPORT_SYMBOL(free_pages); 5655 5656 /* 5657 * Page Fragment: 5658 * An arbitrary-length arbitrary-offset area of memory which resides 5659 * within a 0 or higher order page. Multiple fragments within that page 5660 * are individually refcounted, in the page's reference counter. 5661 * 5662 * The page_frag functions below provide a simple allocation framework for 5663 * page fragments. This is used by the network stack and network device 5664 * drivers to provide a backing region of memory for use as either an 5665 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 5666 */ 5667 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 5668 gfp_t gfp_mask) 5669 { 5670 struct page *page = NULL; 5671 gfp_t gfp = gfp_mask; 5672 5673 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5674 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 5675 __GFP_NOMEMALLOC; 5676 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 5677 PAGE_FRAG_CACHE_MAX_ORDER); 5678 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 5679 #endif 5680 if (unlikely(!page)) 5681 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 5682 5683 nc->va = page ? page_address(page) : NULL; 5684 5685 return page; 5686 } 5687 5688 void __page_frag_cache_drain(struct page *page, unsigned int count) 5689 { 5690 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 5691 5692 if (page_ref_sub_and_test(page, count)) 5693 free_the_page(page, compound_order(page)); 5694 } 5695 EXPORT_SYMBOL(__page_frag_cache_drain); 5696 5697 void *page_frag_alloc_align(struct page_frag_cache *nc, 5698 unsigned int fragsz, gfp_t gfp_mask, 5699 unsigned int align_mask) 5700 { 5701 unsigned int size = PAGE_SIZE; 5702 struct page *page; 5703 int offset; 5704 5705 if (unlikely(!nc->va)) { 5706 refill: 5707 page = __page_frag_cache_refill(nc, gfp_mask); 5708 if (!page) 5709 return NULL; 5710 5711 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5712 /* if size can vary use size else just use PAGE_SIZE */ 5713 size = nc->size; 5714 #endif 5715 /* Even if we own the page, we do not use atomic_set(). 5716 * This would break get_page_unless_zero() users. 
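 * A speculative reference can be taken at any time through
 * get_page_unless_zero(); a plain atomic_set() could overwrite such a
 * concurrent increment and lose that reference, so only atomic add/sub
 * operations are used on the refcount here.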
5717 */ 5718 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); 5719 5720 /* reset page count bias and offset to start of new frag */ 5721 nc->pfmemalloc = page_is_pfmemalloc(page); 5722 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5723 nc->offset = size; 5724 } 5725 5726 offset = nc->offset - fragsz; 5727 if (unlikely(offset < 0)) { 5728 page = virt_to_page(nc->va); 5729 5730 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 5731 goto refill; 5732 5733 if (unlikely(nc->pfmemalloc)) { 5734 free_the_page(page, compound_order(page)); 5735 goto refill; 5736 } 5737 5738 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 5739 /* if size can vary use size else just use PAGE_SIZE */ 5740 size = nc->size; 5741 #endif 5742 /* OK, page count is 0, we can safely set it */ 5743 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); 5744 5745 /* reset page count bias and offset to start of new frag */ 5746 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; 5747 offset = size - fragsz; 5748 if (unlikely(offset < 0)) { 5749 /* 5750 * The caller is trying to allocate a fragment 5751 * with fragsz > PAGE_SIZE but the cache isn't big 5752 * enough to satisfy the request, this may 5753 * happen in low memory conditions. 5754 * We don't release the cache page because 5755 * it could make memory pressure worse 5756 * so we simply return NULL here. 5757 */ 5758 return NULL; 5759 } 5760 } 5761 5762 nc->pagecnt_bias--; 5763 offset &= align_mask; 5764 nc->offset = offset; 5765 5766 return nc->va + offset; 5767 } 5768 EXPORT_SYMBOL(page_frag_alloc_align); 5769 5770 /* 5771 * Frees a page fragment allocated out of either a compound or order 0 page. 5772 */ 5773 void page_frag_free(void *addr) 5774 { 5775 struct page *page = virt_to_head_page(addr); 5776 5777 if (unlikely(put_page_testzero(page))) 5778 free_the_page(page, compound_order(page)); 5779 } 5780 EXPORT_SYMBOL(page_frag_free); 5781 5782 static void *make_alloc_exact(unsigned long addr, unsigned int order, 5783 size_t size) 5784 { 5785 if (addr) { 5786 unsigned long alloc_end = addr + (PAGE_SIZE << order); 5787 unsigned long used = addr + PAGE_ALIGN(size); 5788 5789 split_page(virt_to_page((void *)addr), order); 5790 while (used < alloc_end) { 5791 free_page(used); 5792 used += PAGE_SIZE; 5793 } 5794 } 5795 return (void *)addr; 5796 } 5797 5798 /** 5799 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 5800 * @size: the number of bytes to allocate 5801 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5802 * 5803 * This function is similar to alloc_pages(), except that it allocates the 5804 * minimum number of pages to satisfy the request. alloc_pages() can only 5805 * allocate memory in power-of-two pages. 5806 * 5807 * This function is also limited by MAX_ORDER. 5808 * 5809 * Memory allocated by this function must be released by free_pages_exact(). 5810 * 5811 * Return: pointer to the allocated area or %NULL in case of error. 5812 */ 5813 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 5814 { 5815 unsigned int order = get_order(size); 5816 unsigned long addr; 5817 5818 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5819 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5820 5821 addr = __get_free_pages(gfp_mask, order); 5822 return make_alloc_exact(addr, order, size); 5823 } 5824 EXPORT_SYMBOL(alloc_pages_exact); 5825 5826 /** 5827 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 5828 * pages on a node. 
5829 * @nid: the preferred node ID where memory should be allocated 5830 * @size: the number of bytes to allocate 5831 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP 5832 * 5833 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 5834 * back. 5835 * 5836 * Return: pointer to the allocated area or %NULL in case of error. 5837 */ 5838 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 5839 { 5840 unsigned int order = get_order(size); 5841 struct page *p; 5842 5843 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) 5844 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); 5845 5846 p = alloc_pages_node(nid, gfp_mask, order); 5847 if (!p) 5848 return NULL; 5849 return make_alloc_exact((unsigned long)page_address(p), order, size); 5850 } 5851 5852 /** 5853 * free_pages_exact - release memory allocated via alloc_pages_exact() 5854 * @virt: the value returned by alloc_pages_exact. 5855 * @size: size of allocation, same value as passed to alloc_pages_exact(). 5856 * 5857 * Release the memory allocated by a previous call to alloc_pages_exact. 5858 */ 5859 void free_pages_exact(void *virt, size_t size) 5860 { 5861 unsigned long addr = (unsigned long)virt; 5862 unsigned long end = addr + PAGE_ALIGN(size); 5863 5864 while (addr < end) { 5865 free_page(addr); 5866 addr += PAGE_SIZE; 5867 } 5868 } 5869 EXPORT_SYMBOL(free_pages_exact); 5870 5871 /** 5872 * nr_free_zone_pages - count number of pages beyond high watermark 5873 * @offset: The zone index of the highest zone 5874 * 5875 * nr_free_zone_pages() counts the number of pages which are beyond the 5876 * high watermark within all zones at or below a given zone index. For each 5877 * zone, the number of pages is calculated as: 5878 * 5879 * nr_free_zone_pages = managed_pages - high_pages 5880 * 5881 * Return: number of pages beyond high watermark. 5882 */ 5883 static unsigned long nr_free_zone_pages(int offset) 5884 { 5885 struct zoneref *z; 5886 struct zone *zone; 5887 5888 /* Just pick one node, since fallback list is circular */ 5889 unsigned long sum = 0; 5890 5891 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 5892 5893 for_each_zone_zonelist(zone, z, zonelist, offset) { 5894 unsigned long size = zone_managed_pages(zone); 5895 unsigned long high = high_wmark_pages(zone); 5896 if (size > high) 5897 sum += size - high; 5898 } 5899 5900 return sum; 5901 } 5902 5903 /** 5904 * nr_free_buffer_pages - count number of pages beyond high watermark 5905 * 5906 * nr_free_buffer_pages() counts the number of pages which are beyond the high 5907 * watermark within ZONE_DMA and ZONE_NORMAL. 5908 * 5909 * Return: number of pages beyond high watermark within ZONE_DMA and 5910 * ZONE_NORMAL. 
5911 */ 5912 unsigned long nr_free_buffer_pages(void) 5913 { 5914 return nr_free_zone_pages(gfp_zone(GFP_USER)); 5915 } 5916 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 5917 5918 static inline void show_node(struct zone *zone) 5919 { 5920 if (IS_ENABLED(CONFIG_NUMA)) 5921 printk("Node %d ", zone_to_nid(zone)); 5922 } 5923 5924 long si_mem_available(void) 5925 { 5926 long available; 5927 unsigned long pagecache; 5928 unsigned long wmark_low = 0; 5929 unsigned long pages[NR_LRU_LISTS]; 5930 unsigned long reclaimable; 5931 struct zone *zone; 5932 int lru; 5933 5934 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 5935 pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 5936 5937 for_each_zone(zone) 5938 wmark_low += low_wmark_pages(zone); 5939 5940 /* 5941 * Estimate the amount of memory available for userspace allocations, 5942 * without causing swapping or OOM. 5943 */ 5944 available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; 5945 5946 /* 5947 * Not all the page cache can be freed, otherwise the system will 5948 * start swapping or thrashing. Assume at least half of the page 5949 * cache, or the low watermark worth of cache, needs to stay. 5950 */ 5951 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 5952 pagecache -= min(pagecache / 2, wmark_low); 5953 available += pagecache; 5954 5955 /* 5956 * Part of the reclaimable slab and other kernel memory consists of 5957 * items that are in use, and cannot be freed. Cap this estimate at the 5958 * low watermark. 5959 */ 5960 reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + 5961 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); 5962 available += reclaimable - min(reclaimable / 2, wmark_low); 5963 5964 if (available < 0) 5965 available = 0; 5966 return available; 5967 } 5968 EXPORT_SYMBOL_GPL(si_mem_available); 5969 5970 void si_meminfo(struct sysinfo *val) 5971 { 5972 val->totalram = totalram_pages(); 5973 val->sharedram = global_node_page_state(NR_SHMEM); 5974 val->freeram = global_zone_page_state(NR_FREE_PAGES); 5975 val->bufferram = nr_blockdev_pages(); 5976 val->totalhigh = totalhigh_pages(); 5977 val->freehigh = nr_free_highpages(); 5978 val->mem_unit = PAGE_SIZE; 5979 } 5980 5981 EXPORT_SYMBOL(si_meminfo); 5982 5983 #ifdef CONFIG_NUMA 5984 void si_meminfo_node(struct sysinfo *val, int nid) 5985 { 5986 int zone_type; /* needs to be signed */ 5987 unsigned long managed_pages = 0; 5988 unsigned long managed_highpages = 0; 5989 unsigned long free_highpages = 0; 5990 pg_data_t *pgdat = NODE_DATA(nid); 5991 5992 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 5993 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); 5994 val->totalram = managed_pages; 5995 val->sharedram = node_page_state(pgdat, NR_SHMEM); 5996 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 5997 #ifdef CONFIG_HIGHMEM 5998 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 5999 struct zone *zone = &pgdat->node_zones[zone_type]; 6000 6001 if (is_highmem(zone)) { 6002 managed_highpages += zone_managed_pages(zone); 6003 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 6004 } 6005 } 6006 val->totalhigh = managed_highpages; 6007 val->freehigh = free_highpages; 6008 #else 6009 val->totalhigh = managed_highpages; 6010 val->freehigh = free_highpages; 6011 #endif 6012 val->mem_unit = PAGE_SIZE; 6013 } 6014 #endif 6015 6016 /* 6017 * Determine whether the node should be displayed or not, depending on whether 6018 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 
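 * Returns true when the node should be skipped (i.e. not displayed).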
6019 */ 6020 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 6021 { 6022 if (!(flags & SHOW_MEM_FILTER_NODES)) 6023 return false; 6024 6025 /* 6026 * no node mask - aka implicit memory numa policy. Do not bother with 6027 * the synchronization - read_mems_allowed_begin - because we do not 6028 * have to be precise here. 6029 */ 6030 if (!nodemask) 6031 nodemask = &cpuset_current_mems_allowed; 6032 6033 return !node_isset(nid, *nodemask); 6034 } 6035 6036 #define K(x) ((x) << (PAGE_SHIFT-10)) 6037 6038 static void show_migration_types(unsigned char type) 6039 { 6040 static const char types[MIGRATE_TYPES] = { 6041 [MIGRATE_UNMOVABLE] = 'U', 6042 [MIGRATE_MOVABLE] = 'M', 6043 [MIGRATE_RECLAIMABLE] = 'E', 6044 [MIGRATE_HIGHATOMIC] = 'H', 6045 #ifdef CONFIG_CMA 6046 [MIGRATE_CMA] = 'C', 6047 #endif 6048 #ifdef CONFIG_MEMORY_ISOLATION 6049 [MIGRATE_ISOLATE] = 'I', 6050 #endif 6051 }; 6052 char tmp[MIGRATE_TYPES + 1]; 6053 char *p = tmp; 6054 int i; 6055 6056 for (i = 0; i < MIGRATE_TYPES; i++) { 6057 if (type & (1 << i)) 6058 *p++ = types[i]; 6059 } 6060 6061 *p = '\0'; 6062 printk(KERN_CONT "(%s) ", tmp); 6063 } 6064 6065 static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) 6066 { 6067 int zone_idx; 6068 for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) 6069 if (zone_managed_pages(pgdat->node_zones + zone_idx)) 6070 return true; 6071 return false; 6072 } 6073 6074 /* 6075 * Show free area list (used inside shift_scroll-lock stuff) 6076 * We also calculate the percentage fragmentation. We do this by counting the 6077 * memory on each free list with the exception of the first item on the list. 6078 * 6079 * Bits in @filter: 6080 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 6081 * cpuset. 
6082 */ 6083 void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) 6084 { 6085 unsigned long free_pcp = 0; 6086 int cpu, nid; 6087 struct zone *zone; 6088 pg_data_t *pgdat; 6089 6090 for_each_populated_zone(zone) { 6091 if (zone_idx(zone) > max_zone_idx) 6092 continue; 6093 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6094 continue; 6095 6096 for_each_online_cpu(cpu) 6097 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 6098 } 6099 6100 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 6101 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 6102 " unevictable:%lu dirty:%lu writeback:%lu\n" 6103 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 6104 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 6105 " kernel_misc_reclaimable:%lu\n" 6106 " free:%lu free_pcp:%lu free_cma:%lu\n", 6107 global_node_page_state(NR_ACTIVE_ANON), 6108 global_node_page_state(NR_INACTIVE_ANON), 6109 global_node_page_state(NR_ISOLATED_ANON), 6110 global_node_page_state(NR_ACTIVE_FILE), 6111 global_node_page_state(NR_INACTIVE_FILE), 6112 global_node_page_state(NR_ISOLATED_FILE), 6113 global_node_page_state(NR_UNEVICTABLE), 6114 global_node_page_state(NR_FILE_DIRTY), 6115 global_node_page_state(NR_WRITEBACK), 6116 global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), 6117 global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), 6118 global_node_page_state(NR_FILE_MAPPED), 6119 global_node_page_state(NR_SHMEM), 6120 global_node_page_state(NR_PAGETABLE), 6121 global_zone_page_state(NR_BOUNCE), 6122 global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), 6123 global_zone_page_state(NR_FREE_PAGES), 6124 free_pcp, 6125 global_zone_page_state(NR_FREE_CMA_PAGES)); 6126 6127 for_each_online_pgdat(pgdat) { 6128 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 6129 continue; 6130 if (!node_has_managed_zones(pgdat, max_zone_idx)) 6131 continue; 6132 6133 printk("Node %d" 6134 " active_anon:%lukB" 6135 " inactive_anon:%lukB" 6136 " active_file:%lukB" 6137 " inactive_file:%lukB" 6138 " unevictable:%lukB" 6139 " isolated(anon):%lukB" 6140 " isolated(file):%lukB" 6141 " mapped:%lukB" 6142 " dirty:%lukB" 6143 " writeback:%lukB" 6144 " shmem:%lukB" 6145 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6146 " shmem_thp: %lukB" 6147 " shmem_pmdmapped: %lukB" 6148 " anon_thp: %lukB" 6149 #endif 6150 " writeback_tmp:%lukB" 6151 " kernel_stack:%lukB" 6152 #ifdef CONFIG_SHADOW_CALL_STACK 6153 " shadow_call_stack:%lukB" 6154 #endif 6155 " pagetables:%lukB" 6156 " all_unreclaimable? 
%s" 6157 "\n", 6158 pgdat->node_id, 6159 K(node_page_state(pgdat, NR_ACTIVE_ANON)), 6160 K(node_page_state(pgdat, NR_INACTIVE_ANON)), 6161 K(node_page_state(pgdat, NR_ACTIVE_FILE)), 6162 K(node_page_state(pgdat, NR_INACTIVE_FILE)), 6163 K(node_page_state(pgdat, NR_UNEVICTABLE)), 6164 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 6165 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 6166 K(node_page_state(pgdat, NR_FILE_MAPPED)), 6167 K(node_page_state(pgdat, NR_FILE_DIRTY)), 6168 K(node_page_state(pgdat, NR_WRITEBACK)), 6169 K(node_page_state(pgdat, NR_SHMEM)), 6170 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6171 K(node_page_state(pgdat, NR_SHMEM_THPS)), 6172 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), 6173 K(node_page_state(pgdat, NR_ANON_THPS)), 6174 #endif 6175 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 6176 node_page_state(pgdat, NR_KERNEL_STACK_KB), 6177 #ifdef CONFIG_SHADOW_CALL_STACK 6178 node_page_state(pgdat, NR_KERNEL_SCS_KB), 6179 #endif 6180 K(node_page_state(pgdat, NR_PAGETABLE)), 6181 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 6182 "yes" : "no"); 6183 } 6184 6185 for_each_populated_zone(zone) { 6186 int i; 6187 6188 if (zone_idx(zone) > max_zone_idx) 6189 continue; 6190 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6191 continue; 6192 6193 free_pcp = 0; 6194 for_each_online_cpu(cpu) 6195 free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; 6196 6197 show_node(zone); 6198 printk(KERN_CONT 6199 "%s" 6200 " free:%lukB" 6201 " boost:%lukB" 6202 " min:%lukB" 6203 " low:%lukB" 6204 " high:%lukB" 6205 " reserved_highatomic:%luKB" 6206 " active_anon:%lukB" 6207 " inactive_anon:%lukB" 6208 " active_file:%lukB" 6209 " inactive_file:%lukB" 6210 " unevictable:%lukB" 6211 " writepending:%lukB" 6212 " present:%lukB" 6213 " managed:%lukB" 6214 " mlocked:%lukB" 6215 " bounce:%lukB" 6216 " free_pcp:%lukB" 6217 " local_pcp:%ukB" 6218 " free_cma:%lukB" 6219 "\n", 6220 zone->name, 6221 K(zone_page_state(zone, NR_FREE_PAGES)), 6222 K(zone->watermark_boost), 6223 K(min_wmark_pages(zone)), 6224 K(low_wmark_pages(zone)), 6225 K(high_wmark_pages(zone)), 6226 K(zone->nr_reserved_highatomic), 6227 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 6228 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 6229 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 6230 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 6231 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 6232 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 6233 K(zone->present_pages), 6234 K(zone_managed_pages(zone)), 6235 K(zone_page_state(zone, NR_MLOCK)), 6236 K(zone_page_state(zone, NR_BOUNCE)), 6237 K(free_pcp), 6238 K(this_cpu_read(zone->per_cpu_pageset->count)), 6239 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 6240 printk("lowmem_reserve[]:"); 6241 for (i = 0; i < MAX_NR_ZONES; i++) 6242 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 6243 printk(KERN_CONT "\n"); 6244 } 6245 6246 for_each_populated_zone(zone) { 6247 unsigned int order; 6248 unsigned long nr[MAX_ORDER], flags, total = 0; 6249 unsigned char types[MAX_ORDER]; 6250 6251 if (zone_idx(zone) > max_zone_idx) 6252 continue; 6253 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 6254 continue; 6255 show_node(zone); 6256 printk(KERN_CONT "%s: ", zone->name); 6257 6258 spin_lock_irqsave(&zone->lock, flags); 6259 for (order = 0; order < MAX_ORDER; order++) { 6260 struct free_area *area = &zone->free_area[order]; 6261 int type; 6262 6263 nr[order] = area->nr_free; 6264 total += nr[order] << order; 6265 6266 types[order] = 0; 6267 for (type = 0; type < 
MIGRATE_TYPES; type++) { 6268 if (!free_area_empty(area, type)) 6269 types[order] |= 1 << type; 6270 } 6271 } 6272 spin_unlock_irqrestore(&zone->lock, flags); 6273 for (order = 0; order < MAX_ORDER; order++) { 6274 printk(KERN_CONT "%lu*%lukB ", 6275 nr[order], K(1UL) << order); 6276 if (nr[order]) 6277 show_migration_types(types[order]); 6278 } 6279 printk(KERN_CONT "= %lukB\n", K(total)); 6280 } 6281 6282 for_each_online_node(nid) { 6283 if (show_mem_node_skip(filter, nid, nodemask)) 6284 continue; 6285 hugetlb_show_meminfo_node(nid); 6286 } 6287 6288 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 6289 6290 show_swap_cache_info(); 6291 } 6292 6293 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 6294 { 6295 zoneref->zone = zone; 6296 zoneref->zone_idx = zone_idx(zone); 6297 } 6298 6299 /* 6300 * Builds allocation fallback zone lists. 6301 * 6302 * Add all populated zones of a node to the zonelist. 6303 */ 6304 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) 6305 { 6306 struct zone *zone; 6307 enum zone_type zone_type = MAX_NR_ZONES; 6308 int nr_zones = 0; 6309 6310 do { 6311 zone_type--; 6312 zone = pgdat->node_zones + zone_type; 6313 if (populated_zone(zone)) { 6314 zoneref_set_zone(zone, &zonerefs[nr_zones++]); 6315 check_highest_zone(zone_type); 6316 } 6317 } while (zone_type); 6318 6319 return nr_zones; 6320 } 6321 6322 #ifdef CONFIG_NUMA 6323 6324 static int __parse_numa_zonelist_order(char *s) 6325 { 6326 /* 6327 * We used to support different zonelists modes but they turned 6328 * out to be just not useful. Let's keep the warning in place 6329 * if somebody still use the cmd line parameter so that we do 6330 * not fail it silently 6331 */ 6332 if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) { 6333 pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s); 6334 return -EINVAL; 6335 } 6336 return 0; 6337 } 6338 6339 char numa_zonelist_order[] = "Node"; 6340 6341 /* 6342 * sysctl handler for numa_zonelist_order 6343 */ 6344 int numa_zonelist_order_handler(struct ctl_table *table, int write, 6345 void *buffer, size_t *length, loff_t *ppos) 6346 { 6347 if (write) 6348 return __parse_numa_zonelist_order(buffer); 6349 return proc_dostring(table, write, buffer, length, ppos); 6350 } 6351 6352 6353 static int node_load[MAX_NUMNODES]; 6354 6355 /** 6356 * find_next_best_node - find the next node that should appear in a given node's fallback list 6357 * @node: node whose fallback list we're appending 6358 * @used_node_mask: nodemask_t of already used nodes 6359 * 6360 * We use a number of factors to determine which is the next node that should 6361 * appear on a given node's fallback list. The node should not have appeared 6362 * already in @node's fallback list, and it should be the next closest node 6363 * according to the distance array (which contains arbitrary distance values 6364 * from each node to each node in the system), and should also prefer nodes 6365 * with no CPUs, since presumably they'll have very little allocation pressure 6366 * on them otherwise. 6367 * 6368 * Return: node id of the found node or %NUMA_NO_NODE if no node is found. 
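 *
 * Note on the scoring below: (distance + the small penalties) is scaled by
 * MAX_NUMNODES before node_load[] is added, so node_load[] essentially only
 * breaks ties between candidates that are otherwise ranked equally.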
6369 */ 6370 int find_next_best_node(int node, nodemask_t *used_node_mask) 6371 { 6372 int n, val; 6373 int min_val = INT_MAX; 6374 int best_node = NUMA_NO_NODE; 6375 6376 /* Use the local node if we haven't already */ 6377 if (!node_isset(node, *used_node_mask)) { 6378 node_set(node, *used_node_mask); 6379 return node; 6380 } 6381 6382 for_each_node_state(n, N_MEMORY) { 6383 6384 /* Don't want a node to appear more than once */ 6385 if (node_isset(n, *used_node_mask)) 6386 continue; 6387 6388 /* Use the distance array to find the distance */ 6389 val = node_distance(node, n); 6390 6391 /* Penalize nodes under us ("prefer the next node") */ 6392 val += (n < node); 6393 6394 /* Give preference to headless and unused nodes */ 6395 if (!cpumask_empty(cpumask_of_node(n))) 6396 val += PENALTY_FOR_NODE_WITH_CPUS; 6397 6398 /* Slight preference for less loaded node */ 6399 val *= MAX_NUMNODES; 6400 val += node_load[n]; 6401 6402 if (val < min_val) { 6403 min_val = val; 6404 best_node = n; 6405 } 6406 } 6407 6408 if (best_node >= 0) 6409 node_set(best_node, *used_node_mask); 6410 6411 return best_node; 6412 } 6413 6414 6415 /* 6416 * Build zonelists ordered by node and zones within node. 6417 * This results in maximum locality--normal zone overflows into local 6418 * DMA zone, if any--but risks exhausting DMA zone. 6419 */ 6420 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, 6421 unsigned nr_nodes) 6422 { 6423 struct zoneref *zonerefs; 6424 int i; 6425 6426 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 6427 6428 for (i = 0; i < nr_nodes; i++) { 6429 int nr_zones; 6430 6431 pg_data_t *node = NODE_DATA(node_order[i]); 6432 6433 nr_zones = build_zonerefs_node(node, zonerefs); 6434 zonerefs += nr_zones; 6435 } 6436 zonerefs->zone = NULL; 6437 zonerefs->zone_idx = 0; 6438 } 6439 6440 /* 6441 * Build gfp_thisnode zonelists 6442 */ 6443 static void build_thisnode_zonelists(pg_data_t *pgdat) 6444 { 6445 struct zoneref *zonerefs; 6446 int nr_zones; 6447 6448 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; 6449 nr_zones = build_zonerefs_node(pgdat, zonerefs); 6450 zonerefs += nr_zones; 6451 zonerefs->zone = NULL; 6452 zonerefs->zone_idx = 0; 6453 } 6454 6455 /* 6456 * Build zonelists ordered by zone and nodes within zones. 6457 * This results in conserving DMA zone[s] until all Normal memory is 6458 * exhausted, but results in overflowing to remote node while memory 6459 * may still exist in local DMA zone. 6460 */ 6461 6462 static void build_zonelists(pg_data_t *pgdat) 6463 { 6464 static int node_order[MAX_NUMNODES]; 6465 int node, nr_nodes = 0; 6466 nodemask_t used_mask = NODE_MASK_NONE; 6467 int local_node, prev_node; 6468 6469 /* NUMA-aware ordering of nodes */ 6470 local_node = pgdat->node_id; 6471 prev_node = local_node; 6472 6473 memset(node_order, 0, sizeof(node_order)); 6474 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 6475 /* 6476 * We don't want to pressure a particular node. 6477 * So adding penalty to the first node in same 6478 * distance group to make it round-robin. 
6479 */ 6480 if (node_distance(local_node, node) != 6481 node_distance(local_node, prev_node)) 6482 node_load[node] += 1; 6483 6484 node_order[nr_nodes++] = node; 6485 prev_node = node; 6486 } 6487 6488 build_zonelists_in_node_order(pgdat, node_order, nr_nodes); 6489 build_thisnode_zonelists(pgdat); 6490 pr_info("Fallback order for Node %d: ", local_node); 6491 for (node = 0; node < nr_nodes; node++) 6492 pr_cont("%d ", node_order[node]); 6493 pr_cont("\n"); 6494 } 6495 6496 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6497 /* 6498 * Return node id of node used for "local" allocations. 6499 * I.e., first node id of first zone in arg node's generic zonelist. 6500 * Used for initializing percpu 'numa_mem', which is used primarily 6501 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 6502 */ 6503 int local_memory_node(int node) 6504 { 6505 struct zoneref *z; 6506 6507 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 6508 gfp_zone(GFP_KERNEL), 6509 NULL); 6510 return zone_to_nid(z->zone); 6511 } 6512 #endif 6513 6514 static void setup_min_unmapped_ratio(void); 6515 static void setup_min_slab_ratio(void); 6516 #else /* CONFIG_NUMA */ 6517 6518 static void build_zonelists(pg_data_t *pgdat) 6519 { 6520 int node, local_node; 6521 struct zoneref *zonerefs; 6522 int nr_zones; 6523 6524 local_node = pgdat->node_id; 6525 6526 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; 6527 nr_zones = build_zonerefs_node(pgdat, zonerefs); 6528 zonerefs += nr_zones; 6529 6530 /* 6531 * Now we build the zonelist so that it contains the zones 6532 * of all the other nodes. 6533 * We don't want to pressure a particular node, so when 6534 * building the zones for node N, we make sure that the 6535 * zones coming right after the local ones are those from 6536 * node N+1 (modulo N) 6537 */ 6538 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 6539 if (!node_online(node)) 6540 continue; 6541 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 6542 zonerefs += nr_zones; 6543 } 6544 for (node = 0; node < local_node; node++) { 6545 if (!node_online(node)) 6546 continue; 6547 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs); 6548 zonerefs += nr_zones; 6549 } 6550 6551 zonerefs->zone = NULL; 6552 zonerefs->zone_idx = 0; 6553 } 6554 6555 #endif /* CONFIG_NUMA */ 6556 6557 /* 6558 * Boot pageset table. One per cpu which is going to be used for all 6559 * zones and all nodes. The parameters will be set in such a way 6560 * that an item put on a list will immediately be handed over to 6561 * the buddy list. This is safe since pageset manipulation is done 6562 * with interrupts disabled. 6563 * 6564 * The boot_pagesets must be kept even after bootup is complete for 6565 * unused processors and/or zones. They do play a role for bootstrapping 6566 * hotplugged processors. 6567 * 6568 * zoneinfo_show() and maybe other functions do 6569 * not check if the processor is online before following the pageset pointer. 6570 * Other parts of the kernel may not check if the zone is available. 
6571 */ 6572 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats); 6573 /* These effectively disable the pcplists in the boot pageset completely */ 6574 #define BOOT_PAGESET_HIGH 0 6575 #define BOOT_PAGESET_BATCH 1 6576 static DEFINE_PER_CPU(struct per_cpu_pages, boot_pageset); 6577 static DEFINE_PER_CPU(struct per_cpu_zonestat, boot_zonestats); 6578 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 6579 6580 static void __build_all_zonelists(void *data) 6581 { 6582 int nid; 6583 int __maybe_unused cpu; 6584 pg_data_t *self = data; 6585 6586 write_seqlock(&zonelist_update_seq); 6587 6588 #ifdef CONFIG_NUMA 6589 memset(node_load, 0, sizeof(node_load)); 6590 #endif 6591 6592 /* 6593 * This node is hotadded and no memory is yet present. So just 6594 * building zonelists is fine - no need to touch other nodes. 6595 */ 6596 if (self && !node_online(self->node_id)) { 6597 build_zonelists(self); 6598 } else { 6599 /* 6600 * All possible nodes have pgdat preallocated 6601 * in free_area_init 6602 */ 6603 for_each_node(nid) { 6604 pg_data_t *pgdat = NODE_DATA(nid); 6605 6606 build_zonelists(pgdat); 6607 } 6608 6609 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 6610 /* 6611 * We now know the "local memory node" for each node-- 6612 * i.e., the node of the first zone in the generic zonelist. 6613 * Set up numa_mem percpu variable for on-line cpus. During 6614 * boot, only the boot cpu should be on-line; we'll init the 6615 * secondary cpus' numa_mem as they come on-line. During 6616 * node/memory hotplug, we'll fixup all on-line cpus. 6617 */ 6618 for_each_online_cpu(cpu) 6619 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 6620 #endif 6621 } 6622 6623 write_sequnlock(&zonelist_update_seq); 6624 } 6625 6626 static noinline void __init 6627 build_all_zonelists_init(void) 6628 { 6629 int cpu; 6630 6631 __build_all_zonelists(NULL); 6632 6633 /* 6634 * Initialize the boot_pagesets that are going to be used 6635 * for bootstrapping processors. The real pagesets for 6636 * each zone will be allocated later when the per cpu 6637 * allocator is available. 6638 * 6639 * boot_pagesets are used also for bootstrapping offline 6640 * cpus if the system is already booted because the pagesets 6641 * are needed to initialize allocators on a specific cpu too. 6642 * F.e. the percpu allocator needs the page allocator which 6643 * needs the percpu allocator in order to allocate its pagesets 6644 * (a chicken-egg dilemma). 6645 */ 6646 for_each_possible_cpu(cpu) 6647 per_cpu_pages_init(&per_cpu(boot_pageset, cpu), &per_cpu(boot_zonestats, cpu)); 6648 6649 mminit_verify_zonelist(); 6650 cpuset_init_current_mems_allowed(); 6651 } 6652 6653 /* 6654 * unless system_state == SYSTEM_BOOTING. 6655 * 6656 * __ref due to call of __init annotated helper build_all_zonelists_init 6657 * [protected by SYSTEM_BOOTING]. 6658 */ 6659 void __ref build_all_zonelists(pg_data_t *pgdat) 6660 { 6661 unsigned long vm_total_pages; 6662 6663 if (system_state == SYSTEM_BOOTING) { 6664 build_all_zonelists_init(); 6665 } else { 6666 __build_all_zonelists(pgdat); 6667 /* cpuset refresh routine should be here */ 6668 } 6669 /* Get the number of free pages beyond high watermark in all zones. */ 6670 vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 6671 /* 6672 * Disable grouping by mobility if the number of pages in the 6673 * system is too low to allow the mechanism to work. It would be 6674 * more accurate, but expensive to check per-zone. 
This check is 6675 * made on memory-hotadd so a system can start with mobility 6676 * disabled and enable it later 6677 */ 6678 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 6679 page_group_by_mobility_disabled = 1; 6680 else 6681 page_group_by_mobility_disabled = 0; 6682 6683 pr_info("Built %u zonelists, mobility grouping %s. Total pages: %ld\n", 6684 nr_online_nodes, 6685 page_group_by_mobility_disabled ? "off" : "on", 6686 vm_total_pages); 6687 #ifdef CONFIG_NUMA 6688 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 6689 #endif 6690 } 6691 6692 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 6693 static bool __meminit 6694 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 6695 { 6696 static struct memblock_region *r; 6697 6698 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 6699 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 6700 for_each_mem_region(r) { 6701 if (*pfn < memblock_region_memory_end_pfn(r)) 6702 break; 6703 } 6704 } 6705 if (*pfn >= memblock_region_memory_base_pfn(r) && 6706 memblock_is_mirror(r)) { 6707 *pfn = memblock_region_memory_end_pfn(r); 6708 return true; 6709 } 6710 } 6711 return false; 6712 } 6713 6714 /* 6715 * Initially all pages are reserved - free ones are freed 6716 * up by memblock_free_all() once the early boot process is 6717 * done. Non-atomic initialization, single-pass. 6718 * 6719 * All aligned pageblocks are initialized to the specified migratetype 6720 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 6721 * zone stats (e.g., nr_isolate_pageblock) are touched. 6722 */ 6723 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, 6724 unsigned long start_pfn, unsigned long zone_end_pfn, 6725 enum meminit_context context, 6726 struct vmem_altmap *altmap, int migratetype) 6727 { 6728 unsigned long pfn, end_pfn = start_pfn + size; 6729 struct page *page; 6730 6731 if (highest_memmap_pfn < end_pfn - 1) 6732 highest_memmap_pfn = end_pfn - 1; 6733 6734 #ifdef CONFIG_ZONE_DEVICE 6735 /* 6736 * Honor reservation requested by the driver for this ZONE_DEVICE 6737 * memory. We limit the total number of pages to initialize to just 6738 * those that might contain the memory mapping. We will defer the 6739 * ZONE_DEVICE page initialization until after we have released 6740 * the hotplug lock. 6741 */ 6742 if (zone == ZONE_DEVICE) { 6743 if (!altmap) 6744 return; 6745 6746 if (start_pfn == altmap->base_pfn) 6747 start_pfn += altmap->reserve; 6748 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6749 } 6750 #endif 6751 6752 for (pfn = start_pfn; pfn < end_pfn; ) { 6753 /* 6754 * There can be holes in boot-time mem_map[]s handed to this 6755 * function. They do not exist on hotplugged memory. 6756 */ 6757 if (context == MEMINIT_EARLY) { 6758 if (overlap_memmap_init(zone, &pfn)) 6759 continue; 6760 if (defer_init(nid, pfn, zone_end_pfn)) 6761 break; 6762 } 6763 6764 page = pfn_to_page(pfn); 6765 __init_single_page(page, pfn, zone, nid); 6766 if (context == MEMINIT_HOTPLUG) 6767 __SetPageReserved(page); 6768 6769 /* 6770 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 6771 * such that unmovable allocations won't be scattered all 6772 * over the place during system boot. 
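 * The migratetype is only set on the pageblock-aligned pfn, i.e. once per
 * pageblock, which also gives a natural point to cond_resched() while
 * initialising very large ranges.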
6773 */ 6774 if (pageblock_aligned(pfn)) { 6775 set_pageblock_migratetype(page, migratetype); 6776 cond_resched(); 6777 } 6778 pfn++; 6779 } 6780 } 6781 6782 #ifdef CONFIG_ZONE_DEVICE 6783 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, 6784 unsigned long zone_idx, int nid, 6785 struct dev_pagemap *pgmap) 6786 { 6787 6788 __init_single_page(page, pfn, zone_idx, nid); 6789 6790 /* 6791 * Mark page reserved as it will need to wait for onlining 6792 * phase for it to be fully associated with a zone. 6793 * 6794 * We can use the non-atomic __set_bit operation for setting 6795 * the flag as we are still initializing the pages. 6796 */ 6797 __SetPageReserved(page); 6798 6799 /* 6800 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 6801 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 6802 * ever freed or placed on a driver-private list. 6803 */ 6804 page->pgmap = pgmap; 6805 page->zone_device_data = NULL; 6806 6807 /* 6808 * Mark the block movable so that blocks are reserved for 6809 * movable at startup. This will force kernel allocations 6810 * to reserve their blocks rather than leaking throughout 6811 * the address space during boot when many long-lived 6812 * kernel allocations are made. 6813 * 6814 * Please note that the MEMINIT_HOTPLUG path doesn't clear the memmap 6815 * because this is done early in section_activate(). 6816 */ 6817 if (pageblock_aligned(pfn)) { 6818 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 6819 cond_resched(); 6820 } 6821 } 6822 6823 /* 6824 * With compound page geometry and when struct pages are stored in RAM, most 6825 * tail pages are reused. Consequently, the amount of unique struct pages to 6826 * initialize is a lot smaller than the total amount of struct pages being 6827 * mapped. This is a paired / mild layering violation with explicit knowledge 6828 * of how the sparse_vmemmap internals handle compound pages in the absence 6829 * of an altmap. See vmemmap_populate_compound_pages(). 6830 */ 6831 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap, 6832 unsigned long nr_pages) 6833 { 6834 return is_power_of_2(sizeof(struct page)) && 6835 !altmap ? 2 * (PAGE_SIZE / sizeof(struct page)) : nr_pages; 6836 } 6837 6838 static void __ref memmap_init_compound(struct page *head, 6839 unsigned long head_pfn, 6840 unsigned long zone_idx, int nid, 6841 struct dev_pagemap *pgmap, 6842 unsigned long nr_pages) 6843 { 6844 unsigned long pfn, end_pfn = head_pfn + nr_pages; 6845 unsigned int order = pgmap->vmemmap_shift; 6846 6847 __SetPageHead(head); 6848 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { 6849 struct page *page = pfn_to_page(pfn); 6850 6851 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 6852 prep_compound_tail(head, pfn - head_pfn); 6853 set_page_count(page, 0); 6854 6855 /* 6856 * The first tail page stores compound_mapcount_ptr() and 6857 * compound_order() and the second tail page stores 6858 * compound_pincount_ptr(). Call prep_compound_head() after 6859 * the first and second tail pages have been initialized to 6860 * not have the data overwritten.
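 * (Hence the head is only prepared once the loop below has reached
 * head_pfn + 2.)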
6861 */ 6862 if (pfn == head_pfn + 2) 6863 prep_compound_head(head, order); 6864 } 6865 } 6866 6867 void __ref memmap_init_zone_device(struct zone *zone, 6868 unsigned long start_pfn, 6869 unsigned long nr_pages, 6870 struct dev_pagemap *pgmap) 6871 { 6872 unsigned long pfn, end_pfn = start_pfn + nr_pages; 6873 struct pglist_data *pgdat = zone->zone_pgdat; 6874 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 6875 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); 6876 unsigned long zone_idx = zone_idx(zone); 6877 unsigned long start = jiffies; 6878 int nid = pgdat->node_id; 6879 6880 if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE)) 6881 return; 6882 6883 /* 6884 * The call to memmap_init should have already taken care 6885 * of the pages reserved for the memmap, so we can just jump to 6886 * the end of that region and start processing the device pages. 6887 */ 6888 if (altmap) { 6889 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 6890 nr_pages = end_pfn - start_pfn; 6891 } 6892 6893 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { 6894 struct page *page = pfn_to_page(pfn); 6895 6896 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 6897 6898 if (pfns_per_compound == 1) 6899 continue; 6900 6901 memmap_init_compound(page, pfn, zone_idx, nid, pgmap, 6902 compound_nr_pages(altmap, pfns_per_compound)); 6903 } 6904 6905 pr_info("%s initialised %lu pages in %ums\n", __func__, 6906 nr_pages, jiffies_to_msecs(jiffies - start)); 6907 } 6908 6909 #endif 6910 static void __meminit zone_init_free_lists(struct zone *zone) 6911 { 6912 unsigned int order, t; 6913 for_each_migratetype_order(order, t) { 6914 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 6915 zone->free_area[order].nr_free = 0; 6916 } 6917 } 6918 6919 /* 6920 * Only struct pages that correspond to ranges defined by memblock.memory 6921 * are zeroed and initialized by going through __init_single_page() during 6922 * memmap_init_zone_range(). 6923 * 6924 * But, there could be struct pages that correspond to holes in 6925 * memblock.memory. This can happen because of the following reasons: 6926 * - physical memory bank size is not necessarily the exact multiple of the 6927 * arbitrary section size 6928 * - early reserved memory may not be listed in memblock.memory 6929 * - memory layouts defined with memmap= kernel parameter may not align 6930 * nicely with memmap sections 6931 * 6932 * Explicitly initialize those struct pages so that: 6933 * - PG_Reserved is set 6934 * - zone and node links point to zone and node that span the page if the 6935 * hole is in the middle of a zone 6936 * - zone and node links point to adjacent zone/node if the hole falls on 6937 * the zone boundary; the pages in such holes will be prepended to the 6938 * zone/node above the hole except for the trailing pages in the last 6939 * section that will be appended to the zone/node below. 
6940 */ 6941 static void __init init_unavailable_range(unsigned long spfn, 6942 unsigned long epfn, 6943 int zone, int node) 6944 { 6945 unsigned long pfn; 6946 u64 pgcnt = 0; 6947 6948 for (pfn = spfn; pfn < epfn; pfn++) { 6949 if (!pfn_valid(pageblock_start_pfn(pfn))) { 6950 pfn = pageblock_end_pfn(pfn) - 1; 6951 continue; 6952 } 6953 __init_single_page(pfn_to_page(pfn), pfn, zone, node); 6954 __SetPageReserved(pfn_to_page(pfn)); 6955 pgcnt++; 6956 } 6957 6958 if (pgcnt) 6959 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", 6960 node, zone_names[zone], pgcnt); 6961 } 6962 6963 static void __init memmap_init_zone_range(struct zone *zone, 6964 unsigned long start_pfn, 6965 unsigned long end_pfn, 6966 unsigned long *hole_pfn) 6967 { 6968 unsigned long zone_start_pfn = zone->zone_start_pfn; 6969 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; 6970 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); 6971 6972 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); 6973 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); 6974 6975 if (start_pfn >= end_pfn) 6976 return; 6977 6978 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, 6979 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); 6980 6981 if (*hole_pfn < start_pfn) 6982 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); 6983 6984 *hole_pfn = end_pfn; 6985 } 6986 6987 static void __init memmap_init(void) 6988 { 6989 unsigned long start_pfn, end_pfn; 6990 unsigned long hole_pfn = 0; 6991 int i, j, zone_id = 0, nid; 6992 6993 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 6994 struct pglist_data *node = NODE_DATA(nid); 6995 6996 for (j = 0; j < MAX_NR_ZONES; j++) { 6997 struct zone *zone = node->node_zones + j; 6998 6999 if (!populated_zone(zone)) 7000 continue; 7001 7002 memmap_init_zone_range(zone, start_pfn, end_pfn, 7003 &hole_pfn); 7004 zone_id = j; 7005 } 7006 } 7007 7008 #ifdef CONFIG_SPARSEMEM 7009 /* 7010 * Initialize the memory map for hole in the range [memory_end, 7011 * section_end]. 7012 * Append the pages in this hole to the highest zone in the last 7013 * node. 7014 * The call to init_unavailable_range() is outside the ifdef to 7015 * silence the compiler warining about zone_id set but not used; 7016 * for FLATMEM it is a nop anyway 7017 */ 7018 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); 7019 if (hole_pfn < end_pfn) 7020 #endif 7021 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); 7022 } 7023 7024 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 7025 phys_addr_t min_addr, int nid, bool exact_nid) 7026 { 7027 void *ptr; 7028 7029 if (exact_nid) 7030 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 7031 MEMBLOCK_ALLOC_ACCESSIBLE, 7032 nid); 7033 else 7034 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 7035 MEMBLOCK_ALLOC_ACCESSIBLE, 7036 nid); 7037 7038 if (ptr && size > 0) 7039 page_init_poison(ptr, size); 7040 7041 return ptr; 7042 } 7043 7044 static int zone_batchsize(struct zone *zone) 7045 { 7046 #ifdef CONFIG_MMU 7047 int batch; 7048 7049 /* 7050 * The number of pages to batch allocate is either ~0.1% 7051 * of the zone or 1MB, whichever is smaller. The batch 7052 * size is striking a balance between allocation latency 7053 * and zone lock contention. 7054 */ 7055 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); 7056 batch /= 4; /* We effectively *= 4 below */ 7057 if (batch < 1) 7058 batch = 1; 7059 7060 /* 7061 * Clamp the batch to a 2^n - 1 value. 
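 * (Illustrative numbers, assuming 4KiB pages: a zone with 4GiB of managed
 * memory starts from min(1048576 >> 10, 256) = 256 above, becomes 64 after
 * the division by 4, and the clamp below turns that into
 * rounddown_pow_of_two(64 + 32) - 1 = 63.)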
Having a power 7062 * of 2 value was found to be more likely to have 7063 * suboptimal cache aliasing properties in some cases. 7064 * 7065 * For example if 2 tasks are alternately allocating 7066 * batches of pages, one task can end up with a lot 7067 * of pages of one half of the possible page colors 7068 * and the other with pages of the other colors. 7069 */ 7070 batch = rounddown_pow_of_two(batch + batch/2) - 1; 7071 7072 return batch; 7073 7074 #else 7075 /* The deferral and batching of frees should be suppressed under NOMMU 7076 * conditions. 7077 * 7078 * The problem is that NOMMU needs to be able to allocate large chunks 7079 * of contiguous memory as there's no hardware page translation to 7080 * assemble apparent contiguous memory from discontiguous pages. 7081 * 7082 * Queueing large contiguous runs of pages for batching, however, 7083 * causes the pages to actually be freed in smaller chunks. As there 7084 * can be a significant delay between the individual batches being 7085 * recycled, this leads to the once large chunks of space being 7086 * fragmented and becoming unavailable for high-order allocations. 7087 */ 7088 return 0; 7089 #endif 7090 } 7091 7092 static int zone_highsize(struct zone *zone, int batch, int cpu_online) 7093 { 7094 #ifdef CONFIG_MMU 7095 int high; 7096 int nr_split_cpus; 7097 unsigned long total_pages; 7098 7099 if (!percpu_pagelist_high_fraction) { 7100 /* 7101 * By default, the high value of the pcp is based on the zone 7102 * low watermark so that if they are full then background 7103 * reclaim will not be started prematurely. 7104 */ 7105 total_pages = low_wmark_pages(zone); 7106 } else { 7107 /* 7108 * If percpu_pagelist_high_fraction is configured, the high 7109 * value is based on a fraction of the managed pages in the 7110 * zone. 7111 */ 7112 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; 7113 } 7114 7115 /* 7116 * Split the high value across all online CPUs local to the zone. Note 7117 * that early in boot that CPUs may not be online yet and that during 7118 * CPU hotplug that the cpumask is not yet updated when a CPU is being 7119 * onlined. For memory nodes that have no CPUs, split pcp->high across 7120 * all online CPUs to mitigate the risk that reclaim is triggered 7121 * prematurely due to pages stored on pcp lists. 7122 */ 7123 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; 7124 if (!nr_split_cpus) 7125 nr_split_cpus = num_online_cpus(); 7126 high = total_pages / nr_split_cpus; 7127 7128 /* 7129 * Ensure high is at least batch*4. The multiple is based on the 7130 * historical relationship between high and batch. 7131 */ 7132 high = max(high, batch << 2); 7133 7134 return high; 7135 #else 7136 return 0; 7137 #endif 7138 } 7139 7140 /* 7141 * pcp->high and pcp->batch values are related and generally batch is lower 7142 * than high. They are also related to pcp->count such that count is lower 7143 * than high, and as soon as it reaches high, the pcplist is flushed. 7144 * 7145 * However, guaranteeing these relations at all times would require e.g. write 7146 * barriers here but also careful usage of read barriers at the read side, and 7147 * thus be prone to error and bad for performance. Thus the update only prevents 7148 * store tearing. Any new users of pcp->batch and pcp->high should ensure they 7149 * can cope with those fields changing asynchronously, and fully trust only the 7150 * pcp->count field on the local CPU with interrupts disabled. 
7151 * 7152 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 7153 * outside of boot time (or some other assurance that no concurrent updaters 7154 * exist). 7155 */ 7156 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 7157 unsigned long batch) 7158 { 7159 WRITE_ONCE(pcp->batch, batch); 7160 WRITE_ONCE(pcp->high, high); 7161 } 7162 7163 static void per_cpu_pages_init(struct per_cpu_pages *pcp, struct per_cpu_zonestat *pzstats) 7164 { 7165 int pindex; 7166 7167 memset(pcp, 0, sizeof(*pcp)); 7168 memset(pzstats, 0, sizeof(*pzstats)); 7169 7170 spin_lock_init(&pcp->lock); 7171 for (pindex = 0; pindex < NR_PCP_LISTS; pindex++) 7172 INIT_LIST_HEAD(&pcp->lists[pindex]); 7173 7174 /* 7175 * Set batch and high values safe for a boot pageset. A true percpu 7176 * pageset's initialization will update them subsequently. Here we don't 7177 * need to be as careful as pageset_update() as nobody can access the 7178 * pageset yet. 7179 */ 7180 pcp->high = BOOT_PAGESET_HIGH; 7181 pcp->batch = BOOT_PAGESET_BATCH; 7182 pcp->free_factor = 0; 7183 } 7184 7185 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, 7186 unsigned long batch) 7187 { 7188 struct per_cpu_pages *pcp; 7189 int cpu; 7190 7191 for_each_possible_cpu(cpu) { 7192 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 7193 pageset_update(pcp, high, batch); 7194 } 7195 } 7196 7197 /* 7198 * Calculate and set new high and batch values for all per-cpu pagesets of a 7199 * zone based on the zone's size. 7200 */ 7201 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) 7202 { 7203 int new_high, new_batch; 7204 7205 new_batch = max(1, zone_batchsize(zone)); 7206 new_high = zone_highsize(zone, new_batch, cpu_online); 7207 7208 if (zone->pageset_high == new_high && 7209 zone->pageset_batch == new_batch) 7210 return; 7211 7212 zone->pageset_high = new_high; 7213 zone->pageset_batch = new_batch; 7214 7215 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); 7216 } 7217 7218 void __meminit setup_zone_pageset(struct zone *zone) 7219 { 7220 int cpu; 7221 7222 /* Size may be 0 on !SMP && !NUMA */ 7223 if (sizeof(struct per_cpu_zonestat) > 0) 7224 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); 7225 7226 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); 7227 for_each_possible_cpu(cpu) { 7228 struct per_cpu_pages *pcp; 7229 struct per_cpu_zonestat *pzstats; 7230 7231 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); 7232 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); 7233 per_cpu_pages_init(pcp, pzstats); 7234 } 7235 7236 zone_set_pageset_high_and_batch(zone, 0); 7237 } 7238 7239 /* 7240 * The zone indicated has a new number of managed_pages; batch sizes and percpu 7241 * page high values need to be recalculated. 7242 */ 7243 static void zone_pcp_update(struct zone *zone, int cpu_online) 7244 { 7245 mutex_lock(&pcp_batch_high_lock); 7246 zone_set_pageset_high_and_batch(zone, cpu_online); 7247 mutex_unlock(&pcp_batch_high_lock); 7248 } 7249 7250 /* 7251 * Allocate per cpu pagesets and initialize them. 7252 * Before this call only boot pagesets were available. 7253 */ 7254 void __init setup_per_cpu_pageset(void) 7255 { 7256 struct pglist_data *pgdat; 7257 struct zone *zone; 7258 int __maybe_unused cpu; 7259 7260 for_each_populated_zone(zone) 7261 setup_zone_pageset(zone); 7262 7263 #ifdef CONFIG_NUMA 7264 /* 7265 * Unpopulated zones continue using the boot pagesets. 
7266 * The numa stats for these pagesets need to be reset. 7267 * Otherwise, they will end up skewing the stats of 7268 * the nodes these zones are associated with. 7269 */ 7270 for_each_possible_cpu(cpu) { 7271 struct per_cpu_zonestat *pzstats = &per_cpu(boot_zonestats, cpu); 7272 memset(pzstats->vm_numa_event, 0, 7273 sizeof(pzstats->vm_numa_event)); 7274 } 7275 #endif 7276 7277 for_each_online_pgdat(pgdat) 7278 pgdat->per_cpu_nodestats = 7279 alloc_percpu(struct per_cpu_nodestat); 7280 } 7281 7282 static __meminit void zone_pcp_init(struct zone *zone) 7283 { 7284 /* 7285 * per cpu subsystem is not up at this point. The following code 7286 * relies on the ability of the linker to provide the 7287 * offset of a (static) per cpu variable into the per cpu area. 7288 */ 7289 zone->per_cpu_pageset = &boot_pageset; 7290 zone->per_cpu_zonestats = &boot_zonestats; 7291 zone->pageset_high = BOOT_PAGESET_HIGH; 7292 zone->pageset_batch = BOOT_PAGESET_BATCH; 7293 7294 if (populated_zone(zone)) 7295 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, 7296 zone->present_pages, zone_batchsize(zone)); 7297 } 7298 7299 void __meminit init_currently_empty_zone(struct zone *zone, 7300 unsigned long zone_start_pfn, 7301 unsigned long size) 7302 { 7303 struct pglist_data *pgdat = zone->zone_pgdat; 7304 int zone_idx = zone_idx(zone) + 1; 7305 7306 if (zone_idx > pgdat->nr_zones) 7307 pgdat->nr_zones = zone_idx; 7308 7309 zone->zone_start_pfn = zone_start_pfn; 7310 7311 mminit_dprintk(MMINIT_TRACE, "memmap_init", 7312 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 7313 pgdat->node_id, 7314 (unsigned long)zone_idx(zone), 7315 zone_start_pfn, (zone_start_pfn + size)); 7316 7317 zone_init_free_lists(zone); 7318 zone->initialized = 1; 7319 } 7320 7321 /** 7322 * get_pfn_range_for_nid - Return the start and end page frames for a node 7323 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 7324 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 7325 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 7326 * 7327 * It returns the start and end page frame of a node based on information 7328 * provided by memblock_set_node(). If called for a node 7329 * with no available memory, a warning is printed and the start and end 7330 * PFNs will be 0. 7331 */ 7332 void __init get_pfn_range_for_nid(unsigned int nid, 7333 unsigned long *start_pfn, unsigned long *end_pfn) 7334 { 7335 unsigned long this_start_pfn, this_end_pfn; 7336 int i; 7337 7338 *start_pfn = -1UL; 7339 *end_pfn = 0; 7340 7341 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 7342 *start_pfn = min(*start_pfn, this_start_pfn); 7343 *end_pfn = max(*end_pfn, this_end_pfn); 7344 } 7345 7346 if (*start_pfn == -1UL) 7347 *start_pfn = 0; 7348 } 7349 7350 /* 7351 * This finds a zone that can be used for ZONE_MOVABLE pages. 
The 7352 * assumption is made that zones within a node are ordered in monotonically
7353 * increasing memory addresses so that the "highest" populated zone is used.
7354 */
7355 static void __init find_usable_zone_for_movable(void)
7356 {
7357 int zone_index;
7358 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
7359 if (zone_index == ZONE_MOVABLE)
7360 continue;
7361
7362 if (arch_zone_highest_possible_pfn[zone_index] >
7363 arch_zone_lowest_possible_pfn[zone_index])
7364 break;
7365 }
7366
7367 VM_BUG_ON(zone_index == -1);
7368 movable_zone = zone_index;
7369 }
7370
7371 /*
7372 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
7373 * because it is sized independently of the architecture. Unlike the other zones,
7374 * the starting point for ZONE_MOVABLE is not fixed. It may be different
7375 * in each node depending on the size of each node and how evenly kernelcore
7376 * is distributed. This helper function adjusts the zone ranges
7377 * provided by the architecture for a given node by using the end of the
7378 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
7379 * zones within a node are in order of monotonically increasing memory addresses.
7380 */
7381 static void __init adjust_zone_range_for_zone_movable(int nid,
7382 unsigned long zone_type,
7383 unsigned long node_start_pfn,
7384 unsigned long node_end_pfn,
7385 unsigned long *zone_start_pfn,
7386 unsigned long *zone_end_pfn)
7387 {
7388 /* Only adjust if ZONE_MOVABLE is on this node */
7389 if (zone_movable_pfn[nid]) {
7390 /* Size ZONE_MOVABLE */
7391 if (zone_type == ZONE_MOVABLE) {
7392 *zone_start_pfn = zone_movable_pfn[nid];
7393 *zone_end_pfn = min(node_end_pfn,
7394 arch_zone_highest_possible_pfn[movable_zone]);
7395
7396 /* Adjust for ZONE_MOVABLE starting within this range */
7397 } else if (!mirrored_kernelcore &&
7398 *zone_start_pfn < zone_movable_pfn[nid] &&
7399 *zone_end_pfn > zone_movable_pfn[nid]) {
7400 *zone_end_pfn = zone_movable_pfn[nid];
7401
7402 /* Check if this whole range is within ZONE_MOVABLE */
7403 } else if (*zone_start_pfn >= zone_movable_pfn[nid])
7404 *zone_start_pfn = *zone_end_pfn;
7405 }
7406 }
7407
7408 /*
7409 * Return the number of pages a zone spans in a node, including holes:
7410 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
7411 */
7412 static unsigned long __init zone_spanned_pages_in_node(int nid,
7413 unsigned long zone_type,
7414 unsigned long node_start_pfn,
7415 unsigned long node_end_pfn,
7416 unsigned long *zone_start_pfn,
7417 unsigned long *zone_end_pfn)
7418 {
7419 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7420 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7421 /* When hot-adding a new node from cpu_up(), the node should be empty */
7422 if (!node_start_pfn && !node_end_pfn)
7423 return 0;
7424
7425 /* Get the start and end of the zone */
7426 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7427 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7428 adjust_zone_range_for_zone_movable(nid, zone_type,
7429 node_start_pfn, node_end_pfn,
7430 zone_start_pfn, zone_end_pfn);
7431
7432 /* Check that this node has pages within the zone's required range */
7433 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
7434 return 0;
7435
7436 /* Move the zone boundaries inside the node if necessary */
7437 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
7438 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
7439
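/*
* Worked example (illustrative, not from the original source): with 4KiB
* pages, if ZONE_DMA32 may span PFNs [0, 0x100000) (0-4GiB) and this node
* covers PFNs [0x80000, 0x180000) (2GiB-6GiB), the clamping above yields
* [0x80000, 0x100000), i.e. the node contributes 2GiB of spanned pages to
* ZONE_DMA32 (assuming no ZONE_MOVABLE adjustment).
*/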
7440 /* Return the spanned pages */
7441 return *zone_end_pfn - *zone_start_pfn;
7442 }
7443
7444 /*
7445 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
7446 * then all holes in the requested range will be accounted for.
7447 */
7448 unsigned long __init __absent_pages_in_range(int nid,
7449 unsigned long range_start_pfn,
7450 unsigned long range_end_pfn)
7451 {
7452 unsigned long nr_absent = range_end_pfn - range_start_pfn;
7453 unsigned long start_pfn, end_pfn;
7454 int i;
7455
7456 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
7457 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
7458 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
7459 nr_absent -= end_pfn - start_pfn;
7460 }
7461 return nr_absent;
7462 }
7463
7464 /**
7465 * absent_pages_in_range - Return number of page frames in holes within a range
7466 * @start_pfn: The start PFN to start searching for holes
7467 * @end_pfn: The end PFN to stop searching for holes
7468 *
7469 * Return: the number of page frames in memory holes within a range.
7470 */
7471 unsigned long __init absent_pages_in_range(unsigned long start_pfn,
7472 unsigned long end_pfn)
7473 {
7474 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
7475 }
7476
7477 /* Return the number of page frames in holes in a zone on a node */
7478 static unsigned long __init zone_absent_pages_in_node(int nid,
7479 unsigned long zone_type,
7480 unsigned long node_start_pfn,
7481 unsigned long node_end_pfn)
7482 {
7483 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
7484 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
7485 unsigned long zone_start_pfn, zone_end_pfn;
7486 unsigned long nr_absent;
7487
7488 /* When hot-adding a new node from cpu_up(), the node should be empty */
7489 if (!node_start_pfn && !node_end_pfn)
7490 return 0;
7491
7492 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
7493 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
7494
7495 adjust_zone_range_for_zone_movable(nid, zone_type,
7496 node_start_pfn, node_end_pfn,
7497 &zone_start_pfn, &zone_end_pfn);
7498 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
7499
7500 /*
7501 * ZONE_MOVABLE handling.
7502 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
7503 * and vice versa.
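*
* In other words, when kernelcore=mirror is in effect, non-mirrored
* memory that falls inside ZONE_NORMAL's range is handed to ZONE_MOVABLE,
* so it is counted as absent for ZONE_NORMAL below, while mirrored
* memory is counted as absent for ZONE_MOVABLE.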
7504 */ 7505 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 7506 unsigned long start_pfn, end_pfn; 7507 struct memblock_region *r; 7508 7509 for_each_mem_region(r) { 7510 start_pfn = clamp(memblock_region_memory_base_pfn(r), 7511 zone_start_pfn, zone_end_pfn); 7512 end_pfn = clamp(memblock_region_memory_end_pfn(r), 7513 zone_start_pfn, zone_end_pfn); 7514 7515 if (zone_type == ZONE_MOVABLE && 7516 memblock_is_mirror(r)) 7517 nr_absent += end_pfn - start_pfn; 7518 7519 if (zone_type == ZONE_NORMAL && 7520 !memblock_is_mirror(r)) 7521 nr_absent += end_pfn - start_pfn; 7522 } 7523 } 7524 7525 return nr_absent; 7526 } 7527 7528 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 7529 unsigned long node_start_pfn, 7530 unsigned long node_end_pfn) 7531 { 7532 unsigned long realtotalpages = 0, totalpages = 0; 7533 enum zone_type i; 7534 7535 for (i = 0; i < MAX_NR_ZONES; i++) { 7536 struct zone *zone = pgdat->node_zones + i; 7537 unsigned long zone_start_pfn, zone_end_pfn; 7538 unsigned long spanned, absent; 7539 unsigned long size, real_size; 7540 7541 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 7542 node_start_pfn, 7543 node_end_pfn, 7544 &zone_start_pfn, 7545 &zone_end_pfn); 7546 absent = zone_absent_pages_in_node(pgdat->node_id, i, 7547 node_start_pfn, 7548 node_end_pfn); 7549 7550 size = spanned; 7551 real_size = size - absent; 7552 7553 if (size) 7554 zone->zone_start_pfn = zone_start_pfn; 7555 else 7556 zone->zone_start_pfn = 0; 7557 zone->spanned_pages = size; 7558 zone->present_pages = real_size; 7559 #if defined(CONFIG_MEMORY_HOTPLUG) 7560 zone->present_early_pages = real_size; 7561 #endif 7562 7563 totalpages += size; 7564 realtotalpages += real_size; 7565 } 7566 7567 pgdat->node_spanned_pages = totalpages; 7568 pgdat->node_present_pages = realtotalpages; 7569 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 7570 } 7571 7572 #ifndef CONFIG_SPARSEMEM 7573 /* 7574 * Calculate the size of the zone->blockflags rounded to an unsigned long 7575 * Start by making sure zonesize is a multiple of pageblock_order by rounding 7576 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 7577 * round what is now in bits to nearest long in bits, then return it in 7578 * bytes. 
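*
* Worked example (illustrative figures, assuming 4KiB pages,
* pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4): a 1GiB zone covers
* 262144 pages, i.e. 512 pageblocks, which need 512 * 4 = 2048 bits;
* rounded up to whole longs and converted to bytes, that is 256 bytes
* of pageblock flags.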
7579 */
7580 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
7581 {
7582 unsigned long usemapsize;
7583
7584 zonesize += zone_start_pfn & (pageblock_nr_pages-1);
7585 usemapsize = roundup(zonesize, pageblock_nr_pages);
7586 usemapsize = usemapsize >> pageblock_order;
7587 usemapsize *= NR_PAGEBLOCK_BITS;
7588 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
7589
7590 return usemapsize / 8;
7591 }
7592
7593 static void __ref setup_usemap(struct zone *zone)
7594 {
7595 unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
7596 zone->spanned_pages);
7597 zone->pageblock_flags = NULL;
7598 if (usemapsize) {
7599 zone->pageblock_flags =
7600 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
7601 zone_to_nid(zone));
7602 if (!zone->pageblock_flags)
7603 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7604 usemapsize, zone->name, zone_to_nid(zone));
7605 }
7606 }
7607 #else
7608 static inline void setup_usemap(struct zone *zone) {}
7609 #endif /* CONFIG_SPARSEMEM */
7610
7611 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
7612
7613 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
7614 void __init set_pageblock_order(void)
7615 {
7616 unsigned int order = MAX_ORDER - 1;
7617
7618 /* Check that pageblock_nr_pages has not already been set up */
7619 if (pageblock_order)
7620 return;
7621
7622 /* Don't let pageblocks exceed the maximum allocation granularity. */
7623 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order)
7624 order = HUGETLB_PAGE_ORDER;
7625
7626 /*
7627 * Assume the largest contiguous order of interest is a huge page.
7628 * This value may be variable depending on boot parameters on IA64 and
7629 * powerpc.
7630 */
7631 pageblock_order = order;
7632 }
7633 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7634
7635 /*
7636 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
7637 * is unused as pageblock_order is set at compile-time. See
7638 * include/linux/pageblock-flags.h for the values of pageblock_order based on
7639 * the kernel config.
7640 */
7641 void __init set_pageblock_order(void)
7642 {
7643 }
7644
7645 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
7646
7647 static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
7648 unsigned long present_pages)
7649 {
7650 unsigned long pages = spanned_pages;
7651
7652 /*
7653 * Provide a more accurate estimation if there are holes within
7654 * the zone and SPARSEMEM is in use. If there are holes within the
7655 * zone, each populated memory region may cost us one or two extra
7656 * memmap pages due to alignment because memmap pages for each
7657 * populated region may not be naturally aligned on a page boundary.
7658 * So the (present_pages >> 4) heuristic is a tradeoff for that.
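*
* Worked example (illustrative, assuming 4KiB pages and a 64-byte
* struct page): a zone spanning 1048576 pages needs 1048576 * 64 bytes
* of memmap, i.e. 64MiB or 16384 pages, which is what this function
* returns when there are no holes.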
7659 */ 7660 if (spanned_pages > present_pages + (present_pages >> 4) && 7661 IS_ENABLED(CONFIG_SPARSEMEM)) 7662 pages = present_pages; 7663 7664 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 7665 } 7666 7667 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 7668 static void pgdat_init_split_queue(struct pglist_data *pgdat) 7669 { 7670 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 7671 7672 spin_lock_init(&ds_queue->split_queue_lock); 7673 INIT_LIST_HEAD(&ds_queue->split_queue); 7674 ds_queue->split_queue_len = 0; 7675 } 7676 #else 7677 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 7678 #endif 7679 7680 #ifdef CONFIG_COMPACTION 7681 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 7682 { 7683 init_waitqueue_head(&pgdat->kcompactd_wait); 7684 } 7685 #else 7686 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 7687 #endif 7688 7689 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 7690 { 7691 int i; 7692 7693 pgdat_resize_init(pgdat); 7694 pgdat_kswapd_lock_init(pgdat); 7695 7696 pgdat_init_split_queue(pgdat); 7697 pgdat_init_kcompactd(pgdat); 7698 7699 init_waitqueue_head(&pgdat->kswapd_wait); 7700 init_waitqueue_head(&pgdat->pfmemalloc_wait); 7701 7702 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 7703 init_waitqueue_head(&pgdat->reclaim_wait[i]); 7704 7705 pgdat_page_ext_init(pgdat); 7706 lruvec_init(&pgdat->__lruvec); 7707 } 7708 7709 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 7710 unsigned long remaining_pages) 7711 { 7712 atomic_long_set(&zone->managed_pages, remaining_pages); 7713 zone_set_nid(zone, nid); 7714 zone->name = zone_names[idx]; 7715 zone->zone_pgdat = NODE_DATA(nid); 7716 spin_lock_init(&zone->lock); 7717 zone_seqlock_init(zone); 7718 zone_pcp_init(zone); 7719 } 7720 7721 /* 7722 * Set up the zone data structures 7723 * - init pgdat internals 7724 * - init all zones belonging to this node 7725 * 7726 * NOTE: this function is only called during memory hotplug 7727 */ 7728 #ifdef CONFIG_MEMORY_HOTPLUG 7729 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) 7730 { 7731 int nid = pgdat->node_id; 7732 enum zone_type z; 7733 int cpu; 7734 7735 pgdat_init_internals(pgdat); 7736 7737 if (pgdat->per_cpu_nodestats == &boot_nodestats) 7738 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 7739 7740 /* 7741 * Reset the nr_zones, order and highest_zoneidx before reuse. 7742 * Note that kswapd will init kswapd_highest_zoneidx properly 7743 * when it starts in the near future. 7744 */ 7745 pgdat->nr_zones = 0; 7746 pgdat->kswapd_order = 0; 7747 pgdat->kswapd_highest_zoneidx = 0; 7748 pgdat->node_start_pfn = 0; 7749 for_each_online_cpu(cpu) { 7750 struct per_cpu_nodestat *p; 7751 7752 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 7753 memset(p, 0, sizeof(*p)); 7754 } 7755 7756 for (z = 0; z < MAX_NR_ZONES; z++) 7757 zone_init_internals(&pgdat->node_zones[z], z, nid, 0); 7758 } 7759 #endif 7760 7761 /* 7762 * Set up the zone data structures: 7763 * - mark all pages reserved 7764 * - mark all memory queues empty 7765 * - clear the memory bitmaps 7766 * 7767 * NOTE: pgdat should get zeroed by caller. 7768 * NOTE: this function is only called during early init. 
7769 */ 7770 static void __init free_area_init_core(struct pglist_data *pgdat) 7771 { 7772 enum zone_type j; 7773 int nid = pgdat->node_id; 7774 7775 pgdat_init_internals(pgdat); 7776 pgdat->per_cpu_nodestats = &boot_nodestats; 7777 7778 for (j = 0; j < MAX_NR_ZONES; j++) { 7779 struct zone *zone = pgdat->node_zones + j; 7780 unsigned long size, freesize, memmap_pages; 7781 7782 size = zone->spanned_pages; 7783 freesize = zone->present_pages; 7784 7785 /* 7786 * Adjust freesize so that it accounts for how much memory 7787 * is used by this zone for memmap. This affects the watermark 7788 * and per-cpu initialisations 7789 */ 7790 memmap_pages = calc_memmap_size(size, freesize); 7791 if (!is_highmem_idx(j)) { 7792 if (freesize >= memmap_pages) { 7793 freesize -= memmap_pages; 7794 if (memmap_pages) 7795 pr_debug(" %s zone: %lu pages used for memmap\n", 7796 zone_names[j], memmap_pages); 7797 } else 7798 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", 7799 zone_names[j], memmap_pages, freesize); 7800 } 7801 7802 /* Account for reserved pages */ 7803 if (j == 0 && freesize > dma_reserve) { 7804 freesize -= dma_reserve; 7805 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); 7806 } 7807 7808 if (!is_highmem_idx(j)) 7809 nr_kernel_pages += freesize; 7810 /* Charge for highmem memmap if there are enough kernel pages */ 7811 else if (nr_kernel_pages > memmap_pages * 2) 7812 nr_kernel_pages -= memmap_pages; 7813 nr_all_pages += freesize; 7814 7815 /* 7816 * Set an approximate value for lowmem here, it will be adjusted 7817 * when the bootmem allocator frees pages into the buddy system. 7818 * And all highmem pages will be managed by the buddy system. 7819 */ 7820 zone_init_internals(zone, j, nid, freesize); 7821 7822 if (!size) 7823 continue; 7824 7825 set_pageblock_order(); 7826 setup_usemap(zone); 7827 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 7828 } 7829 } 7830 7831 #ifdef CONFIG_FLATMEM 7832 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 7833 { 7834 unsigned long __maybe_unused start = 0; 7835 unsigned long __maybe_unused offset = 0; 7836 7837 /* Skip empty nodes */ 7838 if (!pgdat->node_spanned_pages) 7839 return; 7840 7841 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 7842 offset = pgdat->node_start_pfn - start; 7843 /* ia64 gets its own node_mem_map, before this, without bootmem */ 7844 if (!pgdat->node_mem_map) { 7845 unsigned long size, end; 7846 struct page *map; 7847 7848 /* 7849 * The zone's endpoints aren't required to be MAX_ORDER 7850 * aligned but the node_mem_map endpoints must be in order 7851 * for the buddy allocator to function correctly. 
7852 */ 7853 end = pgdat_end_pfn(pgdat); 7854 end = ALIGN(end, MAX_ORDER_NR_PAGES); 7855 size = (end - start) * sizeof(struct page); 7856 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 7857 pgdat->node_id, false); 7858 if (!map) 7859 panic("Failed to allocate %ld bytes for node %d memory map\n", 7860 size, pgdat->node_id); 7861 pgdat->node_mem_map = map + offset; 7862 } 7863 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 7864 __func__, pgdat->node_id, (unsigned long)pgdat, 7865 (unsigned long)pgdat->node_mem_map); 7866 #ifndef CONFIG_NUMA 7867 /* 7868 * With no DISCONTIG, the global mem_map is just set as node 0's 7869 */ 7870 if (pgdat == NODE_DATA(0)) { 7871 mem_map = NODE_DATA(0)->node_mem_map; 7872 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 7873 mem_map -= offset; 7874 } 7875 #endif 7876 } 7877 #else 7878 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 7879 #endif /* CONFIG_FLATMEM */ 7880 7881 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 7882 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 7883 { 7884 pgdat->first_deferred_pfn = ULONG_MAX; 7885 } 7886 #else 7887 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 7888 #endif 7889 7890 static void __init free_area_init_node(int nid) 7891 { 7892 pg_data_t *pgdat = NODE_DATA(nid); 7893 unsigned long start_pfn = 0; 7894 unsigned long end_pfn = 0; 7895 7896 /* pg_data_t should be reset to zero when it's allocated */ 7897 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 7898 7899 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 7900 7901 pgdat->node_id = nid; 7902 pgdat->node_start_pfn = start_pfn; 7903 pgdat->per_cpu_nodestats = NULL; 7904 7905 if (start_pfn != end_pfn) { 7906 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 7907 (u64)start_pfn << PAGE_SHIFT, 7908 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 7909 } else { 7910 pr_info("Initmem setup node %d as memoryless\n", nid); 7911 } 7912 7913 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 7914 7915 alloc_node_mem_map(pgdat); 7916 pgdat_set_deferred_range(pgdat); 7917 7918 free_area_init_core(pgdat); 7919 } 7920 7921 static void __init free_area_init_memoryless_node(int nid) 7922 { 7923 free_area_init_node(nid); 7924 } 7925 7926 #if MAX_NUMNODES > 1 7927 /* 7928 * Figure out the number of possible node ids. 7929 */ 7930 void __init setup_nr_node_ids(void) 7931 { 7932 unsigned int highest; 7933 7934 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 7935 nr_node_ids = highest + 1; 7936 } 7937 #endif 7938 7939 /** 7940 * node_map_pfn_alignment - determine the maximum internode alignment 7941 * 7942 * This function should be called after node map is populated and sorted. 7943 * It calculates the maximum power of two alignment which can distinguish 7944 * all the nodes. 7945 * 7946 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 7947 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 7948 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 7949 * shifted, 1GiB is enough and this function will indicate so. 7950 * 7951 * This is used to test whether pfn -> nid mapping of the chosen memory 7952 * model has fine enough granularity to avoid incorrect mapping for the 7953 * populated node map. 7954 * 7955 * Return: the determined alignment in pfn's. 0 if there is no alignment 7956 * requirement (single node). 
7957 */ 7958 unsigned long __init node_map_pfn_alignment(void) 7959 { 7960 unsigned long accl_mask = 0, last_end = 0; 7961 unsigned long start, end, mask; 7962 int last_nid = NUMA_NO_NODE; 7963 int i, nid; 7964 7965 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 7966 if (!start || last_nid < 0 || last_nid == nid) { 7967 last_nid = nid; 7968 last_end = end; 7969 continue; 7970 } 7971 7972 /* 7973 * Start with a mask granular enough to pin-point to the 7974 * start pfn and tick off bits one-by-one until it becomes 7975 * too coarse to separate the current node from the last. 7976 */ 7977 mask = ~((1 << __ffs(start)) - 1); 7978 while (mask && last_end <= (start & (mask << 1))) 7979 mask <<= 1; 7980 7981 /* accumulate all internode masks */ 7982 accl_mask |= mask; 7983 } 7984 7985 /* convert mask to number of pages */ 7986 return ~accl_mask + 1; 7987 } 7988 7989 /* 7990 * early_calculate_totalpages() 7991 * Sum pages in active regions for movable zone. 7992 * Populate N_MEMORY for calculating usable_nodes. 7993 */ 7994 static unsigned long __init early_calculate_totalpages(void) 7995 { 7996 unsigned long totalpages = 0; 7997 unsigned long start_pfn, end_pfn; 7998 int i, nid; 7999 8000 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 8001 unsigned long pages = end_pfn - start_pfn; 8002 8003 totalpages += pages; 8004 if (pages) 8005 node_set_state(nid, N_MEMORY); 8006 } 8007 return totalpages; 8008 } 8009 8010 /* 8011 * Find the PFN the Movable zone begins in each node. Kernel memory 8012 * is spread evenly between nodes as long as the nodes have enough 8013 * memory. When they don't, some nodes will have more kernelcore than 8014 * others 8015 */ 8016 static void __init find_zone_movable_pfns_for_nodes(void) 8017 { 8018 int i, nid; 8019 unsigned long usable_startpfn; 8020 unsigned long kernelcore_node, kernelcore_remaining; 8021 /* save the state before borrow the nodemask */ 8022 nodemask_t saved_node_state = node_states[N_MEMORY]; 8023 unsigned long totalpages = early_calculate_totalpages(); 8024 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 8025 struct memblock_region *r; 8026 8027 /* Need to find movable_zone earlier when movable_node is specified. */ 8028 find_usable_zone_for_movable(); 8029 8030 /* 8031 * If movable_node is specified, ignore kernelcore and movablecore 8032 * options. 8033 */ 8034 if (movable_node_is_enabled()) { 8035 for_each_mem_region(r) { 8036 if (!memblock_is_hotpluggable(r)) 8037 continue; 8038 8039 nid = memblock_get_region_node(r); 8040 8041 usable_startpfn = PFN_DOWN(r->base); 8042 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 8043 min(usable_startpfn, zone_movable_pfn[nid]) : 8044 usable_startpfn; 8045 } 8046 8047 goto out2; 8048 } 8049 8050 /* 8051 * If kernelcore=mirror is specified, ignore movablecore option 8052 */ 8053 if (mirrored_kernelcore) { 8054 bool mem_below_4gb_not_mirrored = false; 8055 8056 for_each_mem_region(r) { 8057 if (memblock_is_mirror(r)) 8058 continue; 8059 8060 nid = memblock_get_region_node(r); 8061 8062 usable_startpfn = memblock_region_memory_base_pfn(r); 8063 8064 if (usable_startpfn < PHYS_PFN(SZ_4G)) { 8065 mem_below_4gb_not_mirrored = true; 8066 continue; 8067 } 8068 8069 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 
8070 min(usable_startpfn, zone_movable_pfn[nid]) : 8071 usable_startpfn; 8072 } 8073 8074 if (mem_below_4gb_not_mirrored) 8075 pr_warn("This configuration results in unmirrored kernel memory.\n"); 8076 8077 goto out2; 8078 } 8079 8080 /* 8081 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 8082 * amount of necessary memory. 8083 */ 8084 if (required_kernelcore_percent) 8085 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 8086 10000UL; 8087 if (required_movablecore_percent) 8088 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 8089 10000UL; 8090 8091 /* 8092 * If movablecore= was specified, calculate what size of 8093 * kernelcore that corresponds so that memory usable for 8094 * any allocation type is evenly spread. If both kernelcore 8095 * and movablecore are specified, then the value of kernelcore 8096 * will be used for required_kernelcore if it's greater than 8097 * what movablecore would have allowed. 8098 */ 8099 if (required_movablecore) { 8100 unsigned long corepages; 8101 8102 /* 8103 * Round-up so that ZONE_MOVABLE is at least as large as what 8104 * was requested by the user 8105 */ 8106 required_movablecore = 8107 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 8108 required_movablecore = min(totalpages, required_movablecore); 8109 corepages = totalpages - required_movablecore; 8110 8111 required_kernelcore = max(required_kernelcore, corepages); 8112 } 8113 8114 /* 8115 * If kernelcore was not specified or kernelcore size is larger 8116 * than totalpages, there is no ZONE_MOVABLE. 8117 */ 8118 if (!required_kernelcore || required_kernelcore >= totalpages) 8119 goto out; 8120 8121 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 8122 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 8123 8124 restart: 8125 /* Spread kernelcore memory as evenly as possible throughout nodes */ 8126 kernelcore_node = required_kernelcore / usable_nodes; 8127 for_each_node_state(nid, N_MEMORY) { 8128 unsigned long start_pfn, end_pfn; 8129 8130 /* 8131 * Recalculate kernelcore_node if the division per node 8132 * now exceeds what is necessary to satisfy the requested 8133 * amount of memory for the kernel 8134 */ 8135 if (required_kernelcore < kernelcore_node) 8136 kernelcore_node = required_kernelcore / usable_nodes; 8137 8138 /* 8139 * As the map is walked, we track how much memory is usable 8140 * by the kernel using kernelcore_remaining. 
When it is 8141 * 0, the rest of the node is usable by ZONE_MOVABLE 8142 */ 8143 kernelcore_remaining = kernelcore_node; 8144 8145 /* Go through each range of PFNs within this node */ 8146 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 8147 unsigned long size_pages; 8148 8149 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 8150 if (start_pfn >= end_pfn) 8151 continue; 8152 8153 /* Account for what is only usable for kernelcore */ 8154 if (start_pfn < usable_startpfn) { 8155 unsigned long kernel_pages; 8156 kernel_pages = min(end_pfn, usable_startpfn) 8157 - start_pfn; 8158 8159 kernelcore_remaining -= min(kernel_pages, 8160 kernelcore_remaining); 8161 required_kernelcore -= min(kernel_pages, 8162 required_kernelcore); 8163 8164 /* Continue if range is now fully accounted */ 8165 if (end_pfn <= usable_startpfn) { 8166 8167 /* 8168 * Push zone_movable_pfn to the end so 8169 * that if we have to rebalance 8170 * kernelcore across nodes, we will 8171 * not double account here 8172 */ 8173 zone_movable_pfn[nid] = end_pfn; 8174 continue; 8175 } 8176 start_pfn = usable_startpfn; 8177 } 8178 8179 /* 8180 * The usable PFN range for ZONE_MOVABLE is from 8181 * start_pfn->end_pfn. Calculate size_pages as the 8182 * number of pages used as kernelcore 8183 */ 8184 size_pages = end_pfn - start_pfn; 8185 if (size_pages > kernelcore_remaining) 8186 size_pages = kernelcore_remaining; 8187 zone_movable_pfn[nid] = start_pfn + size_pages; 8188 8189 /* 8190 * Some kernelcore has been met, update counts and 8191 * break if the kernelcore for this node has been 8192 * satisfied 8193 */ 8194 required_kernelcore -= min(required_kernelcore, 8195 size_pages); 8196 kernelcore_remaining -= size_pages; 8197 if (!kernelcore_remaining) 8198 break; 8199 } 8200 } 8201 8202 /* 8203 * If there is still required_kernelcore, we do another pass with one 8204 * less node in the count. This will push zone_movable_pfn[nid] further 8205 * along on the nodes that still have memory until kernelcore is 8206 * satisfied 8207 */ 8208 usable_nodes--; 8209 if (usable_nodes && required_kernelcore > usable_nodes) 8210 goto restart; 8211 8212 out2: 8213 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 8214 for (nid = 0; nid < MAX_NUMNODES; nid++) { 8215 unsigned long start_pfn, end_pfn; 8216 8217 zone_movable_pfn[nid] = 8218 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 8219 8220 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 8221 if (zone_movable_pfn[nid] >= end_pfn) 8222 zone_movable_pfn[nid] = 0; 8223 } 8224 8225 out: 8226 /* restore the node_state */ 8227 node_states[N_MEMORY] = saved_node_state; 8228 } 8229 8230 /* Any regular or high memory on that node ? */ 8231 static void check_for_memory(pg_data_t *pgdat, int nid) 8232 { 8233 enum zone_type zone_type; 8234 8235 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 8236 struct zone *zone = &pgdat->node_zones[zone_type]; 8237 if (populated_zone(zone)) { 8238 if (IS_ENABLED(CONFIG_HIGHMEM)) 8239 node_set_state(nid, N_HIGH_MEMORY); 8240 if (zone_type <= ZONE_NORMAL) 8241 node_set_state(nid, N_NORMAL_MEMORY); 8242 break; 8243 } 8244 } 8245 } 8246 8247 /* 8248 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. 
For 8249 * such cases we allow max_zone_pfn sorted in the descending order 8250 */ 8251 bool __weak arch_has_descending_max_zone_pfns(void) 8252 { 8253 return false; 8254 } 8255 8256 /** 8257 * free_area_init - Initialise all pg_data_t and zone data 8258 * @max_zone_pfn: an array of max PFNs for each zone 8259 * 8260 * This will call free_area_init_node() for each active node in the system. 8261 * Using the page ranges provided by memblock_set_node(), the size of each 8262 * zone in each node and their holes is calculated. If the maximum PFN 8263 * between two adjacent zones match, it is assumed that the zone is empty. 8264 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 8265 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 8266 * starts where the previous one ended. For example, ZONE_DMA32 starts 8267 * at arch_max_dma_pfn. 8268 */ 8269 void __init free_area_init(unsigned long *max_zone_pfn) 8270 { 8271 unsigned long start_pfn, end_pfn; 8272 int i, nid, zone; 8273 bool descending; 8274 8275 /* Record where the zone boundaries are */ 8276 memset(arch_zone_lowest_possible_pfn, 0, 8277 sizeof(arch_zone_lowest_possible_pfn)); 8278 memset(arch_zone_highest_possible_pfn, 0, 8279 sizeof(arch_zone_highest_possible_pfn)); 8280 8281 start_pfn = PHYS_PFN(memblock_start_of_DRAM()); 8282 descending = arch_has_descending_max_zone_pfns(); 8283 8284 for (i = 0; i < MAX_NR_ZONES; i++) { 8285 if (descending) 8286 zone = MAX_NR_ZONES - i - 1; 8287 else 8288 zone = i; 8289 8290 if (zone == ZONE_MOVABLE) 8291 continue; 8292 8293 end_pfn = max(max_zone_pfn[zone], start_pfn); 8294 arch_zone_lowest_possible_pfn[zone] = start_pfn; 8295 arch_zone_highest_possible_pfn[zone] = end_pfn; 8296 8297 start_pfn = end_pfn; 8298 } 8299 8300 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 8301 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 8302 find_zone_movable_pfns_for_nodes(); 8303 8304 /* Print out the zone ranges */ 8305 pr_info("Zone ranges:\n"); 8306 for (i = 0; i < MAX_NR_ZONES; i++) { 8307 if (i == ZONE_MOVABLE) 8308 continue; 8309 pr_info(" %-8s ", zone_names[i]); 8310 if (arch_zone_lowest_possible_pfn[i] == 8311 arch_zone_highest_possible_pfn[i]) 8312 pr_cont("empty\n"); 8313 else 8314 pr_cont("[mem %#018Lx-%#018Lx]\n", 8315 (u64)arch_zone_lowest_possible_pfn[i] 8316 << PAGE_SHIFT, 8317 ((u64)arch_zone_highest_possible_pfn[i] 8318 << PAGE_SHIFT) - 1); 8319 } 8320 8321 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 8322 pr_info("Movable zone start for each node\n"); 8323 for (i = 0; i < MAX_NUMNODES; i++) { 8324 if (zone_movable_pfn[i]) 8325 pr_info(" Node %d: %#018Lx\n", i, 8326 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 8327 } 8328 8329 /* 8330 * Print out the early node map, and initialize the 8331 * subsection-map relative to active online memory ranges to 8332 * enable future "sub-section" extensions of the memory map. 
8333 */ 8334 pr_info("Early memory node ranges\n"); 8335 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 8336 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 8337 (u64)start_pfn << PAGE_SHIFT, 8338 ((u64)end_pfn << PAGE_SHIFT) - 1); 8339 subsection_map_init(start_pfn, end_pfn - start_pfn); 8340 } 8341 8342 /* Initialise every node */ 8343 mminit_verify_pageflags_layout(); 8344 setup_nr_node_ids(); 8345 for_each_node(nid) { 8346 pg_data_t *pgdat; 8347 8348 if (!node_online(nid)) { 8349 pr_info("Initializing node %d as memoryless\n", nid); 8350 8351 /* Allocator not initialized yet */ 8352 pgdat = arch_alloc_nodedata(nid); 8353 if (!pgdat) { 8354 pr_err("Cannot allocate %zuB for node %d.\n", 8355 sizeof(*pgdat), nid); 8356 continue; 8357 } 8358 arch_refresh_nodedata(nid, pgdat); 8359 free_area_init_memoryless_node(nid); 8360 8361 /* 8362 * We do not want to confuse userspace by sysfs 8363 * files/directories for node without any memory 8364 * attached to it, so this node is not marked as 8365 * N_MEMORY and not marked online so that no sysfs 8366 * hierarchy will be created via register_one_node for 8367 * it. The pgdat will get fully initialized by 8368 * hotadd_init_pgdat() when memory is hotplugged into 8369 * this node. 8370 */ 8371 continue; 8372 } 8373 8374 pgdat = NODE_DATA(nid); 8375 free_area_init_node(nid); 8376 8377 /* Any memory on that node */ 8378 if (pgdat->node_present_pages) 8379 node_set_state(nid, N_MEMORY); 8380 check_for_memory(pgdat, nid); 8381 } 8382 8383 memmap_init(); 8384 } 8385 8386 static int __init cmdline_parse_core(char *p, unsigned long *core, 8387 unsigned long *percent) 8388 { 8389 unsigned long long coremem; 8390 char *endptr; 8391 8392 if (!p) 8393 return -EINVAL; 8394 8395 /* Value may be a percentage of total memory, otherwise bytes */ 8396 coremem = simple_strtoull(p, &endptr, 0); 8397 if (*endptr == '%') { 8398 /* Paranoid check for percent values greater than 100 */ 8399 WARN_ON(coremem > 100); 8400 8401 *percent = coremem; 8402 } else { 8403 coremem = memparse(p, &p); 8404 /* Paranoid check that UL is enough for the coremem value */ 8405 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 8406 8407 *core = coremem >> PAGE_SHIFT; 8408 *percent = 0UL; 8409 } 8410 return 0; 8411 } 8412 8413 /* 8414 * kernelcore=size sets the amount of memory for use for allocations that 8415 * cannot be reclaimed or migrated. 8416 */ 8417 static int __init cmdline_parse_kernelcore(char *p) 8418 { 8419 /* parse kernelcore=mirror */ 8420 if (parse_option_str(p, "mirror")) { 8421 mirrored_kernelcore = true; 8422 return 0; 8423 } 8424 8425 return cmdline_parse_core(p, &required_kernelcore, 8426 &required_kernelcore_percent); 8427 } 8428 8429 /* 8430 * movablecore=size sets the amount of memory for use for allocations that 8431 * can be reclaimed or migrated. 
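*
* Usage example (illustrative values): booting with "movablecore=1G" or
* "movablecore=25%" requests that much memory for ZONE_MOVABLE, while
* "kernelcore=512M" or "kernelcore=30%" sizes the non-movable portion
* instead, and "kernelcore=mirror" restricts non-movable allocations to
* mirrored memory (see cmdline_parse_kernelcore() above).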
8432 */ 8433 static int __init cmdline_parse_movablecore(char *p) 8434 { 8435 return cmdline_parse_core(p, &required_movablecore, 8436 &required_movablecore_percent); 8437 } 8438 8439 early_param("kernelcore", cmdline_parse_kernelcore); 8440 early_param("movablecore", cmdline_parse_movablecore); 8441 8442 void adjust_managed_page_count(struct page *page, long count) 8443 { 8444 atomic_long_add(count, &page_zone(page)->managed_pages); 8445 totalram_pages_add(count); 8446 #ifdef CONFIG_HIGHMEM 8447 if (PageHighMem(page)) 8448 totalhigh_pages_add(count); 8449 #endif 8450 } 8451 EXPORT_SYMBOL(adjust_managed_page_count); 8452 8453 unsigned long free_reserved_area(void *start, void *end, int poison, const char *s) 8454 { 8455 void *pos; 8456 unsigned long pages = 0; 8457 8458 start = (void *)PAGE_ALIGN((unsigned long)start); 8459 end = (void *)((unsigned long)end & PAGE_MASK); 8460 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 8461 struct page *page = virt_to_page(pos); 8462 void *direct_map_addr; 8463 8464 /* 8465 * 'direct_map_addr' might be different from 'pos' 8466 * because some architectures' virt_to_page() 8467 * work with aliases. Getting the direct map 8468 * address ensures that we get a _writeable_ 8469 * alias for the memset(). 8470 */ 8471 direct_map_addr = page_address(page); 8472 /* 8473 * Perform a kasan-unchecked memset() since this memory 8474 * has not been initialized. 8475 */ 8476 direct_map_addr = kasan_reset_tag(direct_map_addr); 8477 if ((unsigned int)poison <= 0xFF) 8478 memset(direct_map_addr, poison, PAGE_SIZE); 8479 8480 free_reserved_page(page); 8481 } 8482 8483 if (pages && s) 8484 pr_info("Freeing %s memory: %ldK\n", s, K(pages)); 8485 8486 return pages; 8487 } 8488 8489 void __init mem_init_print_info(void) 8490 { 8491 unsigned long physpages, codesize, datasize, rosize, bss_size; 8492 unsigned long init_code_size, init_data_size; 8493 8494 physpages = get_num_physpages(); 8495 codesize = _etext - _stext; 8496 datasize = _edata - _sdata; 8497 rosize = __end_rodata - __start_rodata; 8498 bss_size = __bss_stop - __bss_start; 8499 init_data_size = __init_end - __init_begin; 8500 init_code_size = _einittext - _sinittext; 8501 8502 /* 8503 * Detect special cases and adjust section sizes accordingly: 8504 * 1) .init.* may be embedded into .data sections 8505 * 2) .init.text.* may be out of [__init_begin, __init_end], 8506 * please refer to arch/tile/kernel/vmlinux.lds.S. 8507 * 3) .rodata.* may be embedded into .text or .data sections. 
8508 */ 8509 #define adj_init_size(start, end, size, pos, adj) \ 8510 do { \ 8511 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ 8512 size -= adj; \ 8513 } while (0) 8514 8515 adj_init_size(__init_begin, __init_end, init_data_size, 8516 _sinittext, init_code_size); 8517 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 8518 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 8519 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 8520 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 8521 8522 #undef adj_init_size 8523 8524 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 8525 #ifdef CONFIG_HIGHMEM 8526 ", %luK highmem" 8527 #endif 8528 ")\n", 8529 K(nr_free_pages()), K(physpages), 8530 codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K, 8531 (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K, 8532 K(physpages - totalram_pages() - totalcma_pages), 8533 K(totalcma_pages) 8534 #ifdef CONFIG_HIGHMEM 8535 , K(totalhigh_pages()) 8536 #endif 8537 ); 8538 } 8539 8540 /** 8541 * set_dma_reserve - set the specified number of pages reserved in the first zone 8542 * @new_dma_reserve: The number of pages to mark reserved 8543 * 8544 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 8545 * In the DMA zone, a significant percentage may be consumed by kernel image 8546 * and other unfreeable allocations which can skew the watermarks badly. This 8547 * function may optionally be used to account for unfreeable pages in the 8548 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 8549 * smaller per-cpu batchsize. 8550 */ 8551 void __init set_dma_reserve(unsigned long new_dma_reserve) 8552 { 8553 dma_reserve = new_dma_reserve; 8554 } 8555 8556 static int page_alloc_cpu_dead(unsigned int cpu) 8557 { 8558 struct zone *zone; 8559 8560 lru_add_drain_cpu(cpu); 8561 mlock_page_drain_remote(cpu); 8562 drain_pages(cpu); 8563 8564 /* 8565 * Spill the event counters of the dead processor 8566 * into the current processors event counters. 8567 * This artificially elevates the count of the current 8568 * processor. 8569 */ 8570 vm_events_fold_cpu(cpu); 8571 8572 /* 8573 * Zero the differential counters of the dead processor 8574 * so that the vm statistics are consistent. 8575 * 8576 * This is only okay since the processor is dead and cannot 8577 * race with what we are doing. 
8578 */ 8579 cpu_vm_stats_fold(cpu); 8580 8581 for_each_populated_zone(zone) 8582 zone_pcp_update(zone, 0); 8583 8584 return 0; 8585 } 8586 8587 static int page_alloc_cpu_online(unsigned int cpu) 8588 { 8589 struct zone *zone; 8590 8591 for_each_populated_zone(zone) 8592 zone_pcp_update(zone, 1); 8593 return 0; 8594 } 8595 8596 #ifdef CONFIG_NUMA 8597 int hashdist = HASHDIST_DEFAULT; 8598 8599 static int __init set_hashdist(char *str) 8600 { 8601 if (!str) 8602 return 0; 8603 hashdist = simple_strtoul(str, &str, 0); 8604 return 1; 8605 } 8606 __setup("hashdist=", set_hashdist); 8607 #endif 8608 8609 void __init page_alloc_init(void) 8610 { 8611 int ret; 8612 8613 #ifdef CONFIG_NUMA 8614 if (num_node_state(N_MEMORY) == 1) 8615 hashdist = 0; 8616 #endif 8617 8618 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC, 8619 "mm/page_alloc:pcp", 8620 page_alloc_cpu_online, 8621 page_alloc_cpu_dead); 8622 WARN_ON(ret < 0); 8623 } 8624 8625 /* 8626 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 8627 * or min_free_kbytes changes. 8628 */ 8629 static void calculate_totalreserve_pages(void) 8630 { 8631 struct pglist_data *pgdat; 8632 unsigned long reserve_pages = 0; 8633 enum zone_type i, j; 8634 8635 for_each_online_pgdat(pgdat) { 8636 8637 pgdat->totalreserve_pages = 0; 8638 8639 for (i = 0; i < MAX_NR_ZONES; i++) { 8640 struct zone *zone = pgdat->node_zones + i; 8641 long max = 0; 8642 unsigned long managed_pages = zone_managed_pages(zone); 8643 8644 /* Find valid and maximum lowmem_reserve in the zone */ 8645 for (j = i; j < MAX_NR_ZONES; j++) { 8646 if (zone->lowmem_reserve[j] > max) 8647 max = zone->lowmem_reserve[j]; 8648 } 8649 8650 /* we treat the high watermark as reserved pages. */ 8651 max += high_wmark_pages(zone); 8652 8653 if (max > managed_pages) 8654 max = managed_pages; 8655 8656 pgdat->totalreserve_pages += max; 8657 8658 reserve_pages += max; 8659 } 8660 } 8661 totalreserve_pages = reserve_pages; 8662 } 8663 8664 /* 8665 * setup_per_zone_lowmem_reserve - called whenever 8666 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 8667 * has a correct pages reserved value, so an adequate number of 8668 * pages are left in the zone after a successful __alloc_pages(). 
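*
* Worked example (illustrative, with the default ratio of 256 for
* ZONE_DMA32): if 16GiB of ZONE_NORMAL sits above ZONE_DMA32, then
* ZONE_DMA32 keeps 16GiB / 256 = 64MiB (16384 pages with 4KiB pages)
* in reserve against allocations that could have used ZONE_NORMAL.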
8669 */
8670 static void setup_per_zone_lowmem_reserve(void)
8671 {
8672 struct pglist_data *pgdat;
8673 enum zone_type i, j;
8674
8675 for_each_online_pgdat(pgdat) {
8676 for (i = 0; i < MAX_NR_ZONES - 1; i++) {
8677 struct zone *zone = &pgdat->node_zones[i];
8678 int ratio = sysctl_lowmem_reserve_ratio[i];
8679 bool clear = !ratio || !zone_managed_pages(zone);
8680 unsigned long managed_pages = 0;
8681
8682 for (j = i + 1; j < MAX_NR_ZONES; j++) {
8683 struct zone *upper_zone = &pgdat->node_zones[j];
8684
8685 managed_pages += zone_managed_pages(upper_zone);
8686
8687 if (clear)
8688 zone->lowmem_reserve[j] = 0;
8689 else
8690 zone->lowmem_reserve[j] = managed_pages / ratio;
8691 }
8692 }
8693 }
8694
8695 /* update totalreserve_pages */
8696 calculate_totalreserve_pages();
8697 }
8698
8699 static void __setup_per_zone_wmarks(void)
8700 {
8701 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
8702 unsigned long lowmem_pages = 0;
8703 struct zone *zone;
8704 unsigned long flags;
8705
8706 /* Calculate total number of !ZONE_HIGHMEM pages */
8707 for_each_zone(zone) {
8708 if (!is_highmem(zone))
8709 lowmem_pages += zone_managed_pages(zone);
8710 }
8711
8712 for_each_zone(zone) {
8713 u64 tmp;
8714
8715 spin_lock_irqsave(&zone->lock, flags);
8716 tmp = (u64)pages_min * zone_managed_pages(zone);
8717 do_div(tmp, lowmem_pages);
8718 if (is_highmem(zone)) {
8719 /*
8720 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
8721 * need highmem pages, so cap pages_min to a small
8722 * value here.
8723 *
8724 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
8725 * deltas control async page reclaim, and so should
8726 * not be capped for highmem.
8727 */
8728 unsigned long min_pages;
8729
8730 min_pages = zone_managed_pages(zone) / 1024;
8731 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
8732 zone->_watermark[WMARK_MIN] = min_pages;
8733 } else {
8734 /*
8735 * If it's a lowmem zone, reserve a number of pages
8736 * proportionate to the zone's size.
8737 */
8738 zone->_watermark[WMARK_MIN] = tmp;
8739 }
8740
8741 /*
8742 * Set the kswapd watermarks distance according to the
8743 * scale factor in proportion to available memory, but
8744 * ensure a minimum size on small systems.
8745 */
8746 tmp = max_t(u64, tmp >> 2,
8747 mult_frac(zone_managed_pages(zone),
8748 watermark_scale_factor, 10000));
8749
8750 zone->watermark_boost = 0;
8751 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
8752 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
8753 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
8754
8755 spin_unlock_irqrestore(&zone->lock, flags);
8756 }
8757
8758 /* update totalreserve_pages */
8759 calculate_totalreserve_pages();
8760 }
8761
8762 /**
8763 * setup_per_zone_wmarks - called when min_free_kbytes changes
8764 * or when memory is hot-{added|removed}
8765 *
8766 * Ensures that the watermark[min,low,high] values for each zone are set
8767 * correctly with respect to min_free_kbytes.
8768 */
8769 void setup_per_zone_wmarks(void)
8770 {
8771 struct zone *zone;
8772 static DEFINE_SPINLOCK(lock);
8773
8774 spin_lock(&lock);
8775 __setup_per_zone_wmarks();
8776 spin_unlock(&lock);
8777
8778 /*
8779 * The watermark sizes have changed, so update the pcpu batch
8780 * and high limits or the limits may be inappropriate.
8781 */
8782 for_each_zone(zone)
8783 zone_pcp_update(zone, 0);
8784 }
8785
8786 /*
8787 * Initialise min_free_kbytes.
8788 *
8789 * For small machines we want it small (128k min).
For large machines 8790 * we want it large (256MB max). But it is not linear, because network 8791 * bandwidth does not increase linearly with machine size. We use 8792 * 8793 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 8794 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 8795 * 8796 * which yields 8797 * 8798 * 16MB: 512k 8799 * 32MB: 724k 8800 * 64MB: 1024k 8801 * 128MB: 1448k 8802 * 256MB: 2048k 8803 * 512MB: 2896k 8804 * 1024MB: 4096k 8805 * 2048MB: 5792k 8806 * 4096MB: 8192k 8807 * 8192MB: 11584k 8808 * 16384MB: 16384k 8809 */ 8810 void calculate_min_free_kbytes(void) 8811 { 8812 unsigned long lowmem_kbytes; 8813 int new_min_free_kbytes; 8814 8815 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 8816 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 8817 8818 if (new_min_free_kbytes > user_min_free_kbytes) 8819 min_free_kbytes = clamp(new_min_free_kbytes, 128, 262144); 8820 else 8821 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 8822 new_min_free_kbytes, user_min_free_kbytes); 8823 8824 } 8825 8826 int __meminit init_per_zone_wmark_min(void) 8827 { 8828 calculate_min_free_kbytes(); 8829 setup_per_zone_wmarks(); 8830 refresh_zone_stat_thresholds(); 8831 setup_per_zone_lowmem_reserve(); 8832 8833 #ifdef CONFIG_NUMA 8834 setup_min_unmapped_ratio(); 8835 setup_min_slab_ratio(); 8836 #endif 8837 8838 khugepaged_min_free_kbytes_update(); 8839 8840 return 0; 8841 } 8842 postcore_initcall(init_per_zone_wmark_min) 8843 8844 /* 8845 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 8846 * that we can call two helper functions whenever min_free_kbytes 8847 * changes. 8848 */ 8849 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 8850 void *buffer, size_t *length, loff_t *ppos) 8851 { 8852 int rc; 8853 8854 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8855 if (rc) 8856 return rc; 8857 8858 if (write) { 8859 user_min_free_kbytes = min_free_kbytes; 8860 setup_per_zone_wmarks(); 8861 } 8862 return 0; 8863 } 8864 8865 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 8866 void *buffer, size_t *length, loff_t *ppos) 8867 { 8868 int rc; 8869 8870 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8871 if (rc) 8872 return rc; 8873 8874 if (write) 8875 setup_per_zone_wmarks(); 8876 8877 return 0; 8878 } 8879 8880 #ifdef CONFIG_NUMA 8881 static void setup_min_unmapped_ratio(void) 8882 { 8883 pg_data_t *pgdat; 8884 struct zone *zone; 8885 8886 for_each_online_pgdat(pgdat) 8887 pgdat->min_unmapped_pages = 0; 8888 8889 for_each_zone(zone) 8890 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * 8891 sysctl_min_unmapped_ratio) / 100; 8892 } 8893 8894 8895 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 8896 void *buffer, size_t *length, loff_t *ppos) 8897 { 8898 int rc; 8899 8900 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 8901 if (rc) 8902 return rc; 8903 8904 setup_min_unmapped_ratio(); 8905 8906 return 0; 8907 } 8908 8909 static void setup_min_slab_ratio(void) 8910 { 8911 pg_data_t *pgdat; 8912 struct zone *zone; 8913 8914 for_each_online_pgdat(pgdat) 8915 pgdat->min_slab_pages = 0; 8916 8917 for_each_zone(zone) 8918 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * 8919 sysctl_min_slab_ratio) / 100; 8920 } 8921 8922 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 8923 void *buffer, size_t *length, loff_t *ppos) 8924 
{
8925 int rc;
8926
8927 rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
8928 if (rc)
8929 return rc;
8930
8931 setup_min_slab_ratio();
8932
8933 return 0;
8934 }
8935 #endif
8936
8937 /*
8938 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
8939 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
8940 * whenever sysctl_lowmem_reserve_ratio changes.
8941 *
8942 * The reserve ratio obviously has absolutely no relation with the
8943 * minimum watermarks. The lowmem reserve ratio only makes sense
8944 * as a function of the boot-time zone sizes.
8945 */
8946 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
8947 void *buffer, size_t *length, loff_t *ppos)
8948 {
8949 int i;
8950
8951 proc_dointvec_minmax(table, write, buffer, length, ppos);
8952
8953 for (i = 0; i < MAX_NR_ZONES; i++) {
8954 if (sysctl_lowmem_reserve_ratio[i] < 1)
8955 sysctl_lowmem_reserve_ratio[i] = 0;
8956 }
8957
8958 setup_per_zone_lowmem_reserve();
8959 return 0;
8960 }
8961
8962 /*
8963 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
8964 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
8965 * pagelist can have before it gets flushed back to the buddy allocator.
8966 */
8967 int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table,
8968 int write, void *buffer, size_t *length, loff_t *ppos)
8969 {
8970 struct zone *zone;
8971 int old_percpu_pagelist_high_fraction;
8972 int ret;
8973
8974 mutex_lock(&pcp_batch_high_lock);
8975 old_percpu_pagelist_high_fraction = percpu_pagelist_high_fraction;
8976
8977 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
8978 if (!write || ret < 0)
8979 goto out;
8980
8981 /* Sanity checking to avoid pcp imbalance */
8982 if (percpu_pagelist_high_fraction &&
8983 percpu_pagelist_high_fraction < MIN_PERCPU_PAGELIST_HIGH_FRACTION) {
8984 percpu_pagelist_high_fraction = old_percpu_pagelist_high_fraction;
8985 ret = -EINVAL;
8986 goto out;
8987 }
8988
8989 /* No change? */
8990 if (percpu_pagelist_high_fraction == old_percpu_pagelist_high_fraction)
8991 goto out;
8992
8993 for_each_populated_zone(zone)
8994 zone_set_pageset_high_and_batch(zone, 0);
8995 out:
8996 mutex_unlock(&pcp_batch_high_lock);
8997 return ret;
8998 }
8999
9000 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
9001 /*
9002 * Returns the number of pages that the arch has reserved but
9003 * is not known to alloc_large_system_hash().
9004 */
9005 static unsigned long __init arch_reserved_kernel_pages(void)
9006 {
9007 return 0;
9008 }
9009 #endif
9010
9011 /*
9012 * Adaptive scale is meant to reduce sizes of hash tables on large memory
9013 * machines. As memory size is increased the scale is also increased but at
9014 * a slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
9015 * quadruples the scale is increased by one, which means the size of the hash table
9016 * only doubles, instead of quadrupling as well.
9017 * Because 32-bit systems cannot have large physical memory, where this scaling
9018 * makes sense, it is disabled on such platforms.
9019 */ 9020 #if __BITS_PER_LONG > 32 9021 #define ADAPT_SCALE_BASE (64ul << 30) 9022 #define ADAPT_SCALE_SHIFT 2 9023 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 9024 #endif 9025 9026 /* 9027 * allocate a large system hash table from bootmem 9028 * - it is assumed that the hash table must contain an exact power-of-2 9029 * quantity of entries 9030 * - limit is the number of hash buckets, not the total allocation size 9031 */ 9032 void *__init alloc_large_system_hash(const char *tablename, 9033 unsigned long bucketsize, 9034 unsigned long numentries, 9035 int scale, 9036 int flags, 9037 unsigned int *_hash_shift, 9038 unsigned int *_hash_mask, 9039 unsigned long low_limit, 9040 unsigned long high_limit) 9041 { 9042 unsigned long long max = high_limit; 9043 unsigned long log2qty, size; 9044 void *table; 9045 gfp_t gfp_flags; 9046 bool virt; 9047 bool huge; 9048 9049 /* allow the kernel cmdline to have a say */ 9050 if (!numentries) { 9051 /* round applicable memory size up to nearest megabyte */ 9052 numentries = nr_kernel_pages; 9053 numentries -= arch_reserved_kernel_pages(); 9054 9055 /* It isn't necessary when PAGE_SIZE >= 1MB */ 9056 if (PAGE_SIZE < SZ_1M) 9057 numentries = round_up(numentries, SZ_1M / PAGE_SIZE); 9058 9059 #if __BITS_PER_LONG > 32 9060 if (!high_limit) { 9061 unsigned long adapt; 9062 9063 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 9064 adapt <<= ADAPT_SCALE_SHIFT) 9065 scale++; 9066 } 9067 #endif 9068 9069 /* limit to 1 bucket per 2^scale bytes of low memory */ 9070 if (scale > PAGE_SHIFT) 9071 numentries >>= (scale - PAGE_SHIFT); 9072 else 9073 numentries <<= (PAGE_SHIFT - scale); 9074 9075 /* Make sure we've got at least a 0-order allocation.. */ 9076 if (unlikely(flags & HASH_SMALL)) { 9077 /* Makes no sense without HASH_EARLY */ 9078 WARN_ON(!(flags & HASH_EARLY)); 9079 if (!(numentries >> *_hash_shift)) { 9080 numentries = 1UL << *_hash_shift; 9081 BUG_ON(!numentries); 9082 } 9083 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 9084 numentries = PAGE_SIZE / bucketsize; 9085 } 9086 numentries = roundup_pow_of_two(numentries); 9087 9088 /* limit allocation size to 1/16 total memory by default */ 9089 if (max == 0) { 9090 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 9091 do_div(max, bucketsize); 9092 } 9093 max = min(max, 0x80000000ULL); 9094 9095 if (numentries < low_limit) 9096 numentries = low_limit; 9097 if (numentries > max) 9098 numentries = max; 9099 9100 log2qty = ilog2(numentries); 9101 9102 gfp_flags = (flags & HASH_ZERO) ? 
GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 9103 do { 9104 virt = false; 9105 size = bucketsize << log2qty; 9106 if (flags & HASH_EARLY) { 9107 if (flags & HASH_ZERO) 9108 table = memblock_alloc(size, SMP_CACHE_BYTES); 9109 else 9110 table = memblock_alloc_raw(size, 9111 SMP_CACHE_BYTES); 9112 } else if (get_order(size) >= MAX_ORDER || hashdist) { 9113 table = vmalloc_huge(size, gfp_flags); 9114 virt = true; 9115 if (table) 9116 huge = is_vm_area_hugepages(table); 9117 } else { 9118 /* 9119 * If bucketsize is not a power-of-two, we may free 9120 * some pages at the end of hash table which 9121 * alloc_pages_exact() automatically does 9122 */ 9123 table = alloc_pages_exact(size, gfp_flags); 9124 kmemleak_alloc(table, size, 1, gfp_flags); 9125 } 9126 } while (!table && size > PAGE_SIZE && --log2qty); 9127 9128 if (!table) 9129 panic("Failed to allocate %s hash table\n", tablename); 9130 9131 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 9132 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, 9133 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); 9134 9135 if (_hash_shift) 9136 *_hash_shift = log2qty; 9137 if (_hash_mask) 9138 *_hash_mask = (1 << log2qty) - 1; 9139 9140 return table; 9141 } 9142 9143 #ifdef CONFIG_CONTIG_ALLOC 9144 #if defined(CONFIG_DYNAMIC_DEBUG) || \ 9145 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) 9146 /* Usage: See admin-guide/dynamic-debug-howto.rst */ 9147 static void alloc_contig_dump_pages(struct list_head *page_list) 9148 { 9149 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, "migrate failure"); 9150 9151 if (DYNAMIC_DEBUG_BRANCH(descriptor)) { 9152 struct page *page; 9153 9154 dump_stack(); 9155 list_for_each_entry(page, page_list, lru) 9156 dump_page(page, "migration failure"); 9157 } 9158 } 9159 #else 9160 static inline void alloc_contig_dump_pages(struct list_head *page_list) 9161 { 9162 } 9163 #endif 9164 9165 /* [start, end) must belong to a single zone. */ 9166 int __alloc_contig_migrate_range(struct compact_control *cc, 9167 unsigned long start, unsigned long end) 9168 { 9169 /* This function is based on compact_zone() from compaction.c. */ 9170 unsigned int nr_reclaimed; 9171 unsigned long pfn = start; 9172 unsigned int tries = 0; 9173 int ret = 0; 9174 struct migration_target_control mtc = { 9175 .nid = zone_to_nid(cc->zone), 9176 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, 9177 }; 9178 9179 lru_cache_disable(); 9180 9181 while (pfn < end || !list_empty(&cc->migratepages)) { 9182 if (fatal_signal_pending(current)) { 9183 ret = -EINTR; 9184 break; 9185 } 9186 9187 if (list_empty(&cc->migratepages)) { 9188 cc->nr_migratepages = 0; 9189 ret = isolate_migratepages_range(cc, pfn, end); 9190 if (ret && ret != -EAGAIN) 9191 break; 9192 pfn = cc->migrate_pfn; 9193 tries = 0; 9194 } else if (++tries == 5) { 9195 ret = -EBUSY; 9196 break; 9197 } 9198 9199 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 9200 &cc->migratepages); 9201 cc->nr_migratepages -= nr_reclaimed; 9202 9203 ret = migrate_pages(&cc->migratepages, alloc_migration_target, 9204 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); 9205 9206 /* 9207 * On -ENOMEM, migrate_pages() bails out right away. It is pointless 9208 * to retry again over this error, so do the same here. 
9209 		 */
9210 		if (ret == -ENOMEM)
9211 			break;
9212 	}
9213
9214 	lru_cache_enable();
9215 	if (ret < 0) {
9216 		if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
9217 			alloc_contig_dump_pages(&cc->migratepages);
9218 		putback_movable_pages(&cc->migratepages);
9219 		return ret;
9220 	}
9221 	return 0;
9222 }
9223
9224 /**
9225  * alloc_contig_range() -- tries to allocate the given range of pages
9226  * @start:	start PFN to allocate
9227  * @end:	one-past-the-last PFN to allocate
9228  * @migratetype:	migratetype of the underlying pageblocks (either
9229  *			#MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
9230  *			in range must have the same migratetype and it must
9231  *			be either of the two.
9232  * @gfp_mask:	GFP mask to use during compaction
9233  *
9234  * The PFN range does not have to be pageblock aligned. The PFN range must
9235  * belong to a single zone.
9236  *
9237  * The first thing this routine does is attempt to MIGRATE_ISOLATE all
9238  * pageblocks in the range. Once isolated, the pageblocks should not
9239  * be modified by others.
9240  *
9241  * Return: zero on success or negative error code. On success, all
9242  * pages whose PFNs are in [start, end) are allocated for the caller and
9243  * need to be freed with free_contig_range(). (Usage is sketched at the end of this file.)
9244  */
9245 int alloc_contig_range(unsigned long start, unsigned long end,
9246 		       unsigned migratetype, gfp_t gfp_mask)
9247 {
9248 	unsigned long outer_start, outer_end;
9249 	int order;
9250 	int ret = 0;
9251
9252 	struct compact_control cc = {
9253 		.nr_migratepages = 0,
9254 		.order = -1,
9255 		.zone = page_zone(pfn_to_page(start)),
9256 		.mode = MIGRATE_SYNC,
9257 		.ignore_skip_hint = true,
9258 		.no_set_skip_hint = true,
9259 		.gfp_mask = current_gfp_context(gfp_mask),
9260 		.alloc_contig = true,
9261 	};
9262 	INIT_LIST_HEAD(&cc.migratepages);
9263
9264 	/*
9265 	 * What we do here is mark all pageblocks in the range as
9266 	 * MIGRATE_ISOLATE. Because pageblock and max-order pages may
9267 	 * have different sizes, and due to the way the page allocator
9268 	 * works, start_isolate_page_range() has special handling for this.
9269 	 *
9270 	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
9271 	 * migrate the pages from the unaligned range (i.e. the pages that
9272 	 * we are interested in). This will put all the pages in the
9273 	 * range back into the page allocator as MIGRATE_ISOLATE.
9274 	 *
9275 	 * When this is done, we take the pages in the range from the page
9276 	 * allocator, removing them from the buddy system. This way the
9277 	 * page allocator will never consider using them.
9278 	 *
9279 	 * This lets us mark the pageblocks back as
9280 	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
9281 	 * aligned range but not in the unaligned, original range are
9282 	 * put back into the page allocator so that the buddy system can use them.
9283 	 */
9284
9285 	ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
9286 	if (ret)
9287 		goto done;
9288
9289 	drain_all_pages(cc.zone);
9290
9291 	/*
9292 	 * In case of -EBUSY, we'd like to know which page causes the problem.
9293 	 * So, just fall through. test_pages_isolated() has a tracepoint
9294 	 * which will report the busy page.
9295 	 *
9296 	 * It is possible that busy pages could become available before
9297 	 * the call to test_pages_isolated(), and the range will actually be
9298 	 * allocated. So, if we fall through, be sure to clear ret so that
9299 	 * -EBUSY is not accidentally used or returned to the caller.
9300 	 */
9301 	ret = __alloc_contig_migrate_range(&cc, start, end);
9302 	if (ret && ret != -EBUSY)
9303 		goto done;
9304 	ret = 0;
9305
9306 	/*
9307 	 * Pages from [start, end) are within pageblock_nr_pages
9308 	 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
9309 	 * more, all pages in [start, end) are free in the page allocator.
9310 	 * What we are going to do is allocate all pages from
9311 	 * [start, end) (that is, remove them from the page allocator).
9312 	 *
9313 	 * The only problem is that pages at the beginning and at the
9314 	 * end of the interesting range may not be aligned with pages that
9315 	 * the page allocator holds, i.e. they can be part of higher-order
9316 	 * pages. Because of this, we reserve the bigger range and,
9317 	 * once this is done, free the pages we are not interested in.
9318 	 *
9319 	 * We don't have to hold zone->lock here because the pages are
9320 	 * isolated and thus won't get removed from the buddy system.
9321 	 */
9322
9323 	order = 0;
9324 	outer_start = start;
9325 	while (!PageBuddy(pfn_to_page(outer_start))) {
9326 		if (++order >= MAX_ORDER) {
9327 			outer_start = start;
9328 			break;
9329 		}
9330 		outer_start &= ~0UL << order;
9331 	}
9332
9333 	if (outer_start != start) {
9334 		order = buddy_order(pfn_to_page(outer_start));
9335
9336 		/*
9337 		 * The outer_start page could be a small-order buddy page that
9338 		 * doesn't include the start page. Adjust outer_start
9339 		 * in this case so that the failed page is reported properly
9340 		 * by the tracepoint in test_pages_isolated().
9341 		 */
9342 		if (outer_start + (1UL << order) <= start)
9343 			outer_start = start;
9344 	}
9345
9346 	/* Make sure the range is really isolated. */
9347 	if (test_pages_isolated(outer_start, end, 0)) {
9348 		ret = -EBUSY;
9349 		goto done;
9350 	}
9351
9352 	/* Grab isolated pages from freelists. */
9353 	outer_end = isolate_freepages_range(&cc, outer_start, end);
9354 	if (!outer_end) {
9355 		ret = -EBUSY;
9356 		goto done;
9357 	}
9358
9359 	/* Free head and tail (if any) */
9360 	if (start != outer_start)
9361 		free_contig_range(outer_start, start - outer_start);
9362 	if (end != outer_end)
9363 		free_contig_range(end, outer_end - end);
9364
9365 done:
9366 	undo_isolate_page_range(start, end, migratetype);
9367 	return ret;
9368 }
9369 EXPORT_SYMBOL(alloc_contig_range);
9370
9371 static int __alloc_contig_pages(unsigned long start_pfn,
9372 				unsigned long nr_pages, gfp_t gfp_mask)
9373 {
9374 	unsigned long end_pfn = start_pfn + nr_pages;
9375
9376 	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
9377 				  gfp_mask);
9378 }
9379
9380 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
9381 				   unsigned long nr_pages)
9382 {
9383 	unsigned long i, end_pfn = start_pfn + nr_pages;
9384 	struct page *page;
9385
9386 	for (i = start_pfn; i < end_pfn; i++) {
9387 		page = pfn_to_online_page(i);
9388 		if (!page)
9389 			return false;
9390
9391 		if (page_zone(page) != z)
9392 			return false;
9393
9394 		if (PageReserved(page))
9395 			return false;
9396 	}
9397 	return true;
9398 }
9399
9400 static bool zone_spans_last_pfn(const struct zone *zone,
9401 				unsigned long start_pfn, unsigned long nr_pages)
9402 {
9403 	unsigned long last_pfn = start_pfn + nr_pages - 1;
9404
9405 	return zone_spans_pfn(zone, last_pfn);
9406 }
9407
9408 /**
9409  * alloc_contig_pages() -- tries to find and allocate a contiguous range of pages
9410  * @nr_pages:	Number of contiguous pages to allocate
9411  * @gfp_mask:	GFP mask to limit the search, also used during compaction
9412  * @nid:	Target node
9413  * @nodemask:	Mask for other possible nodes
9414  *
9415  * This routine is a wrapper around
alloc_contig_range(). It scans over zones 9416 * on an applicable zonelist to find a contiguous pfn range which can then be 9417 * tried for allocation with alloc_contig_range(). This routine is intended 9418 * for allocation requests which can not be fulfilled with the buddy allocator. 9419 * 9420 * The allocated memory is always aligned to a page boundary. If nr_pages is a 9421 * power of two, then allocated range is also guaranteed to be aligned to same 9422 * nr_pages (e.g. 1GB request would be aligned to 1GB). 9423 * 9424 * Allocated pages can be freed with free_contig_range() or by manually calling 9425 * __free_page() on each allocated page. 9426 * 9427 * Return: pointer to contiguous pages on success, or NULL if not successful. 9428 */ 9429 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, 9430 int nid, nodemask_t *nodemask) 9431 { 9432 unsigned long ret, pfn, flags; 9433 struct zonelist *zonelist; 9434 struct zone *zone; 9435 struct zoneref *z; 9436 9437 zonelist = node_zonelist(nid, gfp_mask); 9438 for_each_zone_zonelist_nodemask(zone, z, zonelist, 9439 gfp_zone(gfp_mask), nodemask) { 9440 spin_lock_irqsave(&zone->lock, flags); 9441 9442 pfn = ALIGN(zone->zone_start_pfn, nr_pages); 9443 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { 9444 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { 9445 /* 9446 * We release the zone lock here because 9447 * alloc_contig_range() will also lock the zone 9448 * at some point. If there's an allocation 9449 * spinning on this lock, it may win the race 9450 * and cause alloc_contig_range() to fail... 9451 */ 9452 spin_unlock_irqrestore(&zone->lock, flags); 9453 ret = __alloc_contig_pages(pfn, nr_pages, 9454 gfp_mask); 9455 if (!ret) 9456 return pfn_to_page(pfn); 9457 spin_lock_irqsave(&zone->lock, flags); 9458 } 9459 pfn += nr_pages; 9460 } 9461 spin_unlock_irqrestore(&zone->lock, flags); 9462 } 9463 return NULL; 9464 } 9465 #endif /* CONFIG_CONTIG_ALLOC */ 9466 9467 void free_contig_range(unsigned long pfn, unsigned long nr_pages) 9468 { 9469 unsigned long count = 0; 9470 9471 for (; nr_pages--; pfn++) { 9472 struct page *page = pfn_to_page(pfn); 9473 9474 count += page_count(page) != 1; 9475 __free_page(page); 9476 } 9477 WARN(count != 0, "%lu pages are still in use!\n", count); 9478 } 9479 EXPORT_SYMBOL(free_contig_range); 9480 9481 /* 9482 * Effectively disable pcplists for the zone by setting the high limit to 0 9483 * and draining all cpus. A concurrent page freeing on another CPU that's about 9484 * to put the page on pcplist will either finish before the drain and the page 9485 * will be drained, or observe the new high limit and skip the pcplist. 9486 * 9487 * Must be paired with a call to zone_pcp_enable(). 
9488  */
9489 void zone_pcp_disable(struct zone *zone)
9490 {
9491 	mutex_lock(&pcp_batch_high_lock);
9492 	__zone_set_pageset_high_and_batch(zone, 0, 1);
9493 	__drain_all_pages(zone, true);
9494 }
9495
9496 void zone_pcp_enable(struct zone *zone)
9497 {
9498 	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
9499 	mutex_unlock(&pcp_batch_high_lock);
9500 }
9501
9502 void zone_pcp_reset(struct zone *zone)
9503 {
9504 	int cpu;
9505 	struct per_cpu_zonestat *pzstats;
9506
9507 	if (zone->per_cpu_pageset != &boot_pageset) {
9508 		for_each_online_cpu(cpu) {
9509 			pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
9510 			drain_zonestat(zone, pzstats);
9511 		}
9512 		free_percpu(zone->per_cpu_pageset);
9513 		zone->per_cpu_pageset = &boot_pageset;
9514 		if (zone->per_cpu_zonestats != &boot_zonestats) {
9515 			free_percpu(zone->per_cpu_zonestats);
9516 			zone->per_cpu_zonestats = &boot_zonestats;
9517 		}
9518 	}
9519 }
9520
9521 #ifdef CONFIG_MEMORY_HOTREMOVE
9522 /*
9523  * All pages in the range must be in a single zone, must not contain holes,
9524  * must span full sections, and must be isolated before calling this function.
9525  */
9526 void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
9527 {
9528 	unsigned long pfn = start_pfn;
9529 	struct page *page;
9530 	struct zone *zone;
9531 	unsigned int order;
9532 	unsigned long flags;
9533
9534 	offline_mem_sections(pfn, end_pfn);
9535 	zone = page_zone(pfn_to_page(pfn));
9536 	spin_lock_irqsave(&zone->lock, flags);
9537 	while (pfn < end_pfn) {
9538 		page = pfn_to_page(pfn);
9539 		/*
9540 		 * A HWPoisoned page may not be in the buddy system, and its
9541 		 * page_count() may not be 0.
9542 		 */
9543 		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
9544 			pfn++;
9545 			continue;
9546 		}
9547 		/*
9548 		 * At this point all remaining PageOffline() pages have a
9549 		 * reference count of 0 and can simply be skipped.
9550 		 */
9551 		if (PageOffline(page)) {
9552 			BUG_ON(page_count(page));
9553 			BUG_ON(PageBuddy(page));
9554 			pfn++;
9555 			continue;
9556 		}
9557
9558 		BUG_ON(page_count(page));
9559 		BUG_ON(!PageBuddy(page));
9560 		order = buddy_order(page);
9561 		del_page_from_free_list(page, zone, order);
9562 		pfn += (1 << order);
9563 	}
9564 	spin_unlock_irqrestore(&zone->lock, flags);
9565 }
9566 #endif
9567
9568 /*
9569  * This function returns a stable result only if called under the zone lock.
9570  */
9571 bool is_free_buddy_page(struct page *page)
9572 {
9573 	unsigned long pfn = page_to_pfn(page);
9574 	unsigned int order;
9575
9576 	for (order = 0; order < MAX_ORDER; order++) {
9577 		struct page *page_head = page - (pfn & ((1 << order) - 1));
9578
9579 		if (PageBuddy(page_head) &&
9580 		    buddy_order_unsafe(page_head) >= order)
9581 			break;
9582 	}
9583
9584 	return order < MAX_ORDER;
9585 }
9586 EXPORT_SYMBOL(is_free_buddy_page);
9587
9588 #ifdef CONFIG_MEMORY_FAILURE
9589 /*
9590  * Break down a higher-order page into sub-pages, and keep our target page
9591  * out of the buddy allocator.
9592 */ 9593 static void break_down_buddy_pages(struct zone *zone, struct page *page, 9594 struct page *target, int low, int high, 9595 int migratetype) 9596 { 9597 unsigned long size = 1 << high; 9598 struct page *current_buddy, *next_page; 9599 9600 while (high > low) { 9601 high--; 9602 size >>= 1; 9603 9604 if (target >= &page[size]) { 9605 next_page = page + size; 9606 current_buddy = page; 9607 } else { 9608 next_page = page; 9609 current_buddy = page + size; 9610 } 9611 9612 if (set_page_guard(zone, current_buddy, high, migratetype)) 9613 continue; 9614 9615 if (current_buddy != target) { 9616 add_to_free_list(current_buddy, zone, high, migratetype); 9617 set_buddy_order(current_buddy, high); 9618 page = next_page; 9619 } 9620 } 9621 } 9622 9623 /* 9624 * Take a page that will be marked as poisoned off the buddy allocator. 9625 */ 9626 bool take_page_off_buddy(struct page *page) 9627 { 9628 struct zone *zone = page_zone(page); 9629 unsigned long pfn = page_to_pfn(page); 9630 unsigned long flags; 9631 unsigned int order; 9632 bool ret = false; 9633 9634 spin_lock_irqsave(&zone->lock, flags); 9635 for (order = 0; order < MAX_ORDER; order++) { 9636 struct page *page_head = page - (pfn & ((1 << order) - 1)); 9637 int page_order = buddy_order(page_head); 9638 9639 if (PageBuddy(page_head) && page_order >= order) { 9640 unsigned long pfn_head = page_to_pfn(page_head); 9641 int migratetype = get_pfnblock_migratetype(page_head, 9642 pfn_head); 9643 9644 del_page_from_free_list(page_head, zone, page_order); 9645 break_down_buddy_pages(zone, page_head, page, 0, 9646 page_order, migratetype); 9647 SetPageHWPoisonTakenOff(page); 9648 if (!is_migrate_isolate(migratetype)) 9649 __mod_zone_freepage_state(zone, -1, migratetype); 9650 ret = true; 9651 break; 9652 } 9653 if (page_count(page_head) > 0) 9654 break; 9655 } 9656 spin_unlock_irqrestore(&zone->lock, flags); 9657 return ret; 9658 } 9659 9660 /* 9661 * Cancel takeoff done by take_page_off_buddy(). 9662 */ 9663 bool put_page_back_buddy(struct page *page) 9664 { 9665 struct zone *zone = page_zone(page); 9666 unsigned long pfn = page_to_pfn(page); 9667 unsigned long flags; 9668 int migratetype = get_pfnblock_migratetype(page, pfn); 9669 bool ret = false; 9670 9671 spin_lock_irqsave(&zone->lock, flags); 9672 if (put_page_testzero(page)) { 9673 ClearPageHWPoisonTakenOff(page); 9674 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); 9675 if (TestClearPageHWPoison(page)) { 9676 ret = true; 9677 } 9678 } 9679 spin_unlock_irqrestore(&zone->lock, flags); 9680 9681 return ret; 9682 } 9683 #endif 9684 9685 #ifdef CONFIG_ZONE_DMA 9686 bool has_managed_dma(void) 9687 { 9688 struct pglist_data *pgdat; 9689 9690 for_each_online_pgdat(pgdat) { 9691 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; 9692 9693 if (managed_zone(zone)) 9694 return true; 9695 } 9696 return false; 9697 } 9698 #endif /* CONFIG_ZONE_DMA */ 9699
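
/*
 * Illustrative usage sketches for the interfaces above. These are hedged
 * examples, not code from this file: every "example_*" identifier is
 * hypothetical, and only the function signatures defined above are assumed.
 *
 * Sketch 1: a hypothetical boot-time caller of alloc_large_system_hash().
 * Passing numentries == 0 lets the routine size the table from the amount
 * of kernel memory, and scale == 14 asks for roughly one bucket per
 * 2^14 bytes (16KB) of low memory.
 */
static struct hlist_head *example_table;
static unsigned int example_table_shift;
static unsigned int example_table_mask;

static void __init example_table_init(void)
{
	example_table = alloc_large_system_hash("Example-cache",
						sizeof(struct hlist_head),
						0,		/* auto-size from memory */
						14,		/* 1 bucket per 2^14 bytes */
						HASH_ZERO,	/* zero the buckets */
						&example_table_shift,
						&example_table_mask,
						0,		/* no lower limit */
						0);		/* default upper limit */
	/* A key is later hashed and ANDed with example_table_mask to pick a bucket. */
}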
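
/*
 * Sketch 2: a hypothetical caller of alloc_contig_range(), exercising the
 * contract documented above: the PFN range need not be pageblock aligned,
 * but it must lie in one zone and all covering pageblocks must share a
 * MIGRATE_MOVABLE (or MIGRATE_CMA) migratetype. The surrounding driver
 * logic is assumed; only the alloc_contig_range()/free_contig_range()
 * calls reflect this file.
 */
static int example_claim_range(unsigned long start_pfn, unsigned long nr_pages)
{
	int ret;

	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 MIGRATE_MOVABLE, GFP_KERNEL);
	if (ret)
		return ret;	/* e.g. -EBUSY, -EINTR, -ENOMEM */

	/* Pages in [start_pfn, start_pfn + nr_pages) now belong to the caller. */
	return 0;
}

static void example_release_range(unsigned long start_pfn, unsigned long nr_pages)
{
	free_contig_range(start_pfn, nr_pages);
}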
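
/*
 * Sketch 3: a hypothetical caller of alloc_contig_pages(), which searches
 * the zonelists itself instead of being told a PFN range. The 1GiB size and
 * the node argument are arbitrary; per the kernel-doc above, a power-of-two
 * nr_pages also gives a naturally aligned range.
 */
static struct page *example_alloc_1g_on_node(int nid)
{
	unsigned long nr_pages = SZ_1G >> PAGE_SHIFT;

	return alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_NOWARN,
				  nid, NULL);
}

static void example_free_1g(struct page *page)
{
	free_contig_range(page_to_pfn(page), SZ_1G >> PAGE_SHIFT);
}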
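
/*
 * Sketch 4: the zone_pcp_disable()/zone_pcp_enable() pairing required by the
 * comment above. The work done in between is only hinted at; in real callers
 * it is, for example, page isolation during memory offlining.
 */
static void example_with_pcplists_disabled(struct zone *zone)
{
	zone_pcp_disable(zone);		/* high = 0, batch = 1, then drain all CPUs */

	/* ... operate on the zone while no page can sit on a pcplist ... */

	zone_pcp_enable(zone);		/* restore zone->pageset_high and ->pageset_batch */
}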
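
/*
 * Sketch 5: a worked example of the split arithmetic in
 * break_down_buddy_pages() above. Each iteration frees the half of the
 * current block that does not contain the target page and descends into the
 * half that does, so the target itself never reaches a free list. This is a
 * stand-alone user-space simulation of that arithmetic (page offsets instead
 * of struct page pointers), not kernel code.
 */
#include <stdio.h>

static void simulate_break_down(unsigned long target, int low, int high)
{
	unsigned long base = 0;			/* offset of the block being split */
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= base + size) {	/* target in the upper half */
			printf("free [%lu..%lu] at order %d\n",
			       base, base + size - 1, high);
			base += size;		/* descend into the upper half */
		} else {			/* target in the lower half */
			printf("free [%lu..%lu] at order %d\n",
			       base + size, base + 2 * size - 1, high);
			/* keep base: descend into the lower half */
		}
	}
	printf("page %lu kept off the free lists\n", target);
}

int main(void)
{
	/* Order-3 block (offsets 0..7), poisoned target at offset 5:
	 * frees [0..3] at order 2, [6..7] at order 1, [4] at order 0. */
	simulate_break_down(5, 0, 3);
	return 0;
}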