/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *	(lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	unsigned long max_initialise;

	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	/*
	 * Initialise at least 2G of a node, but also take into account the
	 * two large system hashes that can take up 1GB for 0.25TB/node.
	 */
	max_initialise = max(2UL << (30 - PAGE_SHIFT),
		(pgdat->node_spanned_pages >> 8));

	(*nr_initialised)++;
	if ((*nr_initialised > max_initialise) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
						bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	if (!debug_guardpage_minorder())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	if (!debug_guardpage_minorder())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy
 * we can coalesce with. A page and its buddy can be coalesced if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * runs of free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. The page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long combined_pfn;
	unsigned long uninitialized_var(buddy_pfn);
	struct page *buddy;
	unsigned int max_order;

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		buddy_pfn = __find_buddy_pfn(pfn, order);
		buddy = page + (buddy_pfn - pfn);

		if (!pfn_valid_within(buddy_pfn))
			goto done_merging;
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}
	if (max_order < MAX_ORDER) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
		struct page *higher_page, *higher_buddy;
		combined_pfn = buddy_pfn & pfn;
		higher_page = page + (combined_pfn - pfn);
		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static void free_pages_check_bad(struct page *page)
{
	const char *bad_reason;
	unsigned long bad_flags;

	bad_reason = NULL;
	bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	free_pages_check_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping is compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount", 0);
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * page_deferred_list().next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page", 0);
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_pages_check(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageKmemcg(page))
		memcg_kmem_uncharge(page, order);
	if (check_free)
		bad += free_pages_check(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_poison_pages(page, 1 << order, 0);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
	return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_pages_check(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	unsigned long nr_scanned, flags;
	bool isolated_pageblocks;

	spin_lock_irqsave(&zone->lock, flags);
	isolated_pageblocks = has_isolate_pageblock(zone);
	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);

	while (count) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = count;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_last_entry(list, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);

			mt = get_pcppage_migratetype(page);
			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			if (bulkfree_pcp_prepare(page))
				continue;

			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--count && --batch_free && !list_empty(list));
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned, flags;
	spin_lock_irqsave(&zone->lock, flags);
	__count_vm_events(PGFREE, 1 << order);
	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock_irqrestore(&zone->lock, flags);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
					int nid)
{
	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_pfn(pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order, true))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
}

static void __init __free_pages_boot_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
					struct mminit_pfnnid_cache *state)
{
	int nid;

	nid = __early_pfn_to_nid(pfn, state);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}

/* Only safe to use early in boot when initialisation is single-threaded */
static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
}

#else

static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return true;
}
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
					struct mminit_pfnnid_cache *state)
{
	return true;
}
#endif


void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	return __free_pages_boot_core(page, order);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			 block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(struct page *page,
					unsigned long pfn, int nr_pages)
{
	int i;

	if (!page)
		return;

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == pageblock_nr_pages &&
	    (pfn & (pageblock_nr_pages - 1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_boot_core(page, pageblock_order);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if ((pfn & (pageblock_nr_pages - 1)) == 0)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_boot_core(page, 0);
	}
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	int nid = pgdat->node_id;
	struct mminit_pfnnid_cache nid_init_state = { };
	unsigned long start = jiffies;
	unsigned long nr_pages = 0;
	unsigned long walk_start, walk_end;
	int i, zid;
	struct zone *zone;
	unsigned long first_init_pfn = pgdat->first_deferred_pfn;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (first_init_pfn == ULONG_MAX) {
		pgdat_init_report_one_done();
		return 0;
	}

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
		unsigned long pfn, end_pfn;
		struct page *page = NULL;
		struct page *free_base_page = NULL;
		unsigned long free_base_pfn = 0;
		int nr_to_free = 0;

		end_pfn = min(walk_end, zone_end_pfn(zone));
		pfn = first_init_pfn;
		if (pfn < walk_start)
			pfn = walk_start;
		if (pfn < zone->zone_start_pfn)
			pfn = zone->zone_start_pfn;

		for (; pfn < end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				goto free_range;

			/*
			 * Ensure pfn_valid is checked every
			 * pageblock_nr_pages for memory holes
			 */
			if ((pfn & (pageblock_nr_pages - 1)) == 0) {
				if (!pfn_valid(pfn)) {
					page = NULL;
					goto free_range;
				}
			}

			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
				page = NULL;
				goto free_range;
			}

			/* Minimise pfn page lookups and scheduler checks */
			if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
				page++;
			} else {
				nr_pages += nr_to_free;
				deferred_free_range(free_base_page,
						free_base_pfn, nr_to_free);
				free_base_page = NULL;
				free_base_pfn = nr_to_free = 0;

				page = pfn_to_page(pfn);
				cond_resched();
			}

			if (page->flags) {
				VM_BUG_ON(page_zone(page) != zone);
				goto free_range;
			}

			__init_single_page(page, pfn, zid, nid);
			if (!free_base_page) {
				free_base_page = page;
				free_base_pfn = pfn;
				nr_to_free = 0;
			}
			nr_to_free++;

			/* Where possible, batch up pages for a single free */
			continue;
free_range:
			/* Free the current block of pages to allocator */
			nr_pages += nr_to_free;
			deferred_free_range(free_base_page, free_base_pfn,
								nr_to_free);
			free_base_page = NULL;
			free_base_pfn = nr_to_free = 0;
		}
		/* Free the last block of pages to allocator */
		nr_pages += nr_to_free;
		deferred_free_range(free_base_page, free_base_pfn, nr_to_free);

		first_init_pfn = max(end_pfn, first_init_pfn);
	}

	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
					jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void __init page_alloc_init_late(void)
{
	struct zone *zone;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	int nid;

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), which will allow merging
		 * back into the allocator when the buddy is freed.
		 * The corresponding page table entries will not be touched;
		 * the pages will stay not present in the virtual address space.
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

static void check_new_page_bad(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & __PG_HWPOISON)) {
		bad_reason = "HWPoisoned (hardware-corrupted)";
		bad_flags = __PG_HWPOISON;
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static inline bool free_pages_prezeroed(bool poisoned)
{
	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
		page_poisoning_enabled() && poisoned;
}

#ifdef CONFIG_DEBUG_VM
static bool check_pcp_refill(struct page *page)
{
	return false;
}

static bool check_new_pcp(struct page *page)
{
	return check_new_page(page);
}
#else
static bool check_pcp_refill(struct page *page)
{
	return check_new_page(page);
}
static bool check_new_pcp(struct page *page)
{
	return false;
}
#endif /* CONFIG_DEBUG_VM */

static bool check_new_pages(struct page *page, unsigned int order)
{
	int i;
	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return true;
	}

	return false;
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);
	kasan_alloc_pages(page, order);
	set_page_owner(page, order, gfp_flags);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	int i;
	bool poisoned = true;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (poisoned)
			poisoned &= page_is_poisoned(p);
	}

	post_alloc_hook(page, order, gfp_flags);

	if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = list_first_entry_or_null(&area->free_list[migratetype],
							struct page, lru);
		if (!page)
			continue;
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		set_pcppage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which the free lists are fallen back to
 * when the free lists for the desired migratetype are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
#ifdef CONFIG_CMA
	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
#endif
};

#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned int order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * Leaving this order check here is intentional, although a more
	 * relaxed order check follows. The reason is that we can actually
	 * steal the whole pageblock if this condition is met, while the
	 * check below does not guarantee it and is just a heuristic, so
	 * it could be changed at any time.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}

/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal the whole pageblock. If not, we first move the free pages in
 * this pageblock and check whether half of the pages were moved. If they
 * were, we can change the migratetype of the pageblock and permanently use
 * its pages for the requested migratetype in the future.
1960 */ 1961 static void steal_suitable_fallback(struct zone *zone, struct page *page, 1962 int start_type) 1963 { 1964 unsigned int current_order = page_order(page); 1965 int pages; 1966 1967 /* Take ownership for orders >= pageblock_order */ 1968 if (current_order >= pageblock_order) { 1969 change_pageblock_range(page, current_order, start_type); 1970 return; 1971 } 1972 1973 pages = move_freepages_block(zone, page, start_type); 1974 1975 /* Claim the whole block if over half of it is free */ 1976 if (pages >= (1 << (pageblock_order-1)) || 1977 page_group_by_mobility_disabled) 1978 set_pageblock_migratetype(page, start_type); 1979 } 1980 1981 /* 1982 * Check whether there is a suitable fallback freepage with requested order. 1983 * If only_stealable is true, this function returns fallback_mt only if 1984 * we can steal other freepages all together. This would help to reduce 1985 * fragmentation due to mixed migratetype pages in one pageblock. 1986 */ 1987 int find_suitable_fallback(struct free_area *area, unsigned int order, 1988 int migratetype, bool only_stealable, bool *can_steal) 1989 { 1990 int i; 1991 int fallback_mt; 1992 1993 if (area->nr_free == 0) 1994 return -1; 1995 1996 *can_steal = false; 1997 for (i = 0;; i++) { 1998 fallback_mt = fallbacks[migratetype][i]; 1999 if (fallback_mt == MIGRATE_TYPES) 2000 break; 2001 2002 if (list_empty(&area->free_list[fallback_mt])) 2003 continue; 2004 2005 if (can_steal_fallback(order, migratetype)) 2006 *can_steal = true; 2007 2008 if (!only_stealable) 2009 return fallback_mt; 2010 2011 if (*can_steal) 2012 return fallback_mt; 2013 } 2014 2015 return -1; 2016 } 2017 2018 /* 2019 * Reserve a pageblock for exclusive use of high-order atomic allocations if 2020 * there are no empty page blocks that contain a page with a suitable order 2021 */ 2022 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2023 unsigned int alloc_order) 2024 { 2025 int mt; 2026 unsigned long max_managed, flags; 2027 2028 /* 2029 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2030 * Check is race-prone but harmless. 2031 */ 2032 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages; 2033 if (zone->nr_reserved_highatomic >= max_managed) 2034 return; 2035 2036 spin_lock_irqsave(&zone->lock, flags); 2037 2038 /* Recheck the nr_reserved_highatomic limit under the lock */ 2039 if (zone->nr_reserved_highatomic >= max_managed) 2040 goto out_unlock; 2041 2042 /* Yoink! */ 2043 mt = get_pageblock_migratetype(page); 2044 if (mt != MIGRATE_HIGHATOMIC && 2045 !is_migrate_isolate(mt) && !is_migrate_cma(mt)) { 2046 zone->nr_reserved_highatomic += pageblock_nr_pages; 2047 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2048 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC); 2049 } 2050 2051 out_unlock: 2052 spin_unlock_irqrestore(&zone->lock, flags); 2053 } 2054 2055 /* 2056 * Used when an allocation is about to fail under memory pressure. This 2057 * potentially hurts the reliability of high-order allocations when under 2058 * intense memory pressure but failed atomic allocations should be easier 2059 * to recover from than an OOM. 2060 * 2061 * If @force is true, try to unreserve a pageblock even though highatomic 2062 * pageblock is exhausted. 
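 *
 * For reference, the two callers later in this file pass force == false
 * from the direct reclaim retry path and force == true as a last resort
 * right before declaring OOM, so at least one reserved pageblock per
 * zone is kept until reclaim has clearly failed.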
2063 */ 2064 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2065 bool force) 2066 { 2067 struct zonelist *zonelist = ac->zonelist; 2068 unsigned long flags; 2069 struct zoneref *z; 2070 struct zone *zone; 2071 struct page *page; 2072 int order; 2073 bool ret; 2074 2075 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, 2076 ac->nodemask) { 2077 /* 2078 * Preserve at least one pageblock unless memory pressure 2079 * is really high. 2080 */ 2081 if (!force && zone->nr_reserved_highatomic <= 2082 pageblock_nr_pages) 2083 continue; 2084 2085 spin_lock_irqsave(&zone->lock, flags); 2086 for (order = 0; order < MAX_ORDER; order++) { 2087 struct free_area *area = &(zone->free_area[order]); 2088 2089 page = list_first_entry_or_null( 2090 &area->free_list[MIGRATE_HIGHATOMIC], 2091 struct page, lru); 2092 if (!page) 2093 continue; 2094 2095 /* 2096 * In page freeing path, migratetype change is racy so 2097 * we can counter several free pages in a pageblock 2098 * in this loop althoug we changed the pageblock type 2099 * from highatomic to ac->migratetype. So we should 2100 * adjust the count once. 2101 */ 2102 if (get_pageblock_migratetype(page) == 2103 MIGRATE_HIGHATOMIC) { 2104 /* 2105 * It should never happen but changes to 2106 * locking could inadvertently allow a per-cpu 2107 * drain to add pages to MIGRATE_HIGHATOMIC 2108 * while unreserving so be safe and watch for 2109 * underflows. 2110 */ 2111 zone->nr_reserved_highatomic -= min( 2112 pageblock_nr_pages, 2113 zone->nr_reserved_highatomic); 2114 } 2115 2116 /* 2117 * Convert to ac->migratetype and avoid the normal 2118 * pageblock stealing heuristics. Minimally, the caller 2119 * is doing the work and needs the pages. More 2120 * importantly, if the block was always converted to 2121 * MIGRATE_UNMOVABLE or another type then the number 2122 * of pageblocks that cannot be completely freed 2123 * may increase. 
2124 */ 2125 set_pageblock_migratetype(page, ac->migratetype); 2126 ret = move_freepages_block(zone, page, ac->migratetype); 2127 if (ret) { 2128 spin_unlock_irqrestore(&zone->lock, flags); 2129 return ret; 2130 } 2131 } 2132 spin_unlock_irqrestore(&zone->lock, flags); 2133 } 2134 2135 return false; 2136 } 2137 2138 /* Remove an element from the buddy allocator from the fallback list */ 2139 static inline struct page * 2140 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) 2141 { 2142 struct free_area *area; 2143 unsigned int current_order; 2144 struct page *page; 2145 int fallback_mt; 2146 bool can_steal; 2147 2148 /* Find the largest possible block of pages in the other list */ 2149 for (current_order = MAX_ORDER-1; 2150 current_order >= order && current_order <= MAX_ORDER-1; 2151 --current_order) { 2152 area = &(zone->free_area[current_order]); 2153 fallback_mt = find_suitable_fallback(area, current_order, 2154 start_migratetype, false, &can_steal); 2155 if (fallback_mt == -1) 2156 continue; 2157 2158 page = list_first_entry(&area->free_list[fallback_mt], 2159 struct page, lru); 2160 if (can_steal && 2161 get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC) 2162 steal_suitable_fallback(zone, page, start_migratetype); 2163 2164 /* Remove the page from the freelists */ 2165 area->nr_free--; 2166 list_del(&page->lru); 2167 rmv_page_order(page); 2168 2169 expand(zone, page, order, current_order, area, 2170 start_migratetype); 2171 /* 2172 * The pcppage_migratetype may differ from pageblock's 2173 * migratetype depending on the decisions in 2174 * find_suitable_fallback(). This is OK as long as it does not 2175 * differ for MIGRATE_CMA pageblocks. Those can be used as 2176 * fallback only via special __rmqueue_cma_fallback() function 2177 */ 2178 set_pcppage_migratetype(page, start_migratetype); 2179 2180 trace_mm_page_alloc_extfrag(page, order, current_order, 2181 start_migratetype, fallback_mt); 2182 2183 return page; 2184 } 2185 2186 return NULL; 2187 } 2188 2189 /* 2190 * Do the hard work of removing an element from the buddy allocator. 2191 * Call me with the zone->lock already held. 2192 */ 2193 static struct page *__rmqueue(struct zone *zone, unsigned int order, 2194 int migratetype) 2195 { 2196 struct page *page; 2197 2198 page = __rmqueue_smallest(zone, order, migratetype); 2199 if (unlikely(!page)) { 2200 if (migratetype == MIGRATE_MOVABLE) 2201 page = __rmqueue_cma_fallback(zone, order); 2202 2203 if (!page) 2204 page = __rmqueue_fallback(zone, order, migratetype); 2205 } 2206 2207 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2208 return page; 2209 } 2210 2211 /* 2212 * Obtain a specified number of elements from the buddy allocator, all under 2213 * a single hold of the lock, for efficiency. Add them to the supplied list. 2214 * Returns the number of new pages which were placed at *list. 2215 */ 2216 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2217 unsigned long count, struct list_head *list, 2218 int migratetype, bool cold) 2219 { 2220 int i, alloced = 0; 2221 unsigned long flags; 2222 2223 spin_lock_irqsave(&zone->lock, flags); 2224 for (i = 0; i < count; ++i) { 2225 struct page *page = __rmqueue(zone, order, migratetype); 2226 if (unlikely(page == NULL)) 2227 break; 2228 2229 if (unlikely(check_pcp_refill(page))) 2230 continue; 2231 2232 /* 2233 * Split buddy pages returned by expand() are received here 2234 * in physical page order. 
The page is added to the callers and 2235 * list and the list head then moves forward. From the callers 2236 * perspective, the linked list is ordered by page number in 2237 * some conditions. This is useful for IO devices that can 2238 * merge IO requests if the physical pages are ordered 2239 * properly. 2240 */ 2241 if (likely(!cold)) 2242 list_add(&page->lru, list); 2243 else 2244 list_add_tail(&page->lru, list); 2245 list = &page->lru; 2246 alloced++; 2247 if (is_migrate_cma(get_pcppage_migratetype(page))) 2248 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2249 -(1 << order)); 2250 } 2251 2252 /* 2253 * i pages were removed from the buddy list even if some leak due 2254 * to check_pcp_refill failing so adjust NR_FREE_PAGES based 2255 * on i. Do not confuse with 'alloced' which is the number of 2256 * pages added to the pcp list. 2257 */ 2258 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2259 spin_unlock_irqrestore(&zone->lock, flags); 2260 return alloced; 2261 } 2262 2263 #ifdef CONFIG_NUMA 2264 /* 2265 * Called from the vmstat counter updater to drain pagesets of this 2266 * currently executing processor on remote nodes after they have 2267 * expired. 2268 * 2269 * Note that this function must be called with the thread pinned to 2270 * a single processor. 2271 */ 2272 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2273 { 2274 unsigned long flags; 2275 int to_drain, batch; 2276 2277 local_irq_save(flags); 2278 batch = READ_ONCE(pcp->batch); 2279 to_drain = min(pcp->count, batch); 2280 if (to_drain > 0) { 2281 free_pcppages_bulk(zone, to_drain, pcp); 2282 pcp->count -= to_drain; 2283 } 2284 local_irq_restore(flags); 2285 } 2286 #endif 2287 2288 /* 2289 * Drain pcplists of the indicated processor and zone. 2290 * 2291 * The processor must either be the current processor and the 2292 * thread pinned to the current processor or a processor that 2293 * is not online. 2294 */ 2295 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2296 { 2297 unsigned long flags; 2298 struct per_cpu_pageset *pset; 2299 struct per_cpu_pages *pcp; 2300 2301 local_irq_save(flags); 2302 pset = per_cpu_ptr(zone->pageset, cpu); 2303 2304 pcp = &pset->pcp; 2305 if (pcp->count) { 2306 free_pcppages_bulk(zone, pcp->count, pcp); 2307 pcp->count = 0; 2308 } 2309 local_irq_restore(flags); 2310 } 2311 2312 /* 2313 * Drain pcplists of all zones on the indicated processor. 2314 * 2315 * The processor must either be the current processor and the 2316 * thread pinned to the current processor or a processor that 2317 * is not online. 2318 */ 2319 static void drain_pages(unsigned int cpu) 2320 { 2321 struct zone *zone; 2322 2323 for_each_populated_zone(zone) { 2324 drain_pages_zone(cpu, zone); 2325 } 2326 } 2327 2328 /* 2329 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2330 * 2331 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 2332 * the single zone's pages. 2333 */ 2334 void drain_local_pages(struct zone *zone) 2335 { 2336 int cpu = smp_processor_id(); 2337 2338 if (zone) 2339 drain_pages_zone(cpu, zone); 2340 else 2341 drain_pages(cpu); 2342 } 2343 2344 static void drain_local_pages_wq(struct work_struct *work) 2345 { 2346 /* 2347 * drain_all_pages doesn't use proper cpu hotplug protection so 2348 * we can race with cpu offline when the WQ can move this from 2349 * a cpu pinned worker to an unbound one. 
We can operate on a different 2350 * cpu which is allright but we also have to make sure to not move to 2351 * a different one. 2352 */ 2353 preempt_disable(); 2354 drain_local_pages(NULL); 2355 preempt_enable(); 2356 } 2357 2358 /* 2359 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2360 * 2361 * When zone parameter is non-NULL, spill just the single zone's pages. 2362 * 2363 * Note that this can be extremely slow as the draining happens in a workqueue. 2364 */ 2365 void drain_all_pages(struct zone *zone) 2366 { 2367 int cpu; 2368 2369 /* 2370 * Allocate in the BSS so we wont require allocation in 2371 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2372 */ 2373 static cpumask_t cpus_with_pcps; 2374 2375 /* Workqueues cannot recurse */ 2376 if (current->flags & PF_WQ_WORKER) 2377 return; 2378 2379 /* 2380 * Do not drain if one is already in progress unless it's specific to 2381 * a zone. Such callers are primarily CMA and memory hotplug and need 2382 * the drain to be complete when the call returns. 2383 */ 2384 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2385 if (!zone) 2386 return; 2387 mutex_lock(&pcpu_drain_mutex); 2388 } 2389 2390 /* 2391 * We don't care about racing with CPU hotplug event 2392 * as offline notification will cause the notified 2393 * cpu to drain that CPU pcps and on_each_cpu_mask 2394 * disables preemption as part of its processing 2395 */ 2396 for_each_online_cpu(cpu) { 2397 struct per_cpu_pageset *pcp; 2398 struct zone *z; 2399 bool has_pcps = false; 2400 2401 if (zone) { 2402 pcp = per_cpu_ptr(zone->pageset, cpu); 2403 if (pcp->pcp.count) 2404 has_pcps = true; 2405 } else { 2406 for_each_populated_zone(z) { 2407 pcp = per_cpu_ptr(z->pageset, cpu); 2408 if (pcp->pcp.count) { 2409 has_pcps = true; 2410 break; 2411 } 2412 } 2413 } 2414 2415 if (has_pcps) 2416 cpumask_set_cpu(cpu, &cpus_with_pcps); 2417 else 2418 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2419 } 2420 2421 for_each_cpu(cpu, &cpus_with_pcps) { 2422 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); 2423 INIT_WORK(work, drain_local_pages_wq); 2424 schedule_work_on(cpu, work); 2425 } 2426 for_each_cpu(cpu, &cpus_with_pcps) 2427 flush_work(per_cpu_ptr(&pcpu_drain, cpu)); 2428 2429 mutex_unlock(&pcpu_drain_mutex); 2430 } 2431 2432 #ifdef CONFIG_HIBERNATION 2433 2434 void mark_free_pages(struct zone *zone) 2435 { 2436 unsigned long pfn, max_zone_pfn; 2437 unsigned long flags; 2438 unsigned int order, t; 2439 struct page *page; 2440 2441 if (zone_is_empty(zone)) 2442 return; 2443 2444 spin_lock_irqsave(&zone->lock, flags); 2445 2446 max_zone_pfn = zone_end_pfn(zone); 2447 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 2448 if (pfn_valid(pfn)) { 2449 page = pfn_to_page(pfn); 2450 2451 if (page_zone(page) != zone) 2452 continue; 2453 2454 if (!swsusp_page_is_forbidden(page)) 2455 swsusp_unset_page_free(page); 2456 } 2457 2458 for_each_migratetype_order(order, t) { 2459 list_for_each_entry(page, 2460 &zone->free_area[order].free_list[t], lru) { 2461 unsigned long i; 2462 2463 pfn = page_to_pfn(page); 2464 for (i = 0; i < (1UL << order); i++) 2465 swsusp_set_page_free(pfn_to_page(pfn + i)); 2466 } 2467 } 2468 spin_unlock_irqrestore(&zone->lock, flags); 2469 } 2470 #endif /* CONFIG_PM */ 2471 2472 /* 2473 * Free a 0-order page 2474 * cold == true ? 
free a cold page : free a hot page 2475 */ 2476 void free_hot_cold_page(struct page *page, bool cold) 2477 { 2478 struct zone *zone = page_zone(page); 2479 struct per_cpu_pages *pcp; 2480 unsigned long pfn = page_to_pfn(page); 2481 int migratetype; 2482 2483 if (in_interrupt()) { 2484 __free_pages_ok(page, 0); 2485 return; 2486 } 2487 2488 if (!free_pcp_prepare(page)) 2489 return; 2490 2491 migratetype = get_pfnblock_migratetype(page, pfn); 2492 set_pcppage_migratetype(page, migratetype); 2493 preempt_disable(); 2494 2495 /* 2496 * We only track unmovable, reclaimable and movable on pcp lists. 2497 * Free ISOLATE pages back to the allocator because they are being 2498 * offlined but treat RESERVE as movable pages so we can get those 2499 * areas back if necessary. Otherwise, we may have to free 2500 * excessively into the page allocator 2501 */ 2502 if (migratetype >= MIGRATE_PCPTYPES) { 2503 if (unlikely(is_migrate_isolate(migratetype))) { 2504 free_one_page(zone, page, pfn, 0, migratetype); 2505 goto out; 2506 } 2507 migratetype = MIGRATE_MOVABLE; 2508 } 2509 2510 __count_vm_event(PGFREE); 2511 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2512 if (!cold) 2513 list_add(&page->lru, &pcp->lists[migratetype]); 2514 else 2515 list_add_tail(&page->lru, &pcp->lists[migratetype]); 2516 pcp->count++; 2517 if (pcp->count >= pcp->high) { 2518 unsigned long batch = READ_ONCE(pcp->batch); 2519 free_pcppages_bulk(zone, batch, pcp); 2520 pcp->count -= batch; 2521 } 2522 2523 out: 2524 preempt_enable(); 2525 } 2526 2527 /* 2528 * Free a list of 0-order pages 2529 */ 2530 void free_hot_cold_page_list(struct list_head *list, bool cold) 2531 { 2532 struct page *page, *next; 2533 2534 list_for_each_entry_safe(page, next, list, lru) { 2535 trace_mm_page_free_batched(page, cold); 2536 free_hot_cold_page(page, cold); 2537 } 2538 } 2539 2540 /* 2541 * split_page takes a non-compound higher-order page, and splits it into 2542 * n (1<<order) sub-pages: page[0..n] 2543 * Each sub-page must be freed individually. 2544 * 2545 * Note: this is probably too low level an operation for use in drivers. 2546 * Please consult with lkml before using this in your driver. 2547 */ 2548 void split_page(struct page *page, unsigned int order) 2549 { 2550 int i; 2551 2552 VM_BUG_ON_PAGE(PageCompound(page), page); 2553 VM_BUG_ON_PAGE(!page_count(page), page); 2554 2555 #ifdef CONFIG_KMEMCHECK 2556 /* 2557 * Split shadow pages too, because free(page[0]) would 2558 * otherwise free the whole shadow. 2559 */ 2560 if (kmemcheck_page_is_tracked(page)) 2561 split_page(virt_to_page(page[0].shadow), order); 2562 #endif 2563 2564 for (i = 1; i < (1 << order); i++) 2565 set_page_refcounted(page + i); 2566 split_page_owner(page, order); 2567 } 2568 EXPORT_SYMBOL_GPL(split_page); 2569 2570 int __isolate_free_page(struct page *page, unsigned int order) 2571 { 2572 unsigned long watermark; 2573 struct zone *zone; 2574 int mt; 2575 2576 BUG_ON(!PageBuddy(page)); 2577 2578 zone = page_zone(page); 2579 mt = get_pageblock_migratetype(page); 2580 2581 if (!is_migrate_isolate(mt)) { 2582 /* 2583 * Obey watermarks as if the page was being allocated. We can 2584 * emulate a high-order watermark check with a raised order-0 2585 * watermark, because we already know our high-order page 2586 * exists. 
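 *
 * For example, isolating an order-3 buddy is only allowed if the
 * zone would still pass its min watermark with 8 further pages
 * gone, i.e. the check below is effectively
 *
 *	zone_watermark_ok(zone, 0, min_wmark_pages(zone) + 8,
 *			  0, ALLOC_CMA)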
2587 */ 2588 watermark = min_wmark_pages(zone) + (1UL << order); 2589 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2590 return 0; 2591 2592 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2593 } 2594 2595 /* Remove page from free list */ 2596 list_del(&page->lru); 2597 zone->free_area[order].nr_free--; 2598 rmv_page_order(page); 2599 2600 /* 2601 * Set the pageblock if the isolated page is at least half of a 2602 * pageblock 2603 */ 2604 if (order >= pageblock_order - 1) { 2605 struct page *endpage = page + (1 << order) - 1; 2606 for (; page < endpage; page += pageblock_nr_pages) { 2607 int mt = get_pageblock_migratetype(page); 2608 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) 2609 && mt != MIGRATE_HIGHATOMIC) 2610 set_pageblock_migratetype(page, 2611 MIGRATE_MOVABLE); 2612 } 2613 } 2614 2615 2616 return 1UL << order; 2617 } 2618 2619 /* 2620 * Update NUMA hit/miss statistics 2621 * 2622 * Must be called with interrupts disabled. 2623 */ 2624 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) 2625 { 2626 #ifdef CONFIG_NUMA 2627 enum zone_stat_item local_stat = NUMA_LOCAL; 2628 2629 if (z->node != numa_node_id()) 2630 local_stat = NUMA_OTHER; 2631 2632 if (z->node == preferred_zone->node) 2633 __inc_zone_state(z, NUMA_HIT); 2634 else { 2635 __inc_zone_state(z, NUMA_MISS); 2636 __inc_zone_state(preferred_zone, NUMA_FOREIGN); 2637 } 2638 __inc_zone_state(z, local_stat); 2639 #endif 2640 } 2641 2642 /* Remove page from the per-cpu list, caller must protect the list */ 2643 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, 2644 bool cold, struct per_cpu_pages *pcp, 2645 struct list_head *list) 2646 { 2647 struct page *page; 2648 2649 VM_BUG_ON(in_interrupt()); 2650 2651 do { 2652 if (list_empty(list)) { 2653 pcp->count += rmqueue_bulk(zone, 0, 2654 pcp->batch, list, 2655 migratetype, cold); 2656 if (unlikely(list_empty(list))) 2657 return NULL; 2658 } 2659 2660 if (cold) 2661 page = list_last_entry(list, struct page, lru); 2662 else 2663 page = list_first_entry(list, struct page, lru); 2664 2665 list_del(&page->lru); 2666 pcp->count--; 2667 } while (check_new_pcp(page)); 2668 2669 return page; 2670 } 2671 2672 /* Lock and remove page from the per-cpu list */ 2673 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 2674 struct zone *zone, unsigned int order, 2675 gfp_t gfp_flags, int migratetype) 2676 { 2677 struct per_cpu_pages *pcp; 2678 struct list_head *list; 2679 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2680 struct page *page; 2681 2682 preempt_disable(); 2683 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2684 list = &pcp->lists[migratetype]; 2685 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list); 2686 if (page) { 2687 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2688 zone_statistics(preferred_zone, zone); 2689 } 2690 preempt_enable(); 2691 return page; 2692 } 2693 2694 /* 2695 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 
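 *
 * Roughly, subject to the checks below:
 *
 *	order == 0 && !in_interrupt():	the page comes from the per-cpu
 *					lists, which are refilled in bulk
 *					under zone->lock when empty
 *	anything else:			zone->lock is taken and the page
 *					comes straight from __rmqueue()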
2696 */ 2697 static inline 2698 struct page *rmqueue(struct zone *preferred_zone, 2699 struct zone *zone, unsigned int order, 2700 gfp_t gfp_flags, unsigned int alloc_flags, 2701 int migratetype) 2702 { 2703 unsigned long flags; 2704 struct page *page; 2705 2706 if (likely(order == 0) && !in_interrupt()) { 2707 page = rmqueue_pcplist(preferred_zone, zone, order, 2708 gfp_flags, migratetype); 2709 goto out; 2710 } 2711 2712 /* 2713 * We most definitely don't want callers attempting to 2714 * allocate greater than order-1 page units with __GFP_NOFAIL. 2715 */ 2716 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 2717 spin_lock_irqsave(&zone->lock, flags); 2718 2719 do { 2720 page = NULL; 2721 if (alloc_flags & ALLOC_HARDER) { 2722 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2723 if (page) 2724 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2725 } 2726 if (!page) 2727 page = __rmqueue(zone, order, migratetype); 2728 } while (page && check_new_pages(page, order)); 2729 spin_unlock(&zone->lock); 2730 if (!page) 2731 goto failed; 2732 __mod_zone_freepage_state(zone, -(1 << order), 2733 get_pcppage_migratetype(page)); 2734 2735 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2736 zone_statistics(preferred_zone, zone); 2737 local_irq_restore(flags); 2738 2739 out: 2740 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 2741 return page; 2742 2743 failed: 2744 local_irq_restore(flags); 2745 return NULL; 2746 } 2747 2748 #ifdef CONFIG_FAIL_PAGE_ALLOC 2749 2750 static struct { 2751 struct fault_attr attr; 2752 2753 bool ignore_gfp_highmem; 2754 bool ignore_gfp_reclaim; 2755 u32 min_order; 2756 } fail_page_alloc = { 2757 .attr = FAULT_ATTR_INITIALIZER, 2758 .ignore_gfp_reclaim = true, 2759 .ignore_gfp_highmem = true, 2760 .min_order = 1, 2761 }; 2762 2763 static int __init setup_fail_page_alloc(char *str) 2764 { 2765 return setup_fault_attr(&fail_page_alloc.attr, str); 2766 } 2767 __setup("fail_page_alloc=", setup_fail_page_alloc); 2768 2769 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2770 { 2771 if (order < fail_page_alloc.min_order) 2772 return false; 2773 if (gfp_mask & __GFP_NOFAIL) 2774 return false; 2775 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 2776 return false; 2777 if (fail_page_alloc.ignore_gfp_reclaim && 2778 (gfp_mask & __GFP_DIRECT_RECLAIM)) 2779 return false; 2780 2781 return should_fail(&fail_page_alloc.attr, 1 << order); 2782 } 2783 2784 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 2785 2786 static int __init fail_page_alloc_debugfs(void) 2787 { 2788 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 2789 struct dentry *dir; 2790 2791 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 2792 &fail_page_alloc.attr); 2793 if (IS_ERR(dir)) 2794 return PTR_ERR(dir); 2795 2796 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, 2797 &fail_page_alloc.ignore_gfp_reclaim)) 2798 goto fail; 2799 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir, 2800 &fail_page_alloc.ignore_gfp_highmem)) 2801 goto fail; 2802 if (!debugfs_create_u32("min-order", mode, dir, 2803 &fail_page_alloc.min_order)) 2804 goto fail; 2805 2806 return 0; 2807 fail: 2808 debugfs_remove_recursive(dir); 2809 2810 return -ENOMEM; 2811 } 2812 2813 late_initcall(fail_page_alloc_debugfs); 2814 2815 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 2816 2817 #else /* CONFIG_FAIL_PAGE_ALLOC */ 2818 2819 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2820 { 2821 return false; 2822 } 2823 2824 #endif 
/* CONFIG_FAIL_PAGE_ALLOC */ 2825 2826 /* 2827 * Return true if free base pages are above 'mark'. For high-order checks it 2828 * will return true of the order-0 watermark is reached and there is at least 2829 * one free page of a suitable size. Checking now avoids taking the zone lock 2830 * to check in the allocation paths if no pages are free. 2831 */ 2832 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2833 int classzone_idx, unsigned int alloc_flags, 2834 long free_pages) 2835 { 2836 long min = mark; 2837 int o; 2838 const bool alloc_harder = (alloc_flags & ALLOC_HARDER); 2839 2840 /* free_pages may go negative - that's OK */ 2841 free_pages -= (1 << order) - 1; 2842 2843 if (alloc_flags & ALLOC_HIGH) 2844 min -= min / 2; 2845 2846 /* 2847 * If the caller does not have rights to ALLOC_HARDER then subtract 2848 * the high-atomic reserves. This will over-estimate the size of the 2849 * atomic reserve but it avoids a search. 2850 */ 2851 if (likely(!alloc_harder)) 2852 free_pages -= z->nr_reserved_highatomic; 2853 else 2854 min -= min / 4; 2855 2856 #ifdef CONFIG_CMA 2857 /* If allocation can't use CMA areas don't use free CMA pages */ 2858 if (!(alloc_flags & ALLOC_CMA)) 2859 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); 2860 #endif 2861 2862 /* 2863 * Check watermarks for an order-0 allocation request. If these 2864 * are not met, then a high-order request also cannot go ahead 2865 * even if a suitable page happened to be free. 2866 */ 2867 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 2868 return false; 2869 2870 /* If this is an order-0 request then the watermark is fine */ 2871 if (!order) 2872 return true; 2873 2874 /* For a high-order request, check at least one suitable page is free */ 2875 for (o = order; o < MAX_ORDER; o++) { 2876 struct free_area *area = &z->free_area[o]; 2877 int mt; 2878 2879 if (!area->nr_free) 2880 continue; 2881 2882 if (alloc_harder) 2883 return true; 2884 2885 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2886 if (!list_empty(&area->free_list[mt])) 2887 return true; 2888 } 2889 2890 #ifdef CONFIG_CMA 2891 if ((alloc_flags & ALLOC_CMA) && 2892 !list_empty(&area->free_list[MIGRATE_CMA])) { 2893 return true; 2894 } 2895 #endif 2896 } 2897 return false; 2898 } 2899 2900 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2901 int classzone_idx, unsigned int alloc_flags) 2902 { 2903 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2904 zone_page_state(z, NR_FREE_PAGES)); 2905 } 2906 2907 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 2908 unsigned long mark, int classzone_idx, unsigned int alloc_flags) 2909 { 2910 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2911 long cma_pages = 0; 2912 2913 #ifdef CONFIG_CMA 2914 /* If allocation can't use CMA areas don't use free CMA pages */ 2915 if (!(alloc_flags & ALLOC_CMA)) 2916 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES); 2917 #endif 2918 2919 /* 2920 * Fast check for order-0 only. If this fails then the reserves 2921 * need to be calculated. There is a corner case where the check 2922 * passes but only the high-order atomic reserve are free. If 2923 * the caller is !atomic then it'll uselessly search the free 2924 * list. That corner case is then slower but it is harmless. 
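 *
 * As a worked example: with, say, mark = 1024 pages, a lowmem_reserve of
 * 256 for the requested classzone and 2000 free non-CMA pages, an order-0
 * request passes the fast check below (2000 > 1024 + 256) without
 * computing any of the ALLOC_HIGH/ALLOC_HARDER or highatomic adjustments
 * in __zone_watermark_ok().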
2925 */ 2926 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx]) 2927 return true; 2928 2929 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2930 free_pages); 2931 } 2932 2933 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2934 unsigned long mark, int classzone_idx) 2935 { 2936 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2937 2938 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2939 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2940 2941 return __zone_watermark_ok(z, order, mark, classzone_idx, 0, 2942 free_pages); 2943 } 2944 2945 #ifdef CONFIG_NUMA 2946 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2947 { 2948 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 2949 RECLAIM_DISTANCE; 2950 } 2951 #else /* CONFIG_NUMA */ 2952 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2953 { 2954 return true; 2955 } 2956 #endif /* CONFIG_NUMA */ 2957 2958 /* 2959 * get_page_from_freelist goes through the zonelist trying to allocate 2960 * a page. 2961 */ 2962 static struct page * 2963 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 2964 const struct alloc_context *ac) 2965 { 2966 struct zoneref *z = ac->preferred_zoneref; 2967 struct zone *zone; 2968 struct pglist_data *last_pgdat_dirty_limit = NULL; 2969 2970 /* 2971 * Scan zonelist, looking for a zone with enough free. 2972 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 2973 */ 2974 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 2975 ac->nodemask) { 2976 struct page *page; 2977 unsigned long mark; 2978 2979 if (cpusets_enabled() && 2980 (alloc_flags & ALLOC_CPUSET) && 2981 !__cpuset_zone_allowed(zone, gfp_mask)) 2982 continue; 2983 /* 2984 * When allocating a page cache page for writing, we 2985 * want to get it from a node that is within its dirty 2986 * limit, such that no single node holds more than its 2987 * proportional share of globally allowed dirty pages. 2988 * The dirty limits take into account the node's 2989 * lowmem reserves and high watermark so that kswapd 2990 * should be able to balance it without having to 2991 * write pages from its LRU list. 2992 * 2993 * XXX: For now, allow allocations to potentially 2994 * exceed the per-node dirty limit in the slowpath 2995 * (spread_dirty_pages unset) before going into reclaim, 2996 * which is important when on a NUMA setup the allowed 2997 * nodes are together not big enough to reach the 2998 * global limit. The proper fix for these situations 2999 * will require awareness of nodes in the 3000 * dirty-throttling and the flusher threads. 
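 *
 * To put the per-node limit in perspective: with two equally sized
 * nodes that are both allowed, each node's dirty limit works out to
 * roughly half of the global one, so a heavy writer is spread across
 * both nodes rather than filling one of them with dirty page cache.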
3001 */ 3002 if (ac->spread_dirty_pages) { 3003 if (last_pgdat_dirty_limit == zone->zone_pgdat) 3004 continue; 3005 3006 if (!node_dirty_ok(zone->zone_pgdat)) { 3007 last_pgdat_dirty_limit = zone->zone_pgdat; 3008 continue; 3009 } 3010 } 3011 3012 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 3013 if (!zone_watermark_fast(zone, order, mark, 3014 ac_classzone_idx(ac), alloc_flags)) { 3015 int ret; 3016 3017 /* Checked here to keep the fast path fast */ 3018 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3019 if (alloc_flags & ALLOC_NO_WATERMARKS) 3020 goto try_this_zone; 3021 3022 if (node_reclaim_mode == 0 || 3023 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 3024 continue; 3025 3026 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3027 switch (ret) { 3028 case NODE_RECLAIM_NOSCAN: 3029 /* did not scan */ 3030 continue; 3031 case NODE_RECLAIM_FULL: 3032 /* scanned but unreclaimable */ 3033 continue; 3034 default: 3035 /* did we reclaim enough */ 3036 if (zone_watermark_ok(zone, order, mark, 3037 ac_classzone_idx(ac), alloc_flags)) 3038 goto try_this_zone; 3039 3040 continue; 3041 } 3042 } 3043 3044 try_this_zone: 3045 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 3046 gfp_mask, alloc_flags, ac->migratetype); 3047 if (page) { 3048 prep_new_page(page, order, gfp_mask, alloc_flags); 3049 3050 /* 3051 * If this is a high-order atomic allocation then check 3052 * if the pageblock should be reserved for the future 3053 */ 3054 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 3055 reserve_highatomic_pageblock(page, zone, order); 3056 3057 return page; 3058 } 3059 } 3060 3061 return NULL; 3062 } 3063 3064 /* 3065 * Large machines with many possible nodes should not always dump per-node 3066 * meminfo in irq context. 3067 */ 3068 static inline bool should_suppress_show_mem(void) 3069 { 3070 bool ret = false; 3071 3072 #if NODES_SHIFT > 8 3073 ret = in_interrupt(); 3074 #endif 3075 return ret; 3076 } 3077 3078 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3079 { 3080 unsigned int filter = SHOW_MEM_FILTER_NODES; 3081 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1); 3082 3083 if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs)) 3084 return; 3085 3086 /* 3087 * This documents exceptions given to allocations in certain 3088 * contexts that are allowed to allocate outside current's set 3089 * of allowed nodes. 3090 */ 3091 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3092 if (test_thread_flag(TIF_MEMDIE) || 3093 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3094 filter &= ~SHOW_MEM_FILTER_NODES; 3095 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3096 filter &= ~SHOW_MEM_FILTER_NODES; 3097 3098 show_mem(filter, nodemask); 3099 } 3100 3101 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
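/*
 * Typical usage, as in the allocation slowpath below:
 *
 *	warn_alloc(gfp_mask, ac->nodemask,
 *		   "page allocation failure: order:%u", order);
 *
 * The message is prefixed with current->comm and followed by the gfp
 * mask, the nodemask, the current cpuset and a stack trace; output is
 * ratelimited and suppressed entirely for __GFP_NOWARN requests.
 */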
3102 { 3103 struct va_format vaf; 3104 va_list args; 3105 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL, 3106 DEFAULT_RATELIMIT_BURST); 3107 3108 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || 3109 debug_guardpage_minorder() > 0) 3110 return; 3111 3112 pr_warn("%s: ", current->comm); 3113 3114 va_start(args, fmt); 3115 vaf.fmt = fmt; 3116 vaf.va = &args; 3117 pr_cont("%pV", &vaf); 3118 va_end(args); 3119 3120 pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask); 3121 if (nodemask) 3122 pr_cont("%*pbl\n", nodemask_pr_args(nodemask)); 3123 else 3124 pr_cont("(null)\n"); 3125 3126 cpuset_print_current_mems_allowed(); 3127 3128 dump_stack(); 3129 warn_alloc_show_mem(gfp_mask, nodemask); 3130 } 3131 3132 static inline struct page * 3133 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3134 unsigned int alloc_flags, 3135 const struct alloc_context *ac) 3136 { 3137 struct page *page; 3138 3139 page = get_page_from_freelist(gfp_mask, order, 3140 alloc_flags|ALLOC_CPUSET, ac); 3141 /* 3142 * fallback to ignore cpuset restriction if our nodes 3143 * are depleted 3144 */ 3145 if (!page) 3146 page = get_page_from_freelist(gfp_mask, order, 3147 alloc_flags, ac); 3148 3149 return page; 3150 } 3151 3152 static inline struct page * 3153 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3154 const struct alloc_context *ac, unsigned long *did_some_progress) 3155 { 3156 struct oom_control oc = { 3157 .zonelist = ac->zonelist, 3158 .nodemask = ac->nodemask, 3159 .memcg = NULL, 3160 .gfp_mask = gfp_mask, 3161 .order = order, 3162 }; 3163 struct page *page; 3164 3165 *did_some_progress = 0; 3166 3167 /* 3168 * Acquire the oom lock. If that fails, somebody else is 3169 * making progress for us. 3170 */ 3171 if (!mutex_trylock(&oom_lock)) { 3172 *did_some_progress = 1; 3173 schedule_timeout_uninterruptible(1); 3174 return NULL; 3175 } 3176 3177 /* 3178 * Go through the zonelist yet one more time, keep very high watermark 3179 * here, this is only to catch a parallel oom killing, we must fail if 3180 * we're still under heavy pressure. 3181 */ 3182 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, 3183 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3184 if (page) 3185 goto out; 3186 3187 /* Coredumps can quickly deplete all memory reserves */ 3188 if (current->flags & PF_DUMPCORE) 3189 goto out; 3190 /* The OOM killer will not help higher order allocs */ 3191 if (order > PAGE_ALLOC_COSTLY_ORDER) 3192 goto out; 3193 /* The OOM killer does not needlessly kill tasks for lowmem */ 3194 if (ac->high_zoneidx < ZONE_NORMAL) 3195 goto out; 3196 if (pm_suspended_storage()) 3197 goto out; 3198 /* 3199 * XXX: GFP_NOFS allocations should rather fail than rely on 3200 * other request to make a forward progress. 3201 * We are in an unfortunate situation where out_of_memory cannot 3202 * do much for this context but let's try it to at least get 3203 * access to memory reserved if the current task is killed (see 3204 * out_of_memory). Once filesystems are ready to handle allocation 3205 * failures more gracefully we should just bail out here. 
3206 */ 3207 3208 /* The OOM killer may not free memory on a specific node */ 3209 if (gfp_mask & __GFP_THISNODE) 3210 goto out; 3211 3212 /* Exhausted what can be done so it's blamo time */ 3213 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { 3214 *did_some_progress = 1; 3215 3216 /* 3217 * Help non-failing allocations by giving them access to memory 3218 * reserves 3219 */ 3220 if (gfp_mask & __GFP_NOFAIL) 3221 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3222 ALLOC_NO_WATERMARKS, ac); 3223 } 3224 out: 3225 mutex_unlock(&oom_lock); 3226 return page; 3227 } 3228 3229 /* 3230 * Maximum number of compaction retries wit a progress before OOM 3231 * killer is consider as the only way to move forward. 3232 */ 3233 #define MAX_COMPACT_RETRIES 16 3234 3235 #ifdef CONFIG_COMPACTION 3236 /* Try memory compaction for high-order allocations before reclaim */ 3237 static struct page * 3238 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3239 unsigned int alloc_flags, const struct alloc_context *ac, 3240 enum compact_priority prio, enum compact_result *compact_result) 3241 { 3242 struct page *page; 3243 3244 if (!order) 3245 return NULL; 3246 3247 current->flags |= PF_MEMALLOC; 3248 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3249 prio); 3250 current->flags &= ~PF_MEMALLOC; 3251 3252 if (*compact_result <= COMPACT_INACTIVE) 3253 return NULL; 3254 3255 /* 3256 * At least in one zone compaction wasn't deferred or skipped, so let's 3257 * count a compaction stall 3258 */ 3259 count_vm_event(COMPACTSTALL); 3260 3261 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3262 3263 if (page) { 3264 struct zone *zone = page_zone(page); 3265 3266 zone->compact_blockskip_flush = false; 3267 compaction_defer_reset(zone, order, true); 3268 count_vm_event(COMPACTSUCCESS); 3269 return page; 3270 } 3271 3272 /* 3273 * It's bad if compaction run occurs and fails. The most likely reason 3274 * is that pages exist, but not enough to satisfy watermarks. 3275 */ 3276 count_vm_event(COMPACTFAIL); 3277 3278 cond_resched(); 3279 3280 return NULL; 3281 } 3282 3283 static inline bool 3284 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3285 enum compact_result compact_result, 3286 enum compact_priority *compact_priority, 3287 int *compaction_retries) 3288 { 3289 int max_retries = MAX_COMPACT_RETRIES; 3290 int min_priority; 3291 bool ret = false; 3292 int retries = *compaction_retries; 3293 enum compact_priority priority = *compact_priority; 3294 3295 if (!order) 3296 return false; 3297 3298 if (compaction_made_progress(compact_result)) 3299 (*compaction_retries)++; 3300 3301 /* 3302 * compaction considers all the zone as desperately out of memory 3303 * so it doesn't really make much sense to retry except when the 3304 * failure could be caused by insufficient priority 3305 */ 3306 if (compaction_failed(compact_result)) 3307 goto check_priority; 3308 3309 /* 3310 * make sure the compaction wasn't deferred or didn't bail out early 3311 * due to locks contention before we declare that we should give up. 3312 * But do not retry if the given zonelist is not suitable for 3313 * compaction. 
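 *
 * For scale: MAX_COMPACT_RETRIES is 16 and the budget below is divided
 * by 4 for costly orders, so a costly request gets at most 4 retries at
 * a given compaction priority before a more aggressive priority is
 * tried or we give up.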
3314 */ 3315 if (compaction_withdrawn(compact_result)) { 3316 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3317 goto out; 3318 } 3319 3320 /* 3321 * !costly requests are much more important than __GFP_REPEAT 3322 * costly ones because they are de facto nofail and invoke OOM 3323 * killer to move on while costly can fail and users are ready 3324 * to cope with that. 1/4 retries is rather arbitrary but we 3325 * would need much more detailed feedback from compaction to 3326 * make a better decision. 3327 */ 3328 if (order > PAGE_ALLOC_COSTLY_ORDER) 3329 max_retries /= 4; 3330 if (*compaction_retries <= max_retries) { 3331 ret = true; 3332 goto out; 3333 } 3334 3335 /* 3336 * Make sure there are attempts at the highest priority if we exhausted 3337 * all retries or failed at the lower priorities. 3338 */ 3339 check_priority: 3340 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3341 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3342 3343 if (*compact_priority > min_priority) { 3344 (*compact_priority)--; 3345 *compaction_retries = 0; 3346 ret = true; 3347 } 3348 out: 3349 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3350 return ret; 3351 } 3352 #else 3353 static inline struct page * 3354 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3355 unsigned int alloc_flags, const struct alloc_context *ac, 3356 enum compact_priority prio, enum compact_result *compact_result) 3357 { 3358 *compact_result = COMPACT_SKIPPED; 3359 return NULL; 3360 } 3361 3362 static inline bool 3363 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3364 enum compact_result compact_result, 3365 enum compact_priority *compact_priority, 3366 int *compaction_retries) 3367 { 3368 struct zone *zone; 3369 struct zoneref *z; 3370 3371 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3372 return false; 3373 3374 /* 3375 * There are setups with compaction disabled which would prefer to loop 3376 * inside the allocator rather than hit the oom killer prematurely. 3377 * Let's give them a good hope and keep retrying while the order-0 3378 * watermarks are OK. 
3379 */ 3380 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 3381 ac->nodemask) { 3382 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3383 ac_classzone_idx(ac), alloc_flags)) 3384 return true; 3385 } 3386 return false; 3387 } 3388 #endif /* CONFIG_COMPACTION */ 3389 3390 /* Perform direct synchronous page reclaim */ 3391 static int 3392 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3393 const struct alloc_context *ac) 3394 { 3395 struct reclaim_state reclaim_state; 3396 int progress; 3397 3398 cond_resched(); 3399 3400 /* We now go into synchronous reclaim */ 3401 cpuset_memory_pressure_bump(); 3402 current->flags |= PF_MEMALLOC; 3403 lockdep_set_current_reclaim_state(gfp_mask); 3404 reclaim_state.reclaimed_slab = 0; 3405 current->reclaim_state = &reclaim_state; 3406 3407 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3408 ac->nodemask); 3409 3410 current->reclaim_state = NULL; 3411 lockdep_clear_current_reclaim_state(); 3412 current->flags &= ~PF_MEMALLOC; 3413 3414 cond_resched(); 3415 3416 return progress; 3417 } 3418 3419 /* The really slow allocator path where we enter direct reclaim */ 3420 static inline struct page * 3421 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3422 unsigned int alloc_flags, const struct alloc_context *ac, 3423 unsigned long *did_some_progress) 3424 { 3425 struct page *page = NULL; 3426 bool drained = false; 3427 3428 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3429 if (unlikely(!(*did_some_progress))) 3430 return NULL; 3431 3432 retry: 3433 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3434 3435 /* 3436 * If an allocation failed after direct reclaim, it could be because 3437 * pages are pinned on the per-cpu lists or in high alloc reserves. 3438 * Shrink them them and try again 3439 */ 3440 if (!page && !drained) { 3441 unreserve_highatomic_pageblock(ac, false); 3442 drain_all_pages(NULL); 3443 drained = true; 3444 goto retry; 3445 } 3446 3447 return page; 3448 } 3449 3450 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) 3451 { 3452 struct zoneref *z; 3453 struct zone *zone; 3454 pg_data_t *last_pgdat = NULL; 3455 3456 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3457 ac->high_zoneidx, ac->nodemask) { 3458 if (last_pgdat != zone->zone_pgdat) 3459 wakeup_kswapd(zone, order, ac->high_zoneidx); 3460 last_pgdat = zone->zone_pgdat; 3461 } 3462 } 3463 3464 static inline unsigned int 3465 gfp_to_alloc_flags(gfp_t gfp_mask) 3466 { 3467 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3468 3469 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 3470 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 3471 3472 /* 3473 * The caller may dip into page reserves a bit more if the caller 3474 * cannot run direct reclaim, or if the caller has realtime scheduling 3475 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3476 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 3477 */ 3478 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 3479 3480 if (gfp_mask & __GFP_ATOMIC) { 3481 /* 3482 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 3483 * if it can't schedule. 3484 */ 3485 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3486 alloc_flags |= ALLOC_HARDER; 3487 /* 3488 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 3489 * comment for __cpuset_node_allowed(). 
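 *
 * As an example, a plain GFP_ATOMIC request (which carries __GFP_HIGH
 * and __GFP_ATOMIC) leaves this function with
 *
 *	ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER
 *
 * and with ALLOC_CPUSET cleared, so it is allowed to eat further into
 * the reserves below the min watermark instead of failing outright.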
3490 */ 3491 alloc_flags &= ~ALLOC_CPUSET; 3492 } else if (unlikely(rt_task(current)) && !in_interrupt()) 3493 alloc_flags |= ALLOC_HARDER; 3494 3495 #ifdef CONFIG_CMA 3496 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3497 alloc_flags |= ALLOC_CMA; 3498 #endif 3499 return alloc_flags; 3500 } 3501 3502 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 3503 { 3504 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 3505 return false; 3506 3507 if (gfp_mask & __GFP_MEMALLOC) 3508 return true; 3509 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 3510 return true; 3511 if (!in_interrupt() && 3512 ((current->flags & PF_MEMALLOC) || 3513 unlikely(test_thread_flag(TIF_MEMDIE)))) 3514 return true; 3515 3516 return false; 3517 } 3518 3519 /* 3520 * Maximum number of reclaim retries without any progress before OOM killer 3521 * is consider as the only way to move forward. 3522 */ 3523 #define MAX_RECLAIM_RETRIES 16 3524 3525 /* 3526 * Checks whether it makes sense to retry the reclaim to make a forward progress 3527 * for the given allocation request. 3528 * The reclaim feedback represented by did_some_progress (any progress during 3529 * the last reclaim round) and no_progress_loops (number of reclaim rounds without 3530 * any progress in a row) is considered as well as the reclaimable pages on the 3531 * applicable zone list (with a backoff mechanism which is a function of 3532 * no_progress_loops). 3533 * 3534 * Returns true if a retry is viable or false to enter the oom path. 3535 */ 3536 static inline bool 3537 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 3538 struct alloc_context *ac, int alloc_flags, 3539 bool did_some_progress, int *no_progress_loops) 3540 { 3541 struct zone *zone; 3542 struct zoneref *z; 3543 3544 /* 3545 * Costly allocations might have made a progress but this doesn't mean 3546 * their order will become available due to high fragmentation so 3547 * always increment the no progress counter for them 3548 */ 3549 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 3550 *no_progress_loops = 0; 3551 else 3552 (*no_progress_loops)++; 3553 3554 /* 3555 * Make sure we converge to OOM if we cannot make any progress 3556 * several times in the row. 3557 */ 3558 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 3559 /* Before OOM, exhaust highatomic_reserve */ 3560 return unreserve_highatomic_pageblock(ac, true); 3561 } 3562 3563 /* 3564 * Keep reclaiming pages while there is a chance this will lead 3565 * somewhere. If none of the target zones can satisfy our allocation 3566 * request even if all reclaimable pages are considered then we are 3567 * screwed and have to go OOM. 3568 */ 3569 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 3570 ac->nodemask) { 3571 unsigned long available; 3572 unsigned long reclaimable; 3573 unsigned long min_wmark = min_wmark_pages(zone); 3574 bool wmark; 3575 3576 available = reclaimable = zone_reclaimable_pages(zone); 3577 available -= DIV_ROUND_UP((*no_progress_loops) * available, 3578 MAX_RECLAIM_RETRIES); 3579 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 3580 3581 /* 3582 * Would the allocation succeed if we reclaimed the whole 3583 * available? 
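 *
 * Note that the backoff above discounts the reclaimable pages linearly
 * with the number of fruitless rounds; e.g. with MAX_RECLAIM_RETRIES
 * of 16 and no_progress_loops of 8,
 *
 *	available = reclaimable - DIV_ROUND_UP(8 * reclaimable, 16)
 *		    + free pages
 *
 * so only about half of the reclaimable pages still count towards
 * passing the min watermark here.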
3584 */ 3585 wmark = __zone_watermark_ok(zone, order, min_wmark, 3586 ac_classzone_idx(ac), alloc_flags, available); 3587 trace_reclaim_retry_zone(z, order, reclaimable, 3588 available, min_wmark, *no_progress_loops, wmark); 3589 if (wmark) { 3590 /* 3591 * If we didn't make any progress and have a lot of 3592 * dirty + writeback pages then we should wait for 3593 * an IO to complete to slow down the reclaim and 3594 * prevent from pre mature OOM 3595 */ 3596 if (!did_some_progress) { 3597 unsigned long write_pending; 3598 3599 write_pending = zone_page_state_snapshot(zone, 3600 NR_ZONE_WRITE_PENDING); 3601 3602 if (2 * write_pending > reclaimable) { 3603 congestion_wait(BLK_RW_ASYNC, HZ/10); 3604 return true; 3605 } 3606 } 3607 3608 /* 3609 * Memory allocation/reclaim might be called from a WQ 3610 * context and the current implementation of the WQ 3611 * concurrency control doesn't recognize that 3612 * a particular WQ is congested if the worker thread is 3613 * looping without ever sleeping. Therefore we have to 3614 * do a short sleep here rather than calling 3615 * cond_resched(). 3616 */ 3617 if (current->flags & PF_WQ_WORKER) 3618 schedule_timeout_uninterruptible(1); 3619 else 3620 cond_resched(); 3621 3622 return true; 3623 } 3624 } 3625 3626 return false; 3627 } 3628 3629 static inline struct page * 3630 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 3631 struct alloc_context *ac) 3632 { 3633 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 3634 struct page *page = NULL; 3635 unsigned int alloc_flags; 3636 unsigned long did_some_progress; 3637 enum compact_priority compact_priority; 3638 enum compact_result compact_result; 3639 int compaction_retries; 3640 int no_progress_loops; 3641 unsigned long alloc_start = jiffies; 3642 unsigned int stall_timeout = 10 * HZ; 3643 unsigned int cpuset_mems_cookie; 3644 3645 /* 3646 * In the slowpath, we sanity check order to avoid ever trying to 3647 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 3648 * be using allocators in order of preference for an area that is 3649 * too large. 3650 */ 3651 if (order >= MAX_ORDER) { 3652 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 3653 return NULL; 3654 } 3655 3656 /* 3657 * We also sanity check to catch abuse of atomic reserves being used by 3658 * callers that are not in atomic context. 3659 */ 3660 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 3661 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 3662 gfp_mask &= ~__GFP_ATOMIC; 3663 3664 retry_cpuset: 3665 compaction_retries = 0; 3666 no_progress_loops = 0; 3667 compact_priority = DEF_COMPACT_PRIORITY; 3668 cpuset_mems_cookie = read_mems_allowed_begin(); 3669 3670 /* 3671 * The fast path uses conservative alloc_flags to succeed only until 3672 * kswapd needs to be woken up, and to avoid the cost of setting up 3673 * alloc_flags precisely. So we do that now. 3674 */ 3675 alloc_flags = gfp_to_alloc_flags(gfp_mask); 3676 3677 /* 3678 * We need to recalculate the starting point for the zonelist iterator 3679 * because we might have used different nodemask in the fast path, or 3680 * there was a cpuset modification and we are retrying - otherwise we 3681 * could end up iterating over non-eligible zones endlessly. 
3682 */ 3683 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 3684 ac->high_zoneidx, ac->nodemask); 3685 if (!ac->preferred_zoneref->zone) 3686 goto nopage; 3687 3688 if (gfp_mask & __GFP_KSWAPD_RECLAIM) 3689 wake_all_kswapds(order, ac); 3690 3691 /* 3692 * The adjusted alloc_flags might result in immediate success, so try 3693 * that first 3694 */ 3695 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3696 if (page) 3697 goto got_pg; 3698 3699 /* 3700 * For costly allocations, try direct compaction first, as it's likely 3701 * that we have enough base pages and don't need to reclaim. Don't try 3702 * that for allocations that are allowed to ignore watermarks, as the 3703 * ALLOC_NO_WATERMARKS attempt didn't yet happen. 3704 */ 3705 if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER && 3706 !gfp_pfmemalloc_allowed(gfp_mask)) { 3707 page = __alloc_pages_direct_compact(gfp_mask, order, 3708 alloc_flags, ac, 3709 INIT_COMPACT_PRIORITY, 3710 &compact_result); 3711 if (page) 3712 goto got_pg; 3713 3714 /* 3715 * Checks for costly allocations with __GFP_NORETRY, which 3716 * includes THP page fault allocations 3717 */ 3718 if (gfp_mask & __GFP_NORETRY) { 3719 /* 3720 * If compaction is deferred for high-order allocations, 3721 * it is because sync compaction recently failed. If 3722 * this is the case and the caller requested a THP 3723 * allocation, we do not want to heavily disrupt the 3724 * system, so we fail the allocation instead of entering 3725 * direct reclaim. 3726 */ 3727 if (compact_result == COMPACT_DEFERRED) 3728 goto nopage; 3729 3730 /* 3731 * Looks like reclaim/compaction is worth trying, but 3732 * sync compaction could be very expensive, so keep 3733 * using async compaction. 3734 */ 3735 compact_priority = INIT_COMPACT_PRIORITY; 3736 } 3737 } 3738 3739 retry: 3740 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 3741 if (gfp_mask & __GFP_KSWAPD_RECLAIM) 3742 wake_all_kswapds(order, ac); 3743 3744 if (gfp_pfmemalloc_allowed(gfp_mask)) 3745 alloc_flags = ALLOC_NO_WATERMARKS; 3746 3747 /* 3748 * Reset the zonelist iterators if memory policies can be ignored. 3749 * These allocations are high priority and system rather than user 3750 * orientated. 
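 *
 * For example, an ALLOC_NO_WATERMARKS (PF_MEMALLOC-style) allocation
 * restarts below from the local node's full zonelist, even if the
 * task's mempolicy or cpuset would normally narrow the search.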
3751 */ 3752 if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) { 3753 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); 3754 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 3755 ac->high_zoneidx, ac->nodemask); 3756 } 3757 3758 /* Attempt with potentially adjusted zonelist and alloc_flags */ 3759 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3760 if (page) 3761 goto got_pg; 3762 3763 /* Caller is not willing to reclaim, we can't balance anything */ 3764 if (!can_direct_reclaim) 3765 goto nopage; 3766 3767 /* Make sure we know about allocations which stall for too long */ 3768 if (time_after(jiffies, alloc_start + stall_timeout)) { 3769 warn_alloc(gfp_mask, ac->nodemask, 3770 "page allocation stalls for %ums, order:%u", 3771 jiffies_to_msecs(jiffies-alloc_start), order); 3772 stall_timeout += 10 * HZ; 3773 } 3774 3775 /* Avoid recursion of direct reclaim */ 3776 if (current->flags & PF_MEMALLOC) 3777 goto nopage; 3778 3779 /* Try direct reclaim and then allocating */ 3780 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 3781 &did_some_progress); 3782 if (page) 3783 goto got_pg; 3784 3785 /* Try direct compaction and then allocating */ 3786 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 3787 compact_priority, &compact_result); 3788 if (page) 3789 goto got_pg; 3790 3791 /* Do not loop if specifically requested */ 3792 if (gfp_mask & __GFP_NORETRY) 3793 goto nopage; 3794 3795 /* 3796 * Do not retry costly high order allocations unless they are 3797 * __GFP_REPEAT 3798 */ 3799 if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT)) 3800 goto nopage; 3801 3802 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 3803 did_some_progress > 0, &no_progress_loops)) 3804 goto retry; 3805 3806 /* 3807 * It doesn't make any sense to retry for the compaction if the order-0 3808 * reclaim is not able to make any progress because the current 3809 * implementation of the compaction depends on the sufficient amount 3810 * of free memory (see __compaction_suitable) 3811 */ 3812 if (did_some_progress > 0 && 3813 should_compact_retry(ac, order, alloc_flags, 3814 compact_result, &compact_priority, 3815 &compaction_retries)) 3816 goto retry; 3817 3818 /* 3819 * It's possible we raced with cpuset update so the OOM would be 3820 * premature (see below the nopage: label for full explanation). 3821 */ 3822 if (read_mems_allowed_retry(cpuset_mems_cookie)) 3823 goto retry_cpuset; 3824 3825 /* Reclaim has failed us, start killing things */ 3826 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 3827 if (page) 3828 goto got_pg; 3829 3830 /* Avoid allocations with no watermarks from looping endlessly */ 3831 if (test_thread_flag(TIF_MEMDIE)) 3832 goto nopage; 3833 3834 /* Retry as long as the OOM killer is making progress */ 3835 if (did_some_progress) { 3836 no_progress_loops = 0; 3837 goto retry; 3838 } 3839 3840 nopage: 3841 /* 3842 * When updating a task's mems_allowed or mempolicy nodemask, it is 3843 * possible to race with parallel threads in such a way that our 3844 * allocation can fail while the mask is being updated. If we are about 3845 * to fail, check if the cpuset changed during allocation and if so, 3846 * retry. 
3847 */ 3848 if (read_mems_allowed_retry(cpuset_mems_cookie)) 3849 goto retry_cpuset; 3850 3851 /* 3852 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 3853 * we always retry 3854 */ 3855 if (gfp_mask & __GFP_NOFAIL) { 3856 /* 3857 * All existing users of the __GFP_NOFAIL are blockable, so warn 3858 * of any new users that actually require GFP_NOWAIT 3859 */ 3860 if (WARN_ON_ONCE(!can_direct_reclaim)) 3861 goto fail; 3862 3863 /* 3864 * PF_MEMALLOC request from this context is rather bizarre 3865 * because we cannot reclaim anything and only can loop waiting 3866 * for somebody to do a work for us 3867 */ 3868 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 3869 3870 /* 3871 * non failing costly orders are a hard requirement which we 3872 * are not prepared for much so let's warn about these users 3873 * so that we can identify them and convert them to something 3874 * else. 3875 */ 3876 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); 3877 3878 /* 3879 * Help non-failing allocations by giving them access to memory 3880 * reserves but do not use ALLOC_NO_WATERMARKS because this 3881 * could deplete whole memory reserves which would just make 3882 * the situation worse 3883 */ 3884 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); 3885 if (page) 3886 goto got_pg; 3887 3888 cond_resched(); 3889 goto retry; 3890 } 3891 fail: 3892 warn_alloc(gfp_mask, ac->nodemask, 3893 "page allocation failure: order:%u", order); 3894 got_pg: 3895 return page; 3896 } 3897 3898 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 3899 struct zonelist *zonelist, nodemask_t *nodemask, 3900 struct alloc_context *ac, gfp_t *alloc_mask, 3901 unsigned int *alloc_flags) 3902 { 3903 ac->high_zoneidx = gfp_zone(gfp_mask); 3904 ac->zonelist = zonelist; 3905 ac->nodemask = nodemask; 3906 ac->migratetype = gfpflags_to_migratetype(gfp_mask); 3907 3908 if (cpusets_enabled()) { 3909 *alloc_mask |= __GFP_HARDWALL; 3910 if (!ac->nodemask) 3911 ac->nodemask = &cpuset_current_mems_allowed; 3912 else 3913 *alloc_flags |= ALLOC_CPUSET; 3914 } 3915 3916 lockdep_trace_alloc(gfp_mask); 3917 3918 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 3919 3920 if (should_fail_alloc_page(gfp_mask, order)) 3921 return false; 3922 3923 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE) 3924 *alloc_flags |= ALLOC_CMA; 3925 3926 return true; 3927 } 3928 3929 /* Determine whether to spread dirty pages and what the first usable zone */ 3930 static inline void finalise_ac(gfp_t gfp_mask, 3931 unsigned int order, struct alloc_context *ac) 3932 { 3933 /* Dirty zone balancing only done in the fast path */ 3934 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3935 3936 /* 3937 * The preferred zone is used for statistics but crucially it is 3938 * also used as the starting point for the zonelist iterator. It 3939 * may get reset for allocations that ignore memory policies. 3940 */ 3941 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 3942 ac->high_zoneidx, ac->nodemask); 3943 } 3944 3945 /* 3946 * This is the 'heart' of the zoned buddy allocator. 
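 *
 * Callers normally reach this through the alloc_pages*() wrappers rather
 * than calling it directly.  A minimal caller-side sketch (flags and
 * order are only illustrative):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *	if (page) {
 *		void *addr = page_address(page);
 *		...use the page...
 *		__free_pages(page, 0);
 *	}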
3947 */ 3948 struct page * 3949 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 3950 struct zonelist *zonelist, nodemask_t *nodemask) 3951 { 3952 struct page *page; 3953 unsigned int alloc_flags = ALLOC_WMARK_LOW; 3954 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ 3955 struct alloc_context ac = { }; 3956 3957 gfp_mask &= gfp_allowed_mask; 3958 if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags)) 3959 return NULL; 3960 3961 finalise_ac(gfp_mask, order, &ac); 3962 3963 /* First allocation attempt */ 3964 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); 3965 if (likely(page)) 3966 goto out; 3967 3968 /* 3969 * Runtime PM, block IO and its error handling path can deadlock 3970 * because I/O on the device might not complete. 3971 */ 3972 alloc_mask = memalloc_noio_flags(gfp_mask); 3973 ac.spread_dirty_pages = false; 3974 3975 /* 3976 * Restore the original nodemask if it was potentially replaced with 3977 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 3978 */ 3979 if (unlikely(ac.nodemask != nodemask)) 3980 ac.nodemask = nodemask; 3981 3982 page = __alloc_pages_slowpath(alloc_mask, order, &ac); 3983 3984 out: 3985 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && 3986 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) { 3987 __free_pages(page, order); 3988 page = NULL; 3989 } 3990 3991 if (kmemcheck_enabled && page) 3992 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 3993 3994 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); 3995 3996 return page; 3997 } 3998 EXPORT_SYMBOL(__alloc_pages_nodemask); 3999 4000 /* 4001 * Common helper functions. 4002 */ 4003 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 4004 { 4005 struct page *page; 4006 4007 /* 4008 * __get_free_pages() returns a 32-bit address, which cannot represent 4009 * a highmem page 4010 */ 4011 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 4012 4013 page = alloc_pages(gfp_mask, order); 4014 if (!page) 4015 return 0; 4016 return (unsigned long) page_address(page); 4017 } 4018 EXPORT_SYMBOL(__get_free_pages); 4019 4020 unsigned long get_zeroed_page(gfp_t gfp_mask) 4021 { 4022 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 4023 } 4024 EXPORT_SYMBOL(get_zeroed_page); 4025 4026 void __free_pages(struct page *page, unsigned int order) 4027 { 4028 if (put_page_testzero(page)) { 4029 if (order == 0) 4030 free_hot_cold_page(page, false); 4031 else 4032 __free_pages_ok(page, order); 4033 } 4034 } 4035 4036 EXPORT_SYMBOL(__free_pages); 4037 4038 void free_pages(unsigned long addr, unsigned int order) 4039 { 4040 if (addr != 0) { 4041 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4042 __free_pages(virt_to_page((void *)addr), order); 4043 } 4044 } 4045 4046 EXPORT_SYMBOL(free_pages); 4047 4048 /* 4049 * Page Fragment: 4050 * An arbitrary-length arbitrary-offset area of memory which resides 4051 * within a 0 or higher order page. Multiple fragments within that page 4052 * are individually refcounted, in the page's reference counter. 4053 * 4054 * The page_frag functions below provide a simple allocation framework for 4055 * page fragments. This is used by the network stack and network device 4056 * drivers to provide a backing region of memory for use as either an 4057 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 
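 *
 * A minimal usage sketch (the cache is owned by the caller and is
 * typically per-cpu in real users; size and gfp flags are only
 * illustrative):
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		...fill the 256 byte fragment...
 *		page_frag_free(buf);
 *	}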
4058 */ 4059 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4060 gfp_t gfp_mask) 4061 { 4062 struct page *page = NULL; 4063 gfp_t gfp = gfp_mask; 4064 4065 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4066 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 4067 __GFP_NOMEMALLOC; 4068 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4069 PAGE_FRAG_CACHE_MAX_ORDER); 4070 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4071 #endif 4072 if (unlikely(!page)) 4073 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4074 4075 nc->va = page ? page_address(page) : NULL; 4076 4077 return page; 4078 } 4079 4080 void __page_frag_cache_drain(struct page *page, unsigned int count) 4081 { 4082 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4083 4084 if (page_ref_sub_and_test(page, count)) { 4085 unsigned int order = compound_order(page); 4086 4087 if (order == 0) 4088 free_hot_cold_page(page, false); 4089 else 4090 __free_pages_ok(page, order); 4091 } 4092 } 4093 EXPORT_SYMBOL(__page_frag_cache_drain); 4094 4095 void *page_frag_alloc(struct page_frag_cache *nc, 4096 unsigned int fragsz, gfp_t gfp_mask) 4097 { 4098 unsigned int size = PAGE_SIZE; 4099 struct page *page; 4100 int offset; 4101 4102 if (unlikely(!nc->va)) { 4103 refill: 4104 page = __page_frag_cache_refill(nc, gfp_mask); 4105 if (!page) 4106 return NULL; 4107 4108 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4109 /* if size can vary use size else just use PAGE_SIZE */ 4110 size = nc->size; 4111 #endif 4112 /* Even if we own the page, we do not use atomic_set(). 4113 * This would break get_page_unless_zero() users. 4114 */ 4115 page_ref_add(page, size - 1); 4116 4117 /* reset page count bias and offset to start of new frag */ 4118 nc->pfmemalloc = page_is_pfmemalloc(page); 4119 nc->pagecnt_bias = size; 4120 nc->offset = size; 4121 } 4122 4123 offset = nc->offset - fragsz; 4124 if (unlikely(offset < 0)) { 4125 page = virt_to_page(nc->va); 4126 4127 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 4128 goto refill; 4129 4130 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4131 /* if size can vary use size else just use PAGE_SIZE */ 4132 size = nc->size; 4133 #endif 4134 /* OK, page count is 0, we can safely set it */ 4135 set_page_count(page, size); 4136 4137 /* reset page count bias and offset to start of new frag */ 4138 nc->pagecnt_bias = size; 4139 offset = size - fragsz; 4140 } 4141 4142 nc->pagecnt_bias--; 4143 nc->offset = offset; 4144 4145 return nc->va + offset; 4146 } 4147 EXPORT_SYMBOL(page_frag_alloc); 4148 4149 /* 4150 * Frees a page fragment allocated out of either a compound or order 0 page. 4151 */ 4152 void page_frag_free(void *addr) 4153 { 4154 struct page *page = virt_to_head_page(addr); 4155 4156 if (unlikely(put_page_testzero(page))) 4157 __free_pages_ok(page, compound_order(page)); 4158 } 4159 EXPORT_SYMBOL(page_frag_free); 4160 4161 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4162 size_t size) 4163 { 4164 if (addr) { 4165 unsigned long alloc_end = addr + (PAGE_SIZE << order); 4166 unsigned long used = addr + PAGE_ALIGN(size); 4167 4168 split_page(virt_to_page((void *)addr), order); 4169 while (used < alloc_end) { 4170 free_page(used); 4171 used += PAGE_SIZE; 4172 } 4173 } 4174 return (void *)addr; 4175 } 4176 4177 /** 4178 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 
4179 * @size: the number of bytes to allocate 4180 * @gfp_mask: GFP flags for the allocation 4181 * 4182 * This function is similar to alloc_pages(), except that it allocates the 4183 * minimum number of pages to satisfy the request. alloc_pages() can only 4184 * allocate memory in power-of-two pages. 4185 * 4186 * This function is also limited by MAX_ORDER. 4187 * 4188 * Memory allocated by this function must be released by free_pages_exact(). 4189 */ 4190 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 4191 { 4192 unsigned int order = get_order(size); 4193 unsigned long addr; 4194 4195 addr = __get_free_pages(gfp_mask, order); 4196 return make_alloc_exact(addr, order, size); 4197 } 4198 EXPORT_SYMBOL(alloc_pages_exact); 4199 4200 /** 4201 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4202 * pages on a node. 4203 * @nid: the preferred node ID where memory should be allocated 4204 * @size: the number of bytes to allocate 4205 * @gfp_mask: GFP flags for the allocation 4206 * 4207 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 4208 * back. 4209 */ 4210 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 4211 { 4212 unsigned int order = get_order(size); 4213 struct page *p = alloc_pages_node(nid, gfp_mask, order); 4214 if (!p) 4215 return NULL; 4216 return make_alloc_exact((unsigned long)page_address(p), order, size); 4217 } 4218 4219 /** 4220 * free_pages_exact - release memory allocated via alloc_pages_exact() 4221 * @virt: the value returned by alloc_pages_exact. 4222 * @size: size of allocation, same value as passed to alloc_pages_exact(). 4223 * 4224 * Release the memory allocated by a previous call to alloc_pages_exact. 4225 */ 4226 void free_pages_exact(void *virt, size_t size) 4227 { 4228 unsigned long addr = (unsigned long)virt; 4229 unsigned long end = addr + PAGE_ALIGN(size); 4230 4231 while (addr < end) { 4232 free_page(addr); 4233 addr += PAGE_SIZE; 4234 } 4235 } 4236 EXPORT_SYMBOL(free_pages_exact); 4237 4238 /** 4239 * nr_free_zone_pages - count number of pages beyond high watermark 4240 * @offset: The zone index of the highest zone 4241 * 4242 * nr_free_zone_pages() counts the number of counts pages which are beyond the 4243 * high watermark within all zones at or below a given zone index. For each 4244 * zone, the number of pages is calculated as: 4245 * managed_pages - high_pages 4246 */ 4247 static unsigned long nr_free_zone_pages(int offset) 4248 { 4249 struct zoneref *z; 4250 struct zone *zone; 4251 4252 /* Just pick one node, since fallback list is circular */ 4253 unsigned long sum = 0; 4254 4255 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 4256 4257 for_each_zone_zonelist(zone, z, zonelist, offset) { 4258 unsigned long size = zone->managed_pages; 4259 unsigned long high = high_wmark_pages(zone); 4260 if (size > high) 4261 sum += size - high; 4262 } 4263 4264 return sum; 4265 } 4266 4267 /** 4268 * nr_free_buffer_pages - count number of pages beyond high watermark 4269 * 4270 * nr_free_buffer_pages() counts the number of pages which are beyond the high 4271 * watermark within ZONE_DMA and ZONE_NORMAL. 
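 *
 * Purely illustrative example: a zone with 1,000,000 managed pages and a
 * high watermark of 12,000 pages contributes 988,000 pages to this count,
 * while a zone already at or below its high watermark contributes nothing.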
4272 */ 4273 unsigned long nr_free_buffer_pages(void) 4274 { 4275 return nr_free_zone_pages(gfp_zone(GFP_USER)); 4276 } 4277 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 4278 4279 /** 4280 * nr_free_pagecache_pages - count number of pages beyond high watermark 4281 * 4282 * nr_free_pagecache_pages() counts the number of pages which are beyond the 4283 * high watermark within all zones. 4284 */ 4285 unsigned long nr_free_pagecache_pages(void) 4286 { 4287 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 4288 } 4289 4290 static inline void show_node(struct zone *zone) 4291 { 4292 if (IS_ENABLED(CONFIG_NUMA)) 4293 printk("Node %d ", zone_to_nid(zone)); 4294 } 4295 4296 long si_mem_available(void) 4297 { 4298 long available; 4299 unsigned long pagecache; 4300 unsigned long wmark_low = 0; 4301 unsigned long pages[NR_LRU_LISTS]; 4302 struct zone *zone; 4303 int lru; 4304 4305 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 4306 pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 4307 4308 for_each_zone(zone) 4309 wmark_low += zone->watermark[WMARK_LOW]; 4310 4311 /* 4312 * Estimate the amount of memory available for userspace allocations, 4313 * without causing swapping. 4314 */ 4315 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages; 4316 4317 /* 4318 * Not all the page cache can be freed, otherwise the system will 4319 * start swapping. Assume at least half of the page cache, or the 4320 * low watermark worth of cache, needs to stay. 4321 */ 4322 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 4323 pagecache -= min(pagecache / 2, wmark_low); 4324 available += pagecache; 4325 4326 /* 4327 * Part of the reclaimable slab consists of items that are in use, 4328 * and cannot be freed. Cap this estimate at the low watermark. 
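 *
 * Putting the pieces together, the estimate computed by this function is
 * roughly (pagecache being the active + inactive file LRU pages):
 *
 *	available = free_pages - totalreserve_pages
 *		  + pagecache - min(pagecache / 2, wmark_low)
 *		  + slab_reclaimable - min(slab_reclaimable / 2, wmark_low)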
4329 */ 4330 available += global_page_state(NR_SLAB_RECLAIMABLE) - 4331 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low); 4332 4333 if (available < 0) 4334 available = 0; 4335 return available; 4336 } 4337 EXPORT_SYMBOL_GPL(si_mem_available); 4338 4339 void si_meminfo(struct sysinfo *val) 4340 { 4341 val->totalram = totalram_pages; 4342 val->sharedram = global_node_page_state(NR_SHMEM); 4343 val->freeram = global_page_state(NR_FREE_PAGES); 4344 val->bufferram = nr_blockdev_pages(); 4345 val->totalhigh = totalhigh_pages; 4346 val->freehigh = nr_free_highpages(); 4347 val->mem_unit = PAGE_SIZE; 4348 } 4349 4350 EXPORT_SYMBOL(si_meminfo); 4351 4352 #ifdef CONFIG_NUMA 4353 void si_meminfo_node(struct sysinfo *val, int nid) 4354 { 4355 int zone_type; /* needs to be signed */ 4356 unsigned long managed_pages = 0; 4357 unsigned long managed_highpages = 0; 4358 unsigned long free_highpages = 0; 4359 pg_data_t *pgdat = NODE_DATA(nid); 4360 4361 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 4362 managed_pages += pgdat->node_zones[zone_type].managed_pages; 4363 val->totalram = managed_pages; 4364 val->sharedram = node_page_state(pgdat, NR_SHMEM); 4365 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 4366 #ifdef CONFIG_HIGHMEM 4367 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 4368 struct zone *zone = &pgdat->node_zones[zone_type]; 4369 4370 if (is_highmem(zone)) { 4371 managed_highpages += zone->managed_pages; 4372 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 4373 } 4374 } 4375 val->totalhigh = managed_highpages; 4376 val->freehigh = free_highpages; 4377 #else 4378 val->totalhigh = managed_highpages; 4379 val->freehigh = free_highpages; 4380 #endif 4381 val->mem_unit = PAGE_SIZE; 4382 } 4383 #endif 4384 4385 /* 4386 * Determine whether the node should be displayed or not, depending on whether 4387 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 4388 */ 4389 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 4390 { 4391 if (!(flags & SHOW_MEM_FILTER_NODES)) 4392 return false; 4393 4394 /* 4395 * no node mask - aka implicit memory numa policy. Do not bother with 4396 * the synchronization - read_mems_allowed_begin - because we do not 4397 * have to be precise here. 4398 */ 4399 if (!nodemask) 4400 nodemask = &cpuset_current_mems_allowed; 4401 4402 return !node_isset(nid, *nodemask); 4403 } 4404 4405 #define K(x) ((x) << (PAGE_SHIFT-10)) 4406 4407 static void show_migration_types(unsigned char type) 4408 { 4409 static const char types[MIGRATE_TYPES] = { 4410 [MIGRATE_UNMOVABLE] = 'U', 4411 [MIGRATE_MOVABLE] = 'M', 4412 [MIGRATE_RECLAIMABLE] = 'E', 4413 [MIGRATE_HIGHATOMIC] = 'H', 4414 #ifdef CONFIG_CMA 4415 [MIGRATE_CMA] = 'C', 4416 #endif 4417 #ifdef CONFIG_MEMORY_ISOLATION 4418 [MIGRATE_ISOLATE] = 'I', 4419 #endif 4420 }; 4421 char tmp[MIGRATE_TYPES + 1]; 4422 char *p = tmp; 4423 int i; 4424 4425 for (i = 0; i < MIGRATE_TYPES; i++) { 4426 if (type & (1 << i)) 4427 *p++ = types[i]; 4428 } 4429 4430 *p = '\0'; 4431 printk(KERN_CONT "(%s) ", tmp); 4432 } 4433 4434 /* 4435 * Show free area list (used inside shift_scroll-lock stuff) 4436 * We also calculate the percentage fragmentation. We do this by counting the 4437 * memory on each free list with the exception of the first item on the list. 4438 * 4439 * Bits in @filter: 4440 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 4441 * cpuset. 
4442 */ 4443 void show_free_areas(unsigned int filter, nodemask_t *nodemask) 4444 { 4445 unsigned long free_pcp = 0; 4446 int cpu; 4447 struct zone *zone; 4448 pg_data_t *pgdat; 4449 4450 for_each_populated_zone(zone) { 4451 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 4452 continue; 4453 4454 for_each_online_cpu(cpu) 4455 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 4456 } 4457 4458 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 4459 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 4460 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" 4461 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 4462 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 4463 " free:%lu free_pcp:%lu free_cma:%lu\n", 4464 global_node_page_state(NR_ACTIVE_ANON), 4465 global_node_page_state(NR_INACTIVE_ANON), 4466 global_node_page_state(NR_ISOLATED_ANON), 4467 global_node_page_state(NR_ACTIVE_FILE), 4468 global_node_page_state(NR_INACTIVE_FILE), 4469 global_node_page_state(NR_ISOLATED_FILE), 4470 global_node_page_state(NR_UNEVICTABLE), 4471 global_node_page_state(NR_FILE_DIRTY), 4472 global_node_page_state(NR_WRITEBACK), 4473 global_node_page_state(NR_UNSTABLE_NFS), 4474 global_page_state(NR_SLAB_RECLAIMABLE), 4475 global_page_state(NR_SLAB_UNRECLAIMABLE), 4476 global_node_page_state(NR_FILE_MAPPED), 4477 global_node_page_state(NR_SHMEM), 4478 global_page_state(NR_PAGETABLE), 4479 global_page_state(NR_BOUNCE), 4480 global_page_state(NR_FREE_PAGES), 4481 free_pcp, 4482 global_page_state(NR_FREE_CMA_PAGES)); 4483 4484 for_each_online_pgdat(pgdat) { 4485 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 4486 continue; 4487 4488 printk("Node %d" 4489 " active_anon:%lukB" 4490 " inactive_anon:%lukB" 4491 " active_file:%lukB" 4492 " inactive_file:%lukB" 4493 " unevictable:%lukB" 4494 " isolated(anon):%lukB" 4495 " isolated(file):%lukB" 4496 " mapped:%lukB" 4497 " dirty:%lukB" 4498 " writeback:%lukB" 4499 " shmem:%lukB" 4500 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4501 " shmem_thp: %lukB" 4502 " shmem_pmdmapped: %lukB" 4503 " anon_thp: %lukB" 4504 #endif 4505 " writeback_tmp:%lukB" 4506 " unstable:%lukB" 4507 " pages_scanned:%lu" 4508 " all_unreclaimable? %s" 4509 "\n", 4510 pgdat->node_id, 4511 K(node_page_state(pgdat, NR_ACTIVE_ANON)), 4512 K(node_page_state(pgdat, NR_INACTIVE_ANON)), 4513 K(node_page_state(pgdat, NR_ACTIVE_FILE)), 4514 K(node_page_state(pgdat, NR_INACTIVE_FILE)), 4515 K(node_page_state(pgdat, NR_UNEVICTABLE)), 4516 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 4517 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 4518 K(node_page_state(pgdat, NR_FILE_MAPPED)), 4519 K(node_page_state(pgdat, NR_FILE_DIRTY)), 4520 K(node_page_state(pgdat, NR_WRITEBACK)), 4521 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4522 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), 4523 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) 4524 * HPAGE_PMD_NR), 4525 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), 4526 #endif 4527 K(node_page_state(pgdat, NR_SHMEM)), 4528 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 4529 K(node_page_state(pgdat, NR_UNSTABLE_NFS)), 4530 node_page_state(pgdat, NR_PAGES_SCANNED), 4531 !pgdat_reclaimable(pgdat) ? 
"yes" : "no"); 4532 } 4533 4534 for_each_populated_zone(zone) { 4535 int i; 4536 4537 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 4538 continue; 4539 4540 free_pcp = 0; 4541 for_each_online_cpu(cpu) 4542 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 4543 4544 show_node(zone); 4545 printk(KERN_CONT 4546 "%s" 4547 " free:%lukB" 4548 " min:%lukB" 4549 " low:%lukB" 4550 " high:%lukB" 4551 " active_anon:%lukB" 4552 " inactive_anon:%lukB" 4553 " active_file:%lukB" 4554 " inactive_file:%lukB" 4555 " unevictable:%lukB" 4556 " writepending:%lukB" 4557 " present:%lukB" 4558 " managed:%lukB" 4559 " mlocked:%lukB" 4560 " slab_reclaimable:%lukB" 4561 " slab_unreclaimable:%lukB" 4562 " kernel_stack:%lukB" 4563 " pagetables:%lukB" 4564 " bounce:%lukB" 4565 " free_pcp:%lukB" 4566 " local_pcp:%ukB" 4567 " free_cma:%lukB" 4568 "\n", 4569 zone->name, 4570 K(zone_page_state(zone, NR_FREE_PAGES)), 4571 K(min_wmark_pages(zone)), 4572 K(low_wmark_pages(zone)), 4573 K(high_wmark_pages(zone)), 4574 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 4575 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 4576 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 4577 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 4578 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 4579 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 4580 K(zone->present_pages), 4581 K(zone->managed_pages), 4582 K(zone_page_state(zone, NR_MLOCK)), 4583 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 4584 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 4585 zone_page_state(zone, NR_KERNEL_STACK_KB), 4586 K(zone_page_state(zone, NR_PAGETABLE)), 4587 K(zone_page_state(zone, NR_BOUNCE)), 4588 K(free_pcp), 4589 K(this_cpu_read(zone->pageset->pcp.count)), 4590 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 4591 printk("lowmem_reserve[]:"); 4592 for (i = 0; i < MAX_NR_ZONES; i++) 4593 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 4594 printk(KERN_CONT "\n"); 4595 } 4596 4597 for_each_populated_zone(zone) { 4598 unsigned int order; 4599 unsigned long nr[MAX_ORDER], flags, total = 0; 4600 unsigned char types[MAX_ORDER]; 4601 4602 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 4603 continue; 4604 show_node(zone); 4605 printk(KERN_CONT "%s: ", zone->name); 4606 4607 spin_lock_irqsave(&zone->lock, flags); 4608 for (order = 0; order < MAX_ORDER; order++) { 4609 struct free_area *area = &zone->free_area[order]; 4610 int type; 4611 4612 nr[order] = area->nr_free; 4613 total += nr[order] << order; 4614 4615 types[order] = 0; 4616 for (type = 0; type < MIGRATE_TYPES; type++) { 4617 if (!list_empty(&area->free_list[type])) 4618 types[order] |= 1 << type; 4619 } 4620 } 4621 spin_unlock_irqrestore(&zone->lock, flags); 4622 for (order = 0; order < MAX_ORDER; order++) { 4623 printk(KERN_CONT "%lu*%lukB ", 4624 nr[order], K(1UL) << order); 4625 if (nr[order]) 4626 show_migration_types(types[order]); 4627 } 4628 printk(KERN_CONT "= %lukB\n", K(total)); 4629 } 4630 4631 hugetlb_show_meminfo(); 4632 4633 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 4634 4635 show_swap_cache_info(); 4636 } 4637 4638 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 4639 { 4640 zoneref->zone = zone; 4641 zoneref->zone_idx = zone_idx(zone); 4642 } 4643 4644 /* 4645 * Builds allocation fallback zone lists. 4646 * 4647 * Add all populated zones of a node to the zonelist. 
4648 */ 4649 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 4650 int nr_zones) 4651 { 4652 struct zone *zone; 4653 enum zone_type zone_type = MAX_NR_ZONES; 4654 4655 do { 4656 zone_type--; 4657 zone = pgdat->node_zones + zone_type; 4658 if (managed_zone(zone)) { 4659 zoneref_set_zone(zone, 4660 &zonelist->_zonerefs[nr_zones++]); 4661 check_highest_zone(zone_type); 4662 } 4663 } while (zone_type); 4664 4665 return nr_zones; 4666 } 4667 4668 4669 /* 4670 * zonelist_order: 4671 * 0 = automatic detection of better ordering. 4672 * 1 = order by ([node] distance, -zonetype) 4673 * 2 = order by (-zonetype, [node] distance) 4674 * 4675 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 4676 * the same zonelist. So only NUMA can configure this param. 4677 */ 4678 #define ZONELIST_ORDER_DEFAULT 0 4679 #define ZONELIST_ORDER_NODE 1 4680 #define ZONELIST_ORDER_ZONE 2 4681 4682 /* zonelist order in the kernel. 4683 * set_zonelist_order() will set this to NODE or ZONE. 4684 */ 4685 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 4686 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 4687 4688 4689 #ifdef CONFIG_NUMA 4690 /* The value user specified ....changed by config */ 4691 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 4692 /* string for sysctl */ 4693 #define NUMA_ZONELIST_ORDER_LEN 16 4694 char numa_zonelist_order[16] = "default"; 4695 4696 /* 4697 * interface for configure zonelist ordering. 4698 * command line option "numa_zonelist_order" 4699 * = "[dD]efault - default, automatic configuration. 4700 * = "[nN]ode - order by node locality, then by zone within node 4701 * = "[zZ]one - order by zone, then by locality within zone 4702 */ 4703 4704 static int __parse_numa_zonelist_order(char *s) 4705 { 4706 if (*s == 'd' || *s == 'D') { 4707 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 4708 } else if (*s == 'n' || *s == 'N') { 4709 user_zonelist_order = ZONELIST_ORDER_NODE; 4710 } else if (*s == 'z' || *s == 'Z') { 4711 user_zonelist_order = ZONELIST_ORDER_ZONE; 4712 } else { 4713 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s); 4714 return -EINVAL; 4715 } 4716 return 0; 4717 } 4718 4719 static __init int setup_numa_zonelist_order(char *s) 4720 { 4721 int ret; 4722 4723 if (!s) 4724 return 0; 4725 4726 ret = __parse_numa_zonelist_order(s); 4727 if (ret == 0) 4728 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); 4729 4730 return ret; 4731 } 4732 early_param("numa_zonelist_order", setup_numa_zonelist_order); 4733 4734 /* 4735 * sysctl handler for numa_zonelist_order 4736 */ 4737 int numa_zonelist_order_handler(struct ctl_table *table, int write, 4738 void __user *buffer, size_t *length, 4739 loff_t *ppos) 4740 { 4741 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 4742 int ret; 4743 static DEFINE_MUTEX(zl_order_mutex); 4744 4745 mutex_lock(&zl_order_mutex); 4746 if (write) { 4747 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) { 4748 ret = -EINVAL; 4749 goto out; 4750 } 4751 strcpy(saved_string, (char *)table->data); 4752 } 4753 ret = proc_dostring(table, write, buffer, length, ppos); 4754 if (ret) 4755 goto out; 4756 if (write) { 4757 int oldval = user_zonelist_order; 4758 4759 ret = __parse_numa_zonelist_order((char *)table->data); 4760 if (ret) { 4761 /* 4762 * bogus value. 
restore saved string 4763 */ 4764 strncpy((char *)table->data, saved_string, 4765 NUMA_ZONELIST_ORDER_LEN); 4766 user_zonelist_order = oldval; 4767 } else if (oldval != user_zonelist_order) { 4768 mutex_lock(&zonelists_mutex); 4769 build_all_zonelists(NULL, NULL); 4770 mutex_unlock(&zonelists_mutex); 4771 } 4772 } 4773 out: 4774 mutex_unlock(&zl_order_mutex); 4775 return ret; 4776 } 4777 4778 4779 #define MAX_NODE_LOAD (nr_online_nodes) 4780 static int node_load[MAX_NUMNODES]; 4781 4782 /** 4783 * find_next_best_node - find the next node that should appear in a given node's fallback list 4784 * @node: node whose fallback list we're appending 4785 * @used_node_mask: nodemask_t of already used nodes 4786 * 4787 * We use a number of factors to determine which is the next node that should 4788 * appear on a given node's fallback list. The node should not have appeared 4789 * already in @node's fallback list, and it should be the next closest node 4790 * according to the distance array (which contains arbitrary distance values 4791 * from each node to each node in the system), and should also prefer nodes 4792 * with no CPUs, since presumably they'll have very little allocation pressure 4793 * on them otherwise. 4794 * It returns -1 if no node is found. 4795 */ 4796 static int find_next_best_node(int node, nodemask_t *used_node_mask) 4797 { 4798 int n, val; 4799 int min_val = INT_MAX; 4800 int best_node = NUMA_NO_NODE; 4801 const struct cpumask *tmp = cpumask_of_node(0); 4802 4803 /* Use the local node if we haven't already */ 4804 if (!node_isset(node, *used_node_mask)) { 4805 node_set(node, *used_node_mask); 4806 return node; 4807 } 4808 4809 for_each_node_state(n, N_MEMORY) { 4810 4811 /* Don't want a node to appear more than once */ 4812 if (node_isset(n, *used_node_mask)) 4813 continue; 4814 4815 /* Use the distance array to find the distance */ 4816 val = node_distance(node, n); 4817 4818 /* Penalize nodes under us ("prefer the next node") */ 4819 val += (n < node); 4820 4821 /* Give preference to headless and unused nodes */ 4822 tmp = cpumask_of_node(n); 4823 if (!cpumask_empty(tmp)) 4824 val += PENALTY_FOR_NODE_WITH_CPUS; 4825 4826 /* Slight preference for less loaded node */ 4827 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 4828 val += node_load[n]; 4829 4830 if (val < min_val) { 4831 min_val = val; 4832 best_node = n; 4833 } 4834 } 4835 4836 if (best_node >= 0) 4837 node_set(best_node, *used_node_mask); 4838 4839 return best_node; 4840 } 4841 4842 4843 /* 4844 * Build zonelists ordered by node and zones within node. 4845 * This results in maximum locality--normal zone overflows into local 4846 * DMA zone, if any--but risks exhausting DMA zone. 
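 *
 * For example, with two nodes that each have Normal and DMA populated,
 * the node-ordered fallback list built for node 0 is roughly:
 *
 *	node0 Normal, node0 DMA, node1 Normal, node1 DMA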
4847 */ 4848 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 4849 { 4850 int j; 4851 struct zonelist *zonelist; 4852 4853 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK]; 4854 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 4855 ; 4856 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4857 zonelist->_zonerefs[j].zone = NULL; 4858 zonelist->_zonerefs[j].zone_idx = 0; 4859 } 4860 4861 /* 4862 * Build gfp_thisnode zonelists 4863 */ 4864 static void build_thisnode_zonelists(pg_data_t *pgdat) 4865 { 4866 int j; 4867 struct zonelist *zonelist; 4868 4869 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK]; 4870 j = build_zonelists_node(pgdat, zonelist, 0); 4871 zonelist->_zonerefs[j].zone = NULL; 4872 zonelist->_zonerefs[j].zone_idx = 0; 4873 } 4874 4875 /* 4876 * Build zonelists ordered by zone and nodes within zones. 4877 * This results in conserving DMA zone[s] until all Normal memory is 4878 * exhausted, but results in overflowing to remote node while memory 4879 * may still exist in local DMA zone. 4880 */ 4881 static int node_order[MAX_NUMNODES]; 4882 4883 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 4884 { 4885 int pos, j, node; 4886 int zone_type; /* needs to be signed */ 4887 struct zone *z; 4888 struct zonelist *zonelist; 4889 4890 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK]; 4891 pos = 0; 4892 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 4893 for (j = 0; j < nr_nodes; j++) { 4894 node = node_order[j]; 4895 z = &NODE_DATA(node)->node_zones[zone_type]; 4896 if (managed_zone(z)) { 4897 zoneref_set_zone(z, 4898 &zonelist->_zonerefs[pos++]); 4899 check_highest_zone(zone_type); 4900 } 4901 } 4902 } 4903 zonelist->_zonerefs[pos].zone = NULL; 4904 zonelist->_zonerefs[pos].zone_idx = 0; 4905 } 4906 4907 #if defined(CONFIG_64BIT) 4908 /* 4909 * Devices that require DMA32/DMA are relatively rare and do not justify a 4910 * penalty to every machine in case the specialised case applies. Default 4911 * to Node-ordering on 64-bit NUMA machines 4912 */ 4913 static int default_zonelist_order(void) 4914 { 4915 return ZONELIST_ORDER_NODE; 4916 } 4917 #else 4918 /* 4919 * On 32-bit, the Normal zone needs to be preserved for allocations accessible 4920 * by the kernel. If processes running on node 0 deplete the low memory zone 4921 * then reclaim will occur more frequency increasing stalls and potentially 4922 * be easier to OOM if a large percentage of the zone is under writeback or 4923 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set. 4924 * Hence, default to zone ordering on 32-bit. 
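 *
 * With zone ordering, two nodes that each have Normal and DMA populated
 * give node 0 a fallback list of roughly:
 *
 *	node0 Normal, node1 Normal, node0 DMA, node1 DMA
 *
 * i.e. remote Normal memory is preferred over local DMA memory.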
4925 */ 4926 static int default_zonelist_order(void) 4927 { 4928 return ZONELIST_ORDER_ZONE; 4929 } 4930 #endif /* CONFIG_64BIT */ 4931 4932 static void set_zonelist_order(void) 4933 { 4934 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 4935 current_zonelist_order = default_zonelist_order(); 4936 else 4937 current_zonelist_order = user_zonelist_order; 4938 } 4939 4940 static void build_zonelists(pg_data_t *pgdat) 4941 { 4942 int i, node, load; 4943 nodemask_t used_mask; 4944 int local_node, prev_node; 4945 struct zonelist *zonelist; 4946 unsigned int order = current_zonelist_order; 4947 4948 /* initialize zonelists */ 4949 for (i = 0; i < MAX_ZONELISTS; i++) { 4950 zonelist = pgdat->node_zonelists + i; 4951 zonelist->_zonerefs[0].zone = NULL; 4952 zonelist->_zonerefs[0].zone_idx = 0; 4953 } 4954 4955 /* NUMA-aware ordering of nodes */ 4956 local_node = pgdat->node_id; 4957 load = nr_online_nodes; 4958 prev_node = local_node; 4959 nodes_clear(used_mask); 4960 4961 memset(node_order, 0, sizeof(node_order)); 4962 i = 0; 4963 4964 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 4965 /* 4966 * We don't want to pressure a particular node. 4967 * So adding penalty to the first node in same 4968 * distance group to make it round-robin. 4969 */ 4970 if (node_distance(local_node, node) != 4971 node_distance(local_node, prev_node)) 4972 node_load[node] = load; 4973 4974 prev_node = node; 4975 load--; 4976 if (order == ZONELIST_ORDER_NODE) 4977 build_zonelists_in_node_order(pgdat, node); 4978 else 4979 node_order[i++] = node; /* remember order */ 4980 } 4981 4982 if (order == ZONELIST_ORDER_ZONE) { 4983 /* calculate node order -- i.e., DMA last! */ 4984 build_zonelists_in_zone_order(pgdat, i); 4985 } 4986 4987 build_thisnode_zonelists(pgdat); 4988 } 4989 4990 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 4991 /* 4992 * Return node id of node used for "local" allocations. 4993 * I.e., first node id of first zone in arg node's generic zonelist. 4994 * Used for initializing percpu 'numa_mem', which is used primarily 4995 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 4996 */ 4997 int local_memory_node(int node) 4998 { 4999 struct zoneref *z; 5000 5001 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5002 gfp_zone(GFP_KERNEL), 5003 NULL); 5004 return z->zone->node; 5005 } 5006 #endif 5007 5008 static void setup_min_unmapped_ratio(void); 5009 static void setup_min_slab_ratio(void); 5010 #else /* CONFIG_NUMA */ 5011 5012 static void set_zonelist_order(void) 5013 { 5014 current_zonelist_order = ZONELIST_ORDER_ZONE; 5015 } 5016 5017 static void build_zonelists(pg_data_t *pgdat) 5018 { 5019 int node, local_node; 5020 enum zone_type j; 5021 struct zonelist *zonelist; 5022 5023 local_node = pgdat->node_id; 5024 5025 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK]; 5026 j = build_zonelists_node(pgdat, zonelist, 0); 5027 5028 /* 5029 * Now we build the zonelist so that it contains the zones 5030 * of all the other nodes. 
5031 * We don't want to pressure a particular node, so when 5032 * building the zones for node N, we make sure that the 5033 * zones coming right after the local ones are those from 5034 * node N+1 (modulo N) 5035 */ 5036 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 5037 if (!node_online(node)) 5038 continue; 5039 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 5040 } 5041 for (node = 0; node < local_node; node++) { 5042 if (!node_online(node)) 5043 continue; 5044 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 5045 } 5046 5047 zonelist->_zonerefs[j].zone = NULL; 5048 zonelist->_zonerefs[j].zone_idx = 0; 5049 } 5050 5051 #endif /* CONFIG_NUMA */ 5052 5053 /* 5054 * Boot pageset table. One per cpu which is going to be used for all 5055 * zones and all nodes. The parameters will be set in such a way 5056 * that an item put on a list will immediately be handed over to 5057 * the buddy list. This is safe since pageset manipulation is done 5058 * with interrupts disabled. 5059 * 5060 * The boot_pagesets must be kept even after bootup is complete for 5061 * unused processors and/or zones. They do play a role for bootstrapping 5062 * hotplugged processors. 5063 * 5064 * zoneinfo_show() and maybe other functions do 5065 * not check if the processor is online before following the pageset pointer. 5066 * Other parts of the kernel may not check if the zone is available. 5067 */ 5068 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 5069 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 5070 static void setup_zone_pageset(struct zone *zone); 5071 5072 /* 5073 * Global mutex to protect against size modification of zonelists 5074 * as well as to serialize pageset setup for the new populated zone. 5075 */ 5076 DEFINE_MUTEX(zonelists_mutex); 5077 5078 /* return values int ....just for stop_machine() */ 5079 static int __build_all_zonelists(void *data) 5080 { 5081 int nid; 5082 int cpu; 5083 pg_data_t *self = data; 5084 5085 #ifdef CONFIG_NUMA 5086 memset(node_load, 0, sizeof(node_load)); 5087 #endif 5088 5089 if (self && !node_online(self->node_id)) { 5090 build_zonelists(self); 5091 } 5092 5093 for_each_online_node(nid) { 5094 pg_data_t *pgdat = NODE_DATA(nid); 5095 5096 build_zonelists(pgdat); 5097 } 5098 5099 /* 5100 * Initialize the boot_pagesets that are going to be used 5101 * for bootstrapping processors. The real pagesets for 5102 * each zone will be allocated later when the per cpu 5103 * allocator is available. 5104 * 5105 * boot_pagesets are used also for bootstrapping offline 5106 * cpus if the system is already booted because the pagesets 5107 * are needed to initialize allocators on a specific cpu too. 5108 * F.e. the percpu allocator needs the page allocator which 5109 * needs the percpu allocator in order to allocate its pagesets 5110 * (a chicken-egg dilemma). 5111 */ 5112 for_each_possible_cpu(cpu) { 5113 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 5114 5115 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5116 /* 5117 * We now know the "local memory node" for each node-- 5118 * i.e., the node of the first zone in the generic zonelist. 5119 * Set up numa_mem percpu variable for on-line cpus. During 5120 * boot, only the boot cpu should be on-line; we'll init the 5121 * secondary cpus' numa_mem as they come on-line. During 5122 * node/memory hotplug, we'll fixup all on-line cpus. 
5123 */ 5124 if (cpu_online(cpu)) 5125 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5126 #endif 5127 } 5128 5129 return 0; 5130 } 5131 5132 static noinline void __init 5133 build_all_zonelists_init(void) 5134 { 5135 __build_all_zonelists(NULL); 5136 mminit_verify_zonelist(); 5137 cpuset_init_current_mems_allowed(); 5138 } 5139 5140 /* 5141 * Called with zonelists_mutex held always 5142 * unless system_state == SYSTEM_BOOTING. 5143 * 5144 * __ref due to (1) call of __meminit annotated setup_zone_pageset 5145 * [we're only called with non-NULL zone through __meminit paths] and 5146 * (2) call of __init annotated helper build_all_zonelists_init 5147 * [protected by SYSTEM_BOOTING]. 5148 */ 5149 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) 5150 { 5151 set_zonelist_order(); 5152 5153 if (system_state == SYSTEM_BOOTING) { 5154 build_all_zonelists_init(); 5155 } else { 5156 #ifdef CONFIG_MEMORY_HOTPLUG 5157 if (zone) 5158 setup_zone_pageset(zone); 5159 #endif 5160 /* we have to stop all cpus to guarantee there is no user 5161 of zonelist */ 5162 stop_machine(__build_all_zonelists, pgdat, NULL); 5163 /* cpuset refresh routine should be here */ 5164 } 5165 vm_total_pages = nr_free_pagecache_pages(); 5166 /* 5167 * Disable grouping by mobility if the number of pages in the 5168 * system is too low to allow the mechanism to work. It would be 5169 * more accurate, but expensive to check per-zone. This check is 5170 * made on memory-hotadd so a system can start with mobility 5171 * disabled and enable it later 5172 */ 5173 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5174 page_group_by_mobility_disabled = 1; 5175 else 5176 page_group_by_mobility_disabled = 0; 5177 5178 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n", 5179 nr_online_nodes, 5180 zonelist_order_name[current_zonelist_order], 5181 page_group_by_mobility_disabled ? "off" : "on", 5182 vm_total_pages); 5183 #ifdef CONFIG_NUMA 5184 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5185 #endif 5186 } 5187 5188 /* 5189 * Initially all pages are reserved - free ones are freed 5190 * up by free_all_bootmem() once the early boot process is 5191 * done. Non-atomic initialization, single-pass. 5192 */ 5193 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 5194 unsigned long start_pfn, enum memmap_context context) 5195 { 5196 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn)); 5197 unsigned long end_pfn = start_pfn + size; 5198 pg_data_t *pgdat = NODE_DATA(nid); 5199 unsigned long pfn; 5200 unsigned long nr_initialised = 0; 5201 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5202 struct memblock_region *r = NULL, *tmp; 5203 #endif 5204 5205 if (highest_memmap_pfn < end_pfn - 1) 5206 highest_memmap_pfn = end_pfn - 1; 5207 5208 /* 5209 * Honor reservation requested by the driver for this ZONE_DEVICE 5210 * memory 5211 */ 5212 if (altmap && start_pfn == altmap->base_pfn) 5213 start_pfn += altmap->reserve; 5214 5215 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 5216 /* 5217 * There can be holes in boot-time mem_map[]s handed to this 5218 * function. They do not exist on hotplugged memory. 5219 */ 5220 if (context != MEMMAP_EARLY) 5221 goto not_early; 5222 5223 if (!early_pfn_valid(pfn)) { 5224 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5225 /* 5226 * Skip to the pfn preceding the next valid one (or 5227 * end_pfn), such that we hit a valid pfn (or end_pfn) 5228 * on our next iteration of the loop. 
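 *
 * For example, if this pfn is invalid and the next valid pfn reported
 * by memblock is 0x2000, pfn is set to 0x1fff here and the loop's
 * pfn++ then lands exactly on 0x2000.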
5229 */ 5230 pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1; 5231 #endif 5232 continue; 5233 } 5234 if (!early_pfn_in_nid(pfn, nid)) 5235 continue; 5236 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised)) 5237 break; 5238 5239 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5240 /* 5241 * Check given memblock attribute by firmware which can affect 5242 * kernel memory layout. If zone==ZONE_MOVABLE but memory is 5243 * mirrored, it's an overlapped memmap init. skip it. 5244 */ 5245 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 5246 if (!r || pfn >= memblock_region_memory_end_pfn(r)) { 5247 for_each_memblock(memory, tmp) 5248 if (pfn < memblock_region_memory_end_pfn(tmp)) 5249 break; 5250 r = tmp; 5251 } 5252 if (pfn >= memblock_region_memory_base_pfn(r) && 5253 memblock_is_mirror(r)) { 5254 /* already initialized as NORMAL */ 5255 pfn = memblock_region_memory_end_pfn(r); 5256 continue; 5257 } 5258 } 5259 #endif 5260 5261 not_early: 5262 /* 5263 * Mark the block movable so that blocks are reserved for 5264 * movable at startup. This will force kernel allocations 5265 * to reserve their blocks rather than leaking throughout 5266 * the address space during boot when many long-lived 5267 * kernel allocations are made. 5268 * 5269 * bitmap is created for zone's valid pfn range. but memmap 5270 * can be created for invalid pages (for alignment) 5271 * check here not to call set_pageblock_migratetype() against 5272 * pfn out of zone. 5273 */ 5274 if (!(pfn & (pageblock_nr_pages - 1))) { 5275 struct page *page = pfn_to_page(pfn); 5276 5277 __init_single_page(page, pfn, zone, nid); 5278 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 5279 } else { 5280 __init_single_pfn(pfn, zone, nid); 5281 } 5282 } 5283 } 5284 5285 static void __meminit zone_init_free_lists(struct zone *zone) 5286 { 5287 unsigned int order, t; 5288 for_each_migratetype_order(order, t) { 5289 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 5290 zone->free_area[order].nr_free = 0; 5291 } 5292 } 5293 5294 #ifndef __HAVE_ARCH_MEMMAP_INIT 5295 #define memmap_init(size, nid, zone, start_pfn) \ 5296 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 5297 #endif 5298 5299 static int zone_batchsize(struct zone *zone) 5300 { 5301 #ifdef CONFIG_MMU 5302 int batch; 5303 5304 /* 5305 * The per-cpu-pages pools are set to around 1000th of the 5306 * size of the zone. But no more than 1/2 of a meg. 5307 * 5308 * OK, so we don't know how big the cache is. So guess. 5309 */ 5310 batch = zone->managed_pages / 1024; 5311 if (batch * PAGE_SIZE > 512 * 1024) 5312 batch = (512 * 1024) / PAGE_SIZE; 5313 batch /= 4; /* We effectively *= 4 below */ 5314 if (batch < 1) 5315 batch = 1; 5316 5317 /* 5318 * Clamp the batch to a 2^n - 1 value. Having a power 5319 * of 2 value was found to be more likely to have 5320 * suboptimal cache aliasing properties in some cases. 5321 * 5322 * For example if 2 tasks are alternately allocating 5323 * batches of pages, one task can end up with a lot 5324 * of pages of one half of the possible page colors 5325 * and the other with pages of the other colors. 5326 */ 5327 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5328 5329 return batch; 5330 5331 #else 5332 /* The deferral and batching of frees should be suppressed under NOMMU 5333 * conditions. 5334 * 5335 * The problem is that NOMMU needs to be able to allocate large chunks 5336 * of contiguous memory as there's no hardware page translation to 5337 * assemble apparent contiguous memory from discontiguous pages. 
5338 * 5339 * Queueing large contiguous runs of pages for batching, however, 5340 * causes the pages to actually be freed in smaller chunks. As there 5341 * can be a significant delay between the individual batches being 5342 * recycled, this leads to the once large chunks of space being 5343 * fragmented and becoming unavailable for high-order allocations. 5344 */ 5345 return 0; 5346 #endif 5347 } 5348 5349 /* 5350 * pcp->high and pcp->batch values are related and dependent on one another: 5351 * ->batch must never be higher then ->high. 5352 * The following function updates them in a safe manner without read side 5353 * locking. 5354 * 5355 * Any new users of pcp->batch and pcp->high should ensure they can cope with 5356 * those fields changing asynchronously (acording the the above rule). 5357 * 5358 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5359 * outside of boot time (or some other assurance that no concurrent updaters 5360 * exist). 5361 */ 5362 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 5363 unsigned long batch) 5364 { 5365 /* start with a fail safe value for batch */ 5366 pcp->batch = 1; 5367 smp_wmb(); 5368 5369 /* Update high, then batch, in order */ 5370 pcp->high = high; 5371 smp_wmb(); 5372 5373 pcp->batch = batch; 5374 } 5375 5376 /* a companion to pageset_set_high() */ 5377 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) 5378 { 5379 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); 5380 } 5381 5382 static void pageset_init(struct per_cpu_pageset *p) 5383 { 5384 struct per_cpu_pages *pcp; 5385 int migratetype; 5386 5387 memset(p, 0, sizeof(*p)); 5388 5389 pcp = &p->pcp; 5390 pcp->count = 0; 5391 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 5392 INIT_LIST_HEAD(&pcp->lists[migratetype]); 5393 } 5394 5395 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 5396 { 5397 pageset_init(p); 5398 pageset_set_batch(p, batch); 5399 } 5400 5401 /* 5402 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist 5403 * to the value high for the pageset p. 5404 */ 5405 static void pageset_set_high(struct per_cpu_pageset *p, 5406 unsigned long high) 5407 { 5408 unsigned long batch = max(1UL, high / 4); 5409 if ((high / 4) > (PAGE_SHIFT * 8)) 5410 batch = PAGE_SHIFT * 8; 5411 5412 pageset_update(&p->pcp, high, batch); 5413 } 5414 5415 static void pageset_set_high_and_batch(struct zone *zone, 5416 struct per_cpu_pageset *pcp) 5417 { 5418 if (percpu_pagelist_fraction) 5419 pageset_set_high(pcp, 5420 (zone->managed_pages / 5421 percpu_pagelist_fraction)); 5422 else 5423 pageset_set_batch(pcp, zone_batchsize(zone)); 5424 } 5425 5426 static void __meminit zone_pageset_init(struct zone *zone, int cpu) 5427 { 5428 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); 5429 5430 pageset_init(pcp); 5431 pageset_set_high_and_batch(zone, pcp); 5432 } 5433 5434 static void __meminit setup_zone_pageset(struct zone *zone) 5435 { 5436 int cpu; 5437 zone->pageset = alloc_percpu(struct per_cpu_pageset); 5438 for_each_possible_cpu(cpu) 5439 zone_pageset_init(zone, cpu); 5440 } 5441 5442 /* 5443 * Allocate per cpu pagesets and initialize them. 5444 * Before this call only boot pagesets were available. 
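 *
 * Rough worked example of the defaults (assuming 4KiB pages, CONFIG_MMU
 * and percpu_pagelist_fraction == 0): for a zone with 1GiB of managed
 * memory, zone_batchsize() caps its estimate at 512KiB worth of pages
 * (128), divides by 4 (32) and rounds to 2^n - 1, giving ->batch = 31;
 * pageset_set_batch() then sets ->high = 6 * 31 = 186 pages per cpu.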
5445 */ 5446 void __init setup_per_cpu_pageset(void) 5447 { 5448 struct pglist_data *pgdat; 5449 struct zone *zone; 5450 5451 for_each_populated_zone(zone) 5452 setup_zone_pageset(zone); 5453 5454 for_each_online_pgdat(pgdat) 5455 pgdat->per_cpu_nodestats = 5456 alloc_percpu(struct per_cpu_nodestat); 5457 } 5458 5459 static __meminit void zone_pcp_init(struct zone *zone) 5460 { 5461 /* 5462 * per cpu subsystem is not up at this point. The following code 5463 * relies on the ability of the linker to provide the 5464 * offset of a (static) per cpu variable into the per cpu area. 5465 */ 5466 zone->pageset = &boot_pageset; 5467 5468 if (populated_zone(zone)) 5469 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 5470 zone->name, zone->present_pages, 5471 zone_batchsize(zone)); 5472 } 5473 5474 int __meminit init_currently_empty_zone(struct zone *zone, 5475 unsigned long zone_start_pfn, 5476 unsigned long size) 5477 { 5478 struct pglist_data *pgdat = zone->zone_pgdat; 5479 5480 pgdat->nr_zones = zone_idx(zone) + 1; 5481 5482 zone->zone_start_pfn = zone_start_pfn; 5483 5484 mminit_dprintk(MMINIT_TRACE, "memmap_init", 5485 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 5486 pgdat->node_id, 5487 (unsigned long)zone_idx(zone), 5488 zone_start_pfn, (zone_start_pfn + size)); 5489 5490 zone_init_free_lists(zone); 5491 zone->initialized = 1; 5492 5493 return 0; 5494 } 5495 5496 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5497 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 5498 5499 /* 5500 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 5501 */ 5502 int __meminit __early_pfn_to_nid(unsigned long pfn, 5503 struct mminit_pfnnid_cache *state) 5504 { 5505 unsigned long start_pfn, end_pfn; 5506 int nid; 5507 5508 if (state->last_start <= pfn && pfn < state->last_end) 5509 return state->last_nid; 5510 5511 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 5512 if (nid != -1) { 5513 state->last_start = start_pfn; 5514 state->last_end = end_pfn; 5515 state->last_nid = nid; 5516 } 5517 5518 return nid; 5519 } 5520 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 5521 5522 /** 5523 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range 5524 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 5525 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid 5526 * 5527 * If an architecture guarantees that all ranges registered contain no holes 5528 * and may be freed, this this function may be used instead of calling 5529 * memblock_free_early_nid() manually. 5530 */ 5531 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) 5532 { 5533 unsigned long start_pfn, end_pfn; 5534 int i, this_nid; 5535 5536 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { 5537 start_pfn = min(start_pfn, max_low_pfn); 5538 end_pfn = min(end_pfn, max_low_pfn); 5539 5540 if (start_pfn < end_pfn) 5541 memblock_free_early_nid(PFN_PHYS(start_pfn), 5542 (end_pfn - start_pfn) << PAGE_SHIFT, 5543 this_nid); 5544 } 5545 } 5546 5547 /** 5548 * sparse_memory_present_with_active_regions - Call memory_present for each active range 5549 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 5550 * 5551 * If an architecture guarantees that all ranges registered contain no holes and may 5552 * be freed, this function may be used instead of calling memory_present() manually. 
5553 */ 5554 void __init sparse_memory_present_with_active_regions(int nid) 5555 { 5556 unsigned long start_pfn, end_pfn; 5557 int i, this_nid; 5558 5559 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 5560 memory_present(this_nid, start_pfn, end_pfn); 5561 } 5562 5563 /** 5564 * get_pfn_range_for_nid - Return the start and end page frames for a node 5565 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 5566 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 5567 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 5568 * 5569 * It returns the start and end page frame of a node based on information 5570 * provided by memblock_set_node(). If called for a node 5571 * with no available memory, a warning is printed and the start and end 5572 * PFNs will be 0. 5573 */ 5574 void __meminit get_pfn_range_for_nid(unsigned int nid, 5575 unsigned long *start_pfn, unsigned long *end_pfn) 5576 { 5577 unsigned long this_start_pfn, this_end_pfn; 5578 int i; 5579 5580 *start_pfn = -1UL; 5581 *end_pfn = 0; 5582 5583 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 5584 *start_pfn = min(*start_pfn, this_start_pfn); 5585 *end_pfn = max(*end_pfn, this_end_pfn); 5586 } 5587 5588 if (*start_pfn == -1UL) 5589 *start_pfn = 0; 5590 } 5591 5592 /* 5593 * This finds a zone that can be used for ZONE_MOVABLE pages. The 5594 * assumption is made that zones within a node are ordered in monotonic 5595 * increasing memory addresses so that the "highest" populated zone is used 5596 */ 5597 static void __init find_usable_zone_for_movable(void) 5598 { 5599 int zone_index; 5600 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 5601 if (zone_index == ZONE_MOVABLE) 5602 continue; 5603 5604 if (arch_zone_highest_possible_pfn[zone_index] > 5605 arch_zone_lowest_possible_pfn[zone_index]) 5606 break; 5607 } 5608 5609 VM_BUG_ON(zone_index == -1); 5610 movable_zone = zone_index; 5611 } 5612 5613 /* 5614 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 5615 * because it is sized independent of architecture. Unlike the other zones, 5616 * the starting point for ZONE_MOVABLE is not fixed. It may be different 5617 * in each node depending on the size of each node and how evenly kernelcore 5618 * is distributed. This helper function adjusts the zone ranges 5619 * provided by the architecture for a given node by using the end of the 5620 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that 5621 * zones within a node are in order of monotonic increases memory addresses 5622 */ 5623 static void __meminit adjust_zone_range_for_zone_movable(int nid, 5624 unsigned long zone_type, 5625 unsigned long node_start_pfn, 5626 unsigned long node_end_pfn, 5627 unsigned long *zone_start_pfn, 5628 unsigned long *zone_end_pfn) 5629 { 5630 /* Only adjust if ZONE_MOVABLE is on this node */ 5631 if (zone_movable_pfn[nid]) { 5632 /* Size ZONE_MOVABLE */ 5633 if (zone_type == ZONE_MOVABLE) { 5634 *zone_start_pfn = zone_movable_pfn[nid]; 5635 *zone_end_pfn = min(node_end_pfn, 5636 arch_zone_highest_possible_pfn[movable_zone]); 5637 5638 /* Adjust for ZONE_MOVABLE starting within this range */ 5639 } else if (!mirrored_kernelcore && 5640 *zone_start_pfn < zone_movable_pfn[nid] && 5641 *zone_end_pfn > zone_movable_pfn[nid]) { 5642 *zone_end_pfn = zone_movable_pfn[nid]; 5643 5644 /* Check if this whole range is within ZONE_MOVABLE */ 5645 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 5646 *zone_start_pfn = *zone_end_pfn; 5647 } 5648 } 5649 5650 /* 5651 * Return the number of pages a zone spans in a node, including holes 5652 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 5653 */ 5654 static unsigned long __meminit zone_spanned_pages_in_node(int nid, 5655 unsigned long zone_type, 5656 unsigned long node_start_pfn, 5657 unsigned long node_end_pfn, 5658 unsigned long *zone_start_pfn, 5659 unsigned long *zone_end_pfn, 5660 unsigned long *ignored) 5661 { 5662 /* When hotadd a new node from cpu_up(), the node should be empty */ 5663 if (!node_start_pfn && !node_end_pfn) 5664 return 0; 5665 5666 /* Get the start and end of the zone */ 5667 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 5668 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 5669 adjust_zone_range_for_zone_movable(nid, zone_type, 5670 node_start_pfn, node_end_pfn, 5671 zone_start_pfn, zone_end_pfn); 5672 5673 /* Check that this node has pages within the zone's required range */ 5674 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 5675 return 0; 5676 5677 /* Move the zone boundaries inside the node if necessary */ 5678 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 5679 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 5680 5681 /* Return the spanned pages */ 5682 return *zone_end_pfn - *zone_start_pfn; 5683 } 5684 5685 /* 5686 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 5687 * then all holes in the requested range will be accounted for. 5688 */ 5689 unsigned long __meminit __absent_pages_in_range(int nid, 5690 unsigned long range_start_pfn, 5691 unsigned long range_end_pfn) 5692 { 5693 unsigned long nr_absent = range_end_pfn - range_start_pfn; 5694 unsigned long start_pfn, end_pfn; 5695 int i; 5696 5697 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 5698 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 5699 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 5700 nr_absent -= end_pfn - start_pfn; 5701 } 5702 return nr_absent; 5703 } 5704 5705 /** 5706 * absent_pages_in_range - Return number of page frames in holes within a range 5707 * @start_pfn: The start PFN to start searching for holes 5708 * @end_pfn: The end PFN to stop searching for holes 5709 * 5710 * It returns the number of pages frames in memory holes within a range. 
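 *
 * For example, if memblock has memory registered only for pfns
 * [0, 0x600) and [0x700, 0x1000), then absent_pages_in_range(0, 0x1000)
 * returns 0x100, the size of the hole between the two ranges.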
5711 */ 5712 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 5713 unsigned long end_pfn) 5714 { 5715 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 5716 } 5717 5718 /* Return the number of page frames in holes in a zone on a node */ 5719 static unsigned long __meminit zone_absent_pages_in_node(int nid, 5720 unsigned long zone_type, 5721 unsigned long node_start_pfn, 5722 unsigned long node_end_pfn, 5723 unsigned long *ignored) 5724 { 5725 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 5726 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 5727 unsigned long zone_start_pfn, zone_end_pfn; 5728 unsigned long nr_absent; 5729 5730 /* When hotadd a new node from cpu_up(), the node should be empty */ 5731 if (!node_start_pfn && !node_end_pfn) 5732 return 0; 5733 5734 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 5735 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 5736 5737 adjust_zone_range_for_zone_movable(nid, zone_type, 5738 node_start_pfn, node_end_pfn, 5739 &zone_start_pfn, &zone_end_pfn); 5740 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 5741 5742 /* 5743 * ZONE_MOVABLE handling. 5744 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 5745 * and vice versa. 5746 */ 5747 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 5748 unsigned long start_pfn, end_pfn; 5749 struct memblock_region *r; 5750 5751 for_each_memblock(memory, r) { 5752 start_pfn = clamp(memblock_region_memory_base_pfn(r), 5753 zone_start_pfn, zone_end_pfn); 5754 end_pfn = clamp(memblock_region_memory_end_pfn(r), 5755 zone_start_pfn, zone_end_pfn); 5756 5757 if (zone_type == ZONE_MOVABLE && 5758 memblock_is_mirror(r)) 5759 nr_absent += end_pfn - start_pfn; 5760 5761 if (zone_type == ZONE_NORMAL && 5762 !memblock_is_mirror(r)) 5763 nr_absent += end_pfn - start_pfn; 5764 } 5765 } 5766 5767 return nr_absent; 5768 } 5769 5770 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5771 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 5772 unsigned long zone_type, 5773 unsigned long node_start_pfn, 5774 unsigned long node_end_pfn, 5775 unsigned long *zone_start_pfn, 5776 unsigned long *zone_end_pfn, 5777 unsigned long *zones_size) 5778 { 5779 unsigned int zone; 5780 5781 *zone_start_pfn = node_start_pfn; 5782 for (zone = 0; zone < zone_type; zone++) 5783 *zone_start_pfn += zones_size[zone]; 5784 5785 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type]; 5786 5787 return zones_size[zone_type]; 5788 } 5789 5790 static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 5791 unsigned long zone_type, 5792 unsigned long node_start_pfn, 5793 unsigned long node_end_pfn, 5794 unsigned long *zholes_size) 5795 { 5796 if (!zholes_size) 5797 return 0; 5798 5799 return zholes_size[zone_type]; 5800 } 5801 5802 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5803 5804 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 5805 unsigned long node_start_pfn, 5806 unsigned long node_end_pfn, 5807 unsigned long *zones_size, 5808 unsigned long *zholes_size) 5809 { 5810 unsigned long realtotalpages = 0, totalpages = 0; 5811 enum zone_type i; 5812 5813 for (i = 0; i < MAX_NR_ZONES; i++) { 5814 struct zone *zone = pgdat->node_zones + i; 5815 unsigned long zone_start_pfn, zone_end_pfn; 5816 unsigned long size, real_size; 5817 5818 size = zone_spanned_pages_in_node(pgdat->node_id, i, 5819 node_start_pfn, 5820 node_end_pfn, 5821 &zone_start_pfn, 5822 &zone_end_pfn, 5823 
zones_size); 5824 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i, 5825 node_start_pfn, node_end_pfn, 5826 zholes_size); 5827 if (size) 5828 zone->zone_start_pfn = zone_start_pfn; 5829 else 5830 zone->zone_start_pfn = 0; 5831 zone->spanned_pages = size; 5832 zone->present_pages = real_size; 5833 5834 totalpages += size; 5835 realtotalpages += real_size; 5836 } 5837 5838 pgdat->node_spanned_pages = totalpages; 5839 pgdat->node_present_pages = realtotalpages; 5840 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 5841 realtotalpages); 5842 } 5843 5844 #ifndef CONFIG_SPARSEMEM 5845 /* 5846 * Calculate the size of the zone->blockflags rounded to an unsigned long 5847 * Start by making sure zonesize is a multiple of pageblock_order by rounding 5848 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 5849 * round what is now in bits to nearest long in bits, then return it in 5850 * bytes. 5851 */ 5852 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 5853 { 5854 unsigned long usemapsize; 5855 5856 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 5857 usemapsize = roundup(zonesize, pageblock_nr_pages); 5858 usemapsize = usemapsize >> pageblock_order; 5859 usemapsize *= NR_PAGEBLOCK_BITS; 5860 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 5861 5862 return usemapsize / 8; 5863 } 5864 5865 static void __init setup_usemap(struct pglist_data *pgdat, 5866 struct zone *zone, 5867 unsigned long zone_start_pfn, 5868 unsigned long zonesize) 5869 { 5870 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); 5871 zone->pageblock_flags = NULL; 5872 if (usemapsize) 5873 zone->pageblock_flags = 5874 memblock_virt_alloc_node_nopanic(usemapsize, 5875 pgdat->node_id); 5876 } 5877 #else 5878 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, 5879 unsigned long zone_start_pfn, unsigned long zonesize) {} 5880 #endif /* CONFIG_SPARSEMEM */ 5881 5882 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 5883 5884 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 5885 void __paginginit set_pageblock_order(void) 5886 { 5887 unsigned int order; 5888 5889 /* Check that pageblock_nr_pages has not already been setup */ 5890 if (pageblock_order) 5891 return; 5892 5893 if (HPAGE_SHIFT > PAGE_SHIFT) 5894 order = HUGETLB_PAGE_ORDER; 5895 else 5896 order = MAX_ORDER - 1; 5897 5898 /* 5899 * Assume the largest contiguous order of interest is a huge page. 5900 * This value may be variable depending on boot parameters on IA64 and 5901 * powerpc. 5902 */ 5903 pageblock_order = order; 5904 } 5905 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 5906 5907 /* 5908 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 5909 * is unused as pageblock_order is set at compile-time. See 5910 * include/linux/pageblock-flags.h for the values of pageblock_order based on 5911 * the kernel config 5912 */ 5913 void __paginginit set_pageblock_order(void) 5914 { 5915 } 5916 5917 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 5918 5919 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, 5920 unsigned long present_pages) 5921 { 5922 unsigned long pages = spanned_pages; 5923 5924 /* 5925 * Provide a more accurate estimation if there are holes within 5926 * the zone and SPARSEMEM is in use. 
If there are holes within the 5927 * zone, each populated memory region may cost us one or two extra 5928 * memmap pages due to alignment because memmap pages for each 5929 * populated regions may not be naturally aligned on page boundary. 5930 * So the (present_pages >> 4) heuristic is a tradeoff for that. 5931 */ 5932 if (spanned_pages > present_pages + (present_pages >> 4) && 5933 IS_ENABLED(CONFIG_SPARSEMEM)) 5934 pages = present_pages; 5935 5936 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 5937 } 5938 5939 /* 5940 * Set up the zone data structures: 5941 * - mark all pages reserved 5942 * - mark all memory queues empty 5943 * - clear the memory bitmaps 5944 * 5945 * NOTE: pgdat should get zeroed by caller. 5946 */ 5947 static void __paginginit free_area_init_core(struct pglist_data *pgdat) 5948 { 5949 enum zone_type j; 5950 int nid = pgdat->node_id; 5951 int ret; 5952 5953 pgdat_resize_init(pgdat); 5954 #ifdef CONFIG_NUMA_BALANCING 5955 spin_lock_init(&pgdat->numabalancing_migrate_lock); 5956 pgdat->numabalancing_migrate_nr_pages = 0; 5957 pgdat->numabalancing_migrate_next_window = jiffies; 5958 #endif 5959 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 5960 spin_lock_init(&pgdat->split_queue_lock); 5961 INIT_LIST_HEAD(&pgdat->split_queue); 5962 pgdat->split_queue_len = 0; 5963 #endif 5964 init_waitqueue_head(&pgdat->kswapd_wait); 5965 init_waitqueue_head(&pgdat->pfmemalloc_wait); 5966 #ifdef CONFIG_COMPACTION 5967 init_waitqueue_head(&pgdat->kcompactd_wait); 5968 #endif 5969 pgdat_page_ext_init(pgdat); 5970 spin_lock_init(&pgdat->lru_lock); 5971 lruvec_init(node_lruvec(pgdat)); 5972 5973 for (j = 0; j < MAX_NR_ZONES; j++) { 5974 struct zone *zone = pgdat->node_zones + j; 5975 unsigned long size, realsize, freesize, memmap_pages; 5976 unsigned long zone_start_pfn = zone->zone_start_pfn; 5977 5978 size = zone->spanned_pages; 5979 realsize = freesize = zone->present_pages; 5980 5981 /* 5982 * Adjust freesize so that it accounts for how much memory 5983 * is used by this zone for memmap. This affects the watermark 5984 * and per-cpu initialisations 5985 */ 5986 memmap_pages = calc_memmap_size(size, realsize); 5987 if (!is_highmem_idx(j)) { 5988 if (freesize >= memmap_pages) { 5989 freesize -= memmap_pages; 5990 if (memmap_pages) 5991 printk(KERN_DEBUG 5992 " %s zone: %lu pages used for memmap\n", 5993 zone_names[j], memmap_pages); 5994 } else 5995 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", 5996 zone_names[j], memmap_pages, freesize); 5997 } 5998 5999 /* Account for reserved pages */ 6000 if (j == 0 && freesize > dma_reserve) { 6001 freesize -= dma_reserve; 6002 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 6003 zone_names[0], dma_reserve); 6004 } 6005 6006 if (!is_highmem_idx(j)) 6007 nr_kernel_pages += freesize; 6008 /* Charge for highmem memmap if there are enough kernel pages */ 6009 else if (nr_kernel_pages > memmap_pages * 2) 6010 nr_kernel_pages -= memmap_pages; 6011 nr_all_pages += freesize; 6012 6013 /* 6014 * Set an approximate value for lowmem here, it will be adjusted 6015 * when the bootmem allocator frees pages into the buddy system. 6016 * And all highmem pages will be managed by the buddy system. 6017 */ 6018 zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; 6019 #ifdef CONFIG_NUMA 6020 zone->node = nid; 6021 #endif 6022 zone->name = zone_names[j]; 6023 zone->zone_pgdat = pgdat; 6024 spin_lock_init(&zone->lock); 6025 zone_seqlock_init(zone); 6026 zone_pcp_init(zone); 6027 6028 if (!size) 6029 continue; 6030 6031 set_pageblock_order(); 6032 setup_usemap(pgdat, zone, zone_start_pfn, size); 6033 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 6034 BUG_ON(ret); 6035 memmap_init(size, nid, j, zone_start_pfn); 6036 } 6037 } 6038 6039 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) 6040 { 6041 unsigned long __maybe_unused start = 0; 6042 unsigned long __maybe_unused offset = 0; 6043 6044 /* Skip empty nodes */ 6045 if (!pgdat->node_spanned_pages) 6046 return; 6047 6048 #ifdef CONFIG_FLAT_NODE_MEM_MAP 6049 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 6050 offset = pgdat->node_start_pfn - start; 6051 /* ia64 gets its own node_mem_map, before this, without bootmem */ 6052 if (!pgdat->node_mem_map) { 6053 unsigned long size, end; 6054 struct page *map; 6055 6056 /* 6057 * The zone's endpoints aren't required to be MAX_ORDER 6058 * aligned but the node_mem_map endpoints must be in order 6059 * for the buddy allocator to function correctly. 6060 */ 6061 end = pgdat_end_pfn(pgdat); 6062 end = ALIGN(end, MAX_ORDER_NR_PAGES); 6063 size = (end - start) * sizeof(struct page); 6064 map = alloc_remap(pgdat->node_id, size); 6065 if (!map) 6066 map = memblock_virt_alloc_node_nopanic(size, 6067 pgdat->node_id); 6068 pgdat->node_mem_map = map + offset; 6069 } 6070 #ifndef CONFIG_NEED_MULTIPLE_NODES 6071 /* 6072 * With no DISCONTIG, the global mem_map is just set as node 0's 6073 */ 6074 if (pgdat == NODE_DATA(0)) { 6075 mem_map = NODE_DATA(0)->node_mem_map; 6076 #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) 6077 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 6078 mem_map -= offset; 6079 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 6080 } 6081 #endif 6082 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 6083 } 6084 6085 void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 6086 unsigned long node_start_pfn, unsigned long *zholes_size) 6087 { 6088 pg_data_t *pgdat = NODE_DATA(nid); 6089 unsigned long start_pfn = 0; 6090 unsigned long end_pfn = 0; 6091 6092 /* pg_data_t should be reset to zero when it's allocated */ 6093 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx); 6094 6095 reset_deferred_meminit(pgdat); 6096 pgdat->node_id = nid; 6097 pgdat->node_start_pfn = node_start_pfn; 6098 pgdat->per_cpu_nodestats = NULL; 6099 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 6100 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 6101 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 6102 (u64)start_pfn << PAGE_SHIFT, 6103 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 6104 #else 6105 start_pfn = node_start_pfn; 6106 #endif 6107 calculate_node_totalpages(pgdat, start_pfn, end_pfn, 6108 zones_size, zholes_size); 6109 6110 alloc_node_mem_map(pgdat); 6111 #ifdef CONFIG_FLAT_NODE_MEM_MAP 6112 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 6113 nid, (unsigned long)pgdat, 6114 (unsigned long)pgdat->node_mem_map); 6115 #endif 6116 6117 free_area_init_core(pgdat); 6118 } 6119 6120 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 6121 6122 #if MAX_NUMNODES > 1 6123 /* 6124 * Figure out the number of possible node ids. 
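 * nr_node_ids becomes the highest bit set in node_possible_map plus one;
 * a possible map containing only nodes 0 and 2, for instance, yields
 * nr_node_ids == 3.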
6125 */ 6126 void __init setup_nr_node_ids(void) 6127 { 6128 unsigned int highest; 6129 6130 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 6131 nr_node_ids = highest + 1; 6132 } 6133 #endif 6134 6135 /** 6136 * node_map_pfn_alignment - determine the maximum internode alignment 6137 * 6138 * This function should be called after node map is populated and sorted. 6139 * It calculates the maximum power of two alignment which can distinguish 6140 * all the nodes. 6141 * 6142 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 6143 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 6144 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 6145 * shifted, 1GiB is enough and this function will indicate so. 6146 * 6147 * This is used to test whether pfn -> nid mapping of the chosen memory 6148 * model has fine enough granularity to avoid incorrect mapping for the 6149 * populated node map. 6150 * 6151 * Returns the determined alignment in pfn's. 0 if there is no alignment 6152 * requirement (single node). 6153 */ 6154 unsigned long __init node_map_pfn_alignment(void) 6155 { 6156 unsigned long accl_mask = 0, last_end = 0; 6157 unsigned long start, end, mask; 6158 int last_nid = -1; 6159 int i, nid; 6160 6161 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 6162 if (!start || last_nid < 0 || last_nid == nid) { 6163 last_nid = nid; 6164 last_end = end; 6165 continue; 6166 } 6167 6168 /* 6169 * Start with a mask granular enough to pin-point to the 6170 * start pfn and tick off bits one-by-one until it becomes 6171 * too coarse to separate the current node from the last. 6172 */ 6173 mask = ~((1 << __ffs(start)) - 1); 6174 while (mask && last_end <= (start & (mask << 1))) 6175 mask <<= 1; 6176 6177 /* accumulate all internode masks */ 6178 accl_mask |= mask; 6179 } 6180 6181 /* convert mask to number of pages */ 6182 return ~accl_mask + 1; 6183 } 6184 6185 /* Find the lowest pfn for a node */ 6186 static unsigned long __init find_min_pfn_for_node(int nid) 6187 { 6188 unsigned long min_pfn = ULONG_MAX; 6189 unsigned long start_pfn; 6190 int i; 6191 6192 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) 6193 min_pfn = min(min_pfn, start_pfn); 6194 6195 if (min_pfn == ULONG_MAX) { 6196 pr_warn("Could not find start_pfn for node %d\n", nid); 6197 return 0; 6198 } 6199 6200 return min_pfn; 6201 } 6202 6203 /** 6204 * find_min_pfn_with_active_regions - Find the minimum PFN registered 6205 * 6206 * It returns the minimum PFN based on information provided via 6207 * memblock_set_node(). 6208 */ 6209 unsigned long __init find_min_pfn_with_active_regions(void) 6210 { 6211 return find_min_pfn_for_node(MAX_NUMNODES); 6212 } 6213 6214 /* 6215 * early_calculate_totalpages() 6216 * Sum pages in active regions for movable zone. 6217 * Populate N_MEMORY for calculating usable_nodes. 6218 */ 6219 static unsigned long __init early_calculate_totalpages(void) 6220 { 6221 unsigned long totalpages = 0; 6222 unsigned long start_pfn, end_pfn; 6223 int i, nid; 6224 6225 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 6226 unsigned long pages = end_pfn - start_pfn; 6227 6228 totalpages += pages; 6229 if (pages) 6230 node_set_state(nid, N_MEMORY); 6231 } 6232 return totalpages; 6233 } 6234 6235 /* 6236 * Find the PFN the Movable zone begins in each node. Kernel memory 6237 * is spread evenly between nodes as long as the nodes have enough 6238 * memory. 
When they don't, some nodes will have more kernelcore than 6239 * others 6240 */ 6241 static void __init find_zone_movable_pfns_for_nodes(void) 6242 { 6243 int i, nid; 6244 unsigned long usable_startpfn; 6245 unsigned long kernelcore_node, kernelcore_remaining; 6246 /* save the state before borrow the nodemask */ 6247 nodemask_t saved_node_state = node_states[N_MEMORY]; 6248 unsigned long totalpages = early_calculate_totalpages(); 6249 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 6250 struct memblock_region *r; 6251 6252 /* Need to find movable_zone earlier when movable_node is specified. */ 6253 find_usable_zone_for_movable(); 6254 6255 /* 6256 * If movable_node is specified, ignore kernelcore and movablecore 6257 * options. 6258 */ 6259 if (movable_node_is_enabled()) { 6260 for_each_memblock(memory, r) { 6261 if (!memblock_is_hotpluggable(r)) 6262 continue; 6263 6264 nid = r->nid; 6265 6266 usable_startpfn = PFN_DOWN(r->base); 6267 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 6268 min(usable_startpfn, zone_movable_pfn[nid]) : 6269 usable_startpfn; 6270 } 6271 6272 goto out2; 6273 } 6274 6275 /* 6276 * If kernelcore=mirror is specified, ignore movablecore option 6277 */ 6278 if (mirrored_kernelcore) { 6279 bool mem_below_4gb_not_mirrored = false; 6280 6281 for_each_memblock(memory, r) { 6282 if (memblock_is_mirror(r)) 6283 continue; 6284 6285 nid = r->nid; 6286 6287 usable_startpfn = memblock_region_memory_base_pfn(r); 6288 6289 if (usable_startpfn < 0x100000) { 6290 mem_below_4gb_not_mirrored = true; 6291 continue; 6292 } 6293 6294 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 6295 min(usable_startpfn, zone_movable_pfn[nid]) : 6296 usable_startpfn; 6297 } 6298 6299 if (mem_below_4gb_not_mirrored) 6300 pr_warn("This configuration results in unmirrored kernel memory."); 6301 6302 goto out2; 6303 } 6304 6305 /* 6306 * If movablecore=nn[KMG] was specified, calculate what size of 6307 * kernelcore that corresponds so that memory usable for 6308 * any allocation type is evenly spread. If both kernelcore 6309 * and movablecore are specified, then the value of kernelcore 6310 * will be used for required_kernelcore if it's greater than 6311 * what movablecore would have allowed. 6312 */ 6313 if (required_movablecore) { 6314 unsigned long corepages; 6315 6316 /* 6317 * Round-up so that ZONE_MOVABLE is at least as large as what 6318 * was requested by the user 6319 */ 6320 required_movablecore = 6321 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 6322 required_movablecore = min(totalpages, required_movablecore); 6323 corepages = totalpages - required_movablecore; 6324 6325 required_kernelcore = max(required_kernelcore, corepages); 6326 } 6327 6328 /* 6329 * If kernelcore was not specified or kernelcore size is larger 6330 * than totalpages, there is no ZONE_MOVABLE. 
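 *
 * Otherwise kernelcore is spread across the nodes that have memory. As a
 * rough example, booting with kernelcore=2G on a machine with two memory
 * nodes aims at about 1G of kernel-usable memory on each node, with the
 * remainder of each node going to ZONE_MOVABLE; the restart pass below
 * rebalances across the remaining nodes when one node is too small to
 * take its full share.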
6331 */ 6332 if (!required_kernelcore || required_kernelcore >= totalpages) 6333 goto out; 6334 6335 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 6336 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 6337 6338 restart: 6339 /* Spread kernelcore memory as evenly as possible throughout nodes */ 6340 kernelcore_node = required_kernelcore / usable_nodes; 6341 for_each_node_state(nid, N_MEMORY) { 6342 unsigned long start_pfn, end_pfn; 6343 6344 /* 6345 * Recalculate kernelcore_node if the division per node 6346 * now exceeds what is necessary to satisfy the requested 6347 * amount of memory for the kernel 6348 */ 6349 if (required_kernelcore < kernelcore_node) 6350 kernelcore_node = required_kernelcore / usable_nodes; 6351 6352 /* 6353 * As the map is walked, we track how much memory is usable 6354 * by the kernel using kernelcore_remaining. When it is 6355 * 0, the rest of the node is usable by ZONE_MOVABLE 6356 */ 6357 kernelcore_remaining = kernelcore_node; 6358 6359 /* Go through each range of PFNs within this node */ 6360 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 6361 unsigned long size_pages; 6362 6363 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 6364 if (start_pfn >= end_pfn) 6365 continue; 6366 6367 /* Account for what is only usable for kernelcore */ 6368 if (start_pfn < usable_startpfn) { 6369 unsigned long kernel_pages; 6370 kernel_pages = min(end_pfn, usable_startpfn) 6371 - start_pfn; 6372 6373 kernelcore_remaining -= min(kernel_pages, 6374 kernelcore_remaining); 6375 required_kernelcore -= min(kernel_pages, 6376 required_kernelcore); 6377 6378 /* Continue if range is now fully accounted */ 6379 if (end_pfn <= usable_startpfn) { 6380 6381 /* 6382 * Push zone_movable_pfn to the end so 6383 * that if we have to rebalance 6384 * kernelcore across nodes, we will 6385 * not double account here 6386 */ 6387 zone_movable_pfn[nid] = end_pfn; 6388 continue; 6389 } 6390 start_pfn = usable_startpfn; 6391 } 6392 6393 /* 6394 * The usable PFN range for ZONE_MOVABLE is from 6395 * start_pfn->end_pfn. Calculate size_pages as the 6396 * number of pages used as kernelcore 6397 */ 6398 size_pages = end_pfn - start_pfn; 6399 if (size_pages > kernelcore_remaining) 6400 size_pages = kernelcore_remaining; 6401 zone_movable_pfn[nid] = start_pfn + size_pages; 6402 6403 /* 6404 * Some kernelcore has been met, update counts and 6405 * break if the kernelcore for this node has been 6406 * satisfied 6407 */ 6408 required_kernelcore -= min(required_kernelcore, 6409 size_pages); 6410 kernelcore_remaining -= size_pages; 6411 if (!kernelcore_remaining) 6412 break; 6413 } 6414 } 6415 6416 /* 6417 * If there is still required_kernelcore, we do another pass with one 6418 * less node in the count. This will push zone_movable_pfn[nid] further 6419 * along on the nodes that still have memory until kernelcore is 6420 * satisfied 6421 */ 6422 usable_nodes--; 6423 if (usable_nodes && required_kernelcore > usable_nodes) 6424 goto restart; 6425 6426 out2: 6427 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 6428 for (nid = 0; nid < MAX_NUMNODES; nid++) 6429 zone_movable_pfn[nid] = 6430 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 6431 6432 out: 6433 /* restore the node_state */ 6434 node_states[N_MEMORY] = saved_node_state; 6435 } 6436 6437 /* Any regular or high memory on that node ? 
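 * Called from free_area_init_nodes() to set N_HIGH_MEMORY (some regular or
 * highmem zone is populated) and N_NORMAL_MEMORY (a zone at or below
 * ZONE_NORMAL is populated) in node_states for this node.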
*/ 6438 static void check_for_memory(pg_data_t *pgdat, int nid) 6439 { 6440 enum zone_type zone_type; 6441 6442 if (N_MEMORY == N_NORMAL_MEMORY) 6443 return; 6444 6445 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 6446 struct zone *zone = &pgdat->node_zones[zone_type]; 6447 if (populated_zone(zone)) { 6448 node_set_state(nid, N_HIGH_MEMORY); 6449 if (N_NORMAL_MEMORY != N_HIGH_MEMORY && 6450 zone_type <= ZONE_NORMAL) 6451 node_set_state(nid, N_NORMAL_MEMORY); 6452 break; 6453 } 6454 } 6455 } 6456 6457 /** 6458 * free_area_init_nodes - Initialise all pg_data_t and zone data 6459 * @max_zone_pfn: an array of max PFNs for each zone 6460 * 6461 * This will call free_area_init_node() for each active node in the system. 6462 * Using the page ranges provided by memblock_set_node(), the size of each 6463 * zone in each node and their holes is calculated. If the maximum PFN 6464 * between two adjacent zones match, it is assumed that the zone is empty. 6465 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 6466 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 6467 * starts where the previous one ended. For example, ZONE_DMA32 starts 6468 * at arch_max_dma_pfn. 6469 */ 6470 void __init free_area_init_nodes(unsigned long *max_zone_pfn) 6471 { 6472 unsigned long start_pfn, end_pfn; 6473 int i, nid; 6474 6475 /* Record where the zone boundaries are */ 6476 memset(arch_zone_lowest_possible_pfn, 0, 6477 sizeof(arch_zone_lowest_possible_pfn)); 6478 memset(arch_zone_highest_possible_pfn, 0, 6479 sizeof(arch_zone_highest_possible_pfn)); 6480 6481 start_pfn = find_min_pfn_with_active_regions(); 6482 6483 for (i = 0; i < MAX_NR_ZONES; i++) { 6484 if (i == ZONE_MOVABLE) 6485 continue; 6486 6487 end_pfn = max(max_zone_pfn[i], start_pfn); 6488 arch_zone_lowest_possible_pfn[i] = start_pfn; 6489 arch_zone_highest_possible_pfn[i] = end_pfn; 6490 6491 start_pfn = end_pfn; 6492 } 6493 6494 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 6495 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 6496 find_zone_movable_pfns_for_nodes(); 6497 6498 /* Print out the zone ranges */ 6499 pr_info("Zone ranges:\n"); 6500 for (i = 0; i < MAX_NR_ZONES; i++) { 6501 if (i == ZONE_MOVABLE) 6502 continue; 6503 pr_info(" %-8s ", zone_names[i]); 6504 if (arch_zone_lowest_possible_pfn[i] == 6505 arch_zone_highest_possible_pfn[i]) 6506 pr_cont("empty\n"); 6507 else 6508 pr_cont("[mem %#018Lx-%#018Lx]\n", 6509 (u64)arch_zone_lowest_possible_pfn[i] 6510 << PAGE_SHIFT, 6511 ((u64)arch_zone_highest_possible_pfn[i] 6512 << PAGE_SHIFT) - 1); 6513 } 6514 6515 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 6516 pr_info("Movable zone start for each node\n"); 6517 for (i = 0; i < MAX_NUMNODES; i++) { 6518 if (zone_movable_pfn[i]) 6519 pr_info(" Node %d: %#018Lx\n", i, 6520 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 6521 } 6522 6523 /* Print out the early node map */ 6524 pr_info("Early memory node ranges\n"); 6525 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) 6526 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 6527 (u64)start_pfn << PAGE_SHIFT, 6528 ((u64)end_pfn << PAGE_SHIFT) - 1); 6529 6530 /* Initialise every node */ 6531 mminit_verify_pageflags_layout(); 6532 setup_nr_node_ids(); 6533 for_each_online_node(nid) { 6534 pg_data_t *pgdat = NODE_DATA(nid); 6535 free_area_init_node(nid, NULL, 6536 find_min_pfn_for_node(nid), NULL); 6537 6538 /* Any memory on that node */ 6539 if (pgdat->node_present_pages) 6540 node_set_state(nid, 
N_MEMORY); 6541 check_for_memory(pgdat, nid); 6542 } 6543 } 6544 6545 static int __init cmdline_parse_core(char *p, unsigned long *core) 6546 { 6547 unsigned long long coremem; 6548 if (!p) 6549 return -EINVAL; 6550 6551 coremem = memparse(p, &p); 6552 *core = coremem >> PAGE_SHIFT; 6553 6554 /* Paranoid check that UL is enough for the coremem value */ 6555 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 6556 6557 return 0; 6558 } 6559 6560 /* 6561 * kernelcore=size sets the amount of memory for use for allocations that 6562 * cannot be reclaimed or migrated. 6563 */ 6564 static int __init cmdline_parse_kernelcore(char *p) 6565 { 6566 /* parse kernelcore=mirror */ 6567 if (parse_option_str(p, "mirror")) { 6568 mirrored_kernelcore = true; 6569 return 0; 6570 } 6571 6572 return cmdline_parse_core(p, &required_kernelcore); 6573 } 6574 6575 /* 6576 * movablecore=size sets the amount of memory for use for allocations that 6577 * can be reclaimed or migrated. 6578 */ 6579 static int __init cmdline_parse_movablecore(char *p) 6580 { 6581 return cmdline_parse_core(p, &required_movablecore); 6582 } 6583 6584 early_param("kernelcore", cmdline_parse_kernelcore); 6585 early_param("movablecore", cmdline_parse_movablecore); 6586 6587 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 6588 6589 void adjust_managed_page_count(struct page *page, long count) 6590 { 6591 spin_lock(&managed_page_count_lock); 6592 page_zone(page)->managed_pages += count; 6593 totalram_pages += count; 6594 #ifdef CONFIG_HIGHMEM 6595 if (PageHighMem(page)) 6596 totalhigh_pages += count; 6597 #endif 6598 spin_unlock(&managed_page_count_lock); 6599 } 6600 EXPORT_SYMBOL(adjust_managed_page_count); 6601 6602 unsigned long free_reserved_area(void *start, void *end, int poison, char *s) 6603 { 6604 void *pos; 6605 unsigned long pages = 0; 6606 6607 start = (void *)PAGE_ALIGN((unsigned long)start); 6608 end = (void *)((unsigned long)end & PAGE_MASK); 6609 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 6610 if ((unsigned int)poison <= 0xFF) 6611 memset(pos, poison, PAGE_SIZE); 6612 free_reserved_page(virt_to_page(pos)); 6613 } 6614 6615 if (pages && s) 6616 pr_info("Freeing %s memory: %ldK\n", 6617 s, pages << (PAGE_SHIFT - 10)); 6618 6619 return pages; 6620 } 6621 EXPORT_SYMBOL(free_reserved_area); 6622 6623 #ifdef CONFIG_HIGHMEM 6624 void free_highmem_page(struct page *page) 6625 { 6626 __free_reserved_page(page); 6627 totalram_pages++; 6628 page_zone(page)->managed_pages++; 6629 totalhigh_pages++; 6630 } 6631 #endif 6632 6633 6634 void __init mem_init_print_info(const char *str) 6635 { 6636 unsigned long physpages, codesize, datasize, rosize, bss_size; 6637 unsigned long init_code_size, init_data_size; 6638 6639 physpages = get_num_physpages(); 6640 codesize = _etext - _stext; 6641 datasize = _edata - _sdata; 6642 rosize = __end_rodata - __start_rodata; 6643 bss_size = __bss_stop - __bss_start; 6644 init_data_size = __init_end - __init_begin; 6645 init_code_size = _einittext - _sinittext; 6646 6647 /* 6648 * Detect special cases and adjust section sizes accordingly: 6649 * 1) .init.* may be embedded into .data sections 6650 * 2) .init.text.* may be out of [__init_begin, __init_end], 6651 * please refer to arch/tile/kernel/vmlinux.lds.S. 6652 * 3) .rodata.* may be embedded into .text or .data sections. 
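 *
 * The adj_init_size() macro below compensates for this: when the embedded
 * section's start address falls inside the enclosing section, the embedded
 * size is subtracted once so the same bytes are not reported twice.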
6653 */ 6654 #define adj_init_size(start, end, size, pos, adj) \ 6655 do { \ 6656 if (start <= pos && pos < end && size > adj) \ 6657 size -= adj; \ 6658 } while (0) 6659 6660 adj_init_size(__init_begin, __init_end, init_data_size, 6661 _sinittext, init_code_size); 6662 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 6663 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 6664 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 6665 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 6666 6667 #undef adj_init_size 6668 6669 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 6670 #ifdef CONFIG_HIGHMEM 6671 ", %luK highmem" 6672 #endif 6673 "%s%s)\n", 6674 nr_free_pages() << (PAGE_SHIFT - 10), 6675 physpages << (PAGE_SHIFT - 10), 6676 codesize >> 10, datasize >> 10, rosize >> 10, 6677 (init_data_size + init_code_size) >> 10, bss_size >> 10, 6678 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10), 6679 totalcma_pages << (PAGE_SHIFT - 10), 6680 #ifdef CONFIG_HIGHMEM 6681 totalhigh_pages << (PAGE_SHIFT - 10), 6682 #endif 6683 str ? ", " : "", str ? str : ""); 6684 } 6685 6686 /** 6687 * set_dma_reserve - set the specified number of pages reserved in the first zone 6688 * @new_dma_reserve: The number of pages to mark reserved 6689 * 6690 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 6691 * In the DMA zone, a significant percentage may be consumed by kernel image 6692 * and other unfreeable allocations which can skew the watermarks badly. This 6693 * function may optionally be used to account for unfreeable pages in the 6694 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 6695 * smaller per-cpu batchsize. 6696 */ 6697 void __init set_dma_reserve(unsigned long new_dma_reserve) 6698 { 6699 dma_reserve = new_dma_reserve; 6700 } 6701 6702 void __init free_area_init(unsigned long *zones_size) 6703 { 6704 free_area_init_node(0, zones_size, 6705 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 6706 } 6707 6708 static int page_alloc_cpu_dead(unsigned int cpu) 6709 { 6710 6711 lru_add_drain_cpu(cpu); 6712 drain_pages(cpu); 6713 6714 /* 6715 * Spill the event counters of the dead processor 6716 * into the current processors event counters. 6717 * This artificially elevates the count of the current 6718 * processor. 6719 */ 6720 vm_events_fold_cpu(cpu); 6721 6722 /* 6723 * Zero the differential counters of the dead processor 6724 * so that the vm statistics are consistent. 6725 * 6726 * This is only okay since the processor is dead and cannot 6727 * race with what we are doing. 6728 */ 6729 cpu_vm_stats_fold(cpu); 6730 return 0; 6731 } 6732 6733 void __init page_alloc_init(void) 6734 { 6735 int ret; 6736 6737 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD, 6738 "mm/page_alloc:dead", NULL, 6739 page_alloc_cpu_dead); 6740 WARN_ON(ret < 0); 6741 } 6742 6743 /* 6744 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6745 * or min_free_kbytes changes. 
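 *
 * For every zone the reserve is taken to be its largest lowmem_reserve[]
 * entry plus the high watermark, clamped to the zone's managed_pages; the
 * per-node and global totals are the sums of these per-zone values.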
6746 */ 6747 static void calculate_totalreserve_pages(void) 6748 { 6749 struct pglist_data *pgdat; 6750 unsigned long reserve_pages = 0; 6751 enum zone_type i, j; 6752 6753 for_each_online_pgdat(pgdat) { 6754 6755 pgdat->totalreserve_pages = 0; 6756 6757 for (i = 0; i < MAX_NR_ZONES; i++) { 6758 struct zone *zone = pgdat->node_zones + i; 6759 long max = 0; 6760 6761 /* Find valid and maximum lowmem_reserve in the zone */ 6762 for (j = i; j < MAX_NR_ZONES; j++) { 6763 if (zone->lowmem_reserve[j] > max) 6764 max = zone->lowmem_reserve[j]; 6765 } 6766 6767 /* we treat the high watermark as reserved pages. */ 6768 max += high_wmark_pages(zone); 6769 6770 if (max > zone->managed_pages) 6771 max = zone->managed_pages; 6772 6773 pgdat->totalreserve_pages += max; 6774 6775 reserve_pages += max; 6776 } 6777 } 6778 totalreserve_pages = reserve_pages; 6779 } 6780 6781 /* 6782 * setup_per_zone_lowmem_reserve - called whenever 6783 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6784 * has a correct pages reserved value, so an adequate number of 6785 * pages are left in the zone after a successful __alloc_pages(). 6786 */ 6787 static void setup_per_zone_lowmem_reserve(void) 6788 { 6789 struct pglist_data *pgdat; 6790 enum zone_type j, idx; 6791 6792 for_each_online_pgdat(pgdat) { 6793 for (j = 0; j < MAX_NR_ZONES; j++) { 6794 struct zone *zone = pgdat->node_zones + j; 6795 unsigned long managed_pages = zone->managed_pages; 6796 6797 zone->lowmem_reserve[j] = 0; 6798 6799 idx = j; 6800 while (idx) { 6801 struct zone *lower_zone; 6802 6803 idx--; 6804 6805 if (sysctl_lowmem_reserve_ratio[idx] < 1) 6806 sysctl_lowmem_reserve_ratio[idx] = 1; 6807 6808 lower_zone = pgdat->node_zones + idx; 6809 lower_zone->lowmem_reserve[j] = managed_pages / 6810 sysctl_lowmem_reserve_ratio[idx]; 6811 managed_pages += lower_zone->managed_pages; 6812 } 6813 } 6814 } 6815 6816 /* update totalreserve_pages */ 6817 calculate_totalreserve_pages(); 6818 } 6819 6820 static void __setup_per_zone_wmarks(void) 6821 { 6822 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6823 unsigned long lowmem_pages = 0; 6824 struct zone *zone; 6825 unsigned long flags; 6826 6827 /* Calculate total number of !ZONE_HIGHMEM pages */ 6828 for_each_zone(zone) { 6829 if (!is_highmem(zone)) 6830 lowmem_pages += zone->managed_pages; 6831 } 6832 6833 for_each_zone(zone) { 6834 u64 tmp; 6835 6836 spin_lock_irqsave(&zone->lock, flags); 6837 tmp = (u64)pages_min * zone->managed_pages; 6838 do_div(tmp, lowmem_pages); 6839 if (is_highmem(zone)) { 6840 /* 6841 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6842 * need highmem pages, so cap pages_min to a small 6843 * value here. 6844 * 6845 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6846 * deltas control asynch page reclaim, and so should 6847 * not be capped for highmem. 6848 */ 6849 unsigned long min_pages; 6850 6851 min_pages = zone->managed_pages / 1024; 6852 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6853 zone->watermark[WMARK_MIN] = min_pages; 6854 } else { 6855 /* 6856 * If it's a lowmem zone, reserve a number of pages 6857 * proportionate to the zone's size. 6858 */ 6859 zone->watermark[WMARK_MIN] = tmp; 6860 } 6861 6862 /* 6863 * Set the kswapd watermarks distance according to the 6864 * scale factor in proportion to available memory, but 6865 * ensure a minimum size on small systems. 
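 *
 * The delta applied below is the larger of a quarter of the zone's
 * proportional share of min_free_kbytes and, with the default
 * watermark_scale_factor of 10, 0.1% of the zone's managed pages;
 * WMARK_LOW and WMARK_HIGH then sit one and two such deltas above
 * WMARK_MIN respectively.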
6866 */ 6867 tmp = max_t(u64, tmp >> 2, 6868 mult_frac(zone->managed_pages, 6869 watermark_scale_factor, 10000)); 6870 6871 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6872 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; 6873 6874 spin_unlock_irqrestore(&zone->lock, flags); 6875 } 6876 6877 /* update totalreserve_pages */ 6878 calculate_totalreserve_pages(); 6879 } 6880 6881 /** 6882 * setup_per_zone_wmarks - called when min_free_kbytes changes 6883 * or when memory is hot-{added|removed} 6884 * 6885 * Ensures that the watermark[min,low,high] values for each zone are set 6886 * correctly with respect to min_free_kbytes. 6887 */ 6888 void setup_per_zone_wmarks(void) 6889 { 6890 mutex_lock(&zonelists_mutex); 6891 __setup_per_zone_wmarks(); 6892 mutex_unlock(&zonelists_mutex); 6893 } 6894 6895 /* 6896 * Initialise min_free_kbytes. 6897 * 6898 * For small machines we want it small (128k min). For large machines 6899 * we want it large (64MB max). But it is not linear, because network 6900 * bandwidth does not increase linearly with machine size. We use 6901 * 6902 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6903 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6904 * 6905 * which yields 6906 * 6907 * 16MB: 512k 6908 * 32MB: 724k 6909 * 64MB: 1024k 6910 * 128MB: 1448k 6911 * 256MB: 2048k 6912 * 512MB: 2896k 6913 * 1024MB: 4096k 6914 * 2048MB: 5792k 6915 * 4096MB: 8192k 6916 * 8192MB: 11584k 6917 * 16384MB: 16384k 6918 */ 6919 int __meminit init_per_zone_wmark_min(void) 6920 { 6921 unsigned long lowmem_kbytes; 6922 int new_min_free_kbytes; 6923 6924 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6925 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6926 6927 if (new_min_free_kbytes > user_min_free_kbytes) { 6928 min_free_kbytes = new_min_free_kbytes; 6929 if (min_free_kbytes < 128) 6930 min_free_kbytes = 128; 6931 if (min_free_kbytes > 65536) 6932 min_free_kbytes = 65536; 6933 } else { 6934 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6935 new_min_free_kbytes, user_min_free_kbytes); 6936 } 6937 setup_per_zone_wmarks(); 6938 refresh_zone_stat_thresholds(); 6939 setup_per_zone_lowmem_reserve(); 6940 6941 #ifdef CONFIG_NUMA 6942 setup_min_unmapped_ratio(); 6943 setup_min_slab_ratio(); 6944 #endif 6945 6946 return 0; 6947 } 6948 core_initcall(init_per_zone_wmark_min) 6949 6950 /* 6951 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6952 * that we can call two helper functions whenever min_free_kbytes 6953 * changes. 
6954 */ 6955 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 6956 void __user *buffer, size_t *length, loff_t *ppos) 6957 { 6958 int rc; 6959 6960 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6961 if (rc) 6962 return rc; 6963 6964 if (write) { 6965 user_min_free_kbytes = min_free_kbytes; 6966 setup_per_zone_wmarks(); 6967 } 6968 return 0; 6969 } 6970 6971 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 6972 void __user *buffer, size_t *length, loff_t *ppos) 6973 { 6974 int rc; 6975 6976 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 6977 if (rc) 6978 return rc; 6979 6980 if (write) 6981 setup_per_zone_wmarks(); 6982 6983 return 0; 6984 } 6985 6986 #ifdef CONFIG_NUMA 6987 static void setup_min_unmapped_ratio(void) 6988 { 6989 pg_data_t *pgdat; 6990 struct zone *zone; 6991 6992 for_each_online_pgdat(pgdat) 6993 pgdat->min_unmapped_pages = 0; 6994 6995 for_each_zone(zone) 6996 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * 6997 sysctl_min_unmapped_ratio) / 100; 6998 } 6999 7000 7001 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 7002 void __user *buffer, size_t *length, loff_t *ppos) 7003 { 7004 int rc; 7005 7006 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 7007 if (rc) 7008 return rc; 7009 7010 setup_min_unmapped_ratio(); 7011 7012 return 0; 7013 } 7014 7015 static void setup_min_slab_ratio(void) 7016 { 7017 pg_data_t *pgdat; 7018 struct zone *zone; 7019 7020 for_each_online_pgdat(pgdat) 7021 pgdat->min_slab_pages = 0; 7022 7023 for_each_zone(zone) 7024 zone->zone_pgdat->min_slab_pages += (zone->managed_pages * 7025 sysctl_min_slab_ratio) / 100; 7026 } 7027 7028 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 7029 void __user *buffer, size_t *length, loff_t *ppos) 7030 { 7031 int rc; 7032 7033 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 7034 if (rc) 7035 return rc; 7036 7037 setup_min_slab_ratio(); 7038 7039 return 0; 7040 } 7041 #endif 7042 7043 /* 7044 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 7045 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 7046 * whenever sysctl_lowmem_reserve_ratio changes. 7047 * 7048 * The reserve ratio obviously has absolutely no relation with the 7049 * minimum watermarks. The lowmem reserve ratio can only make sense 7050 * if in function of the boot time zone sizes. 7051 */ 7052 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, 7053 void __user *buffer, size_t *length, loff_t *ppos) 7054 { 7055 proc_dointvec_minmax(table, write, buffer, length, ppos); 7056 setup_per_zone_lowmem_reserve(); 7057 return 0; 7058 } 7059 7060 /* 7061 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 7062 * cpu. It is the fraction of total pages in each zone that a hot per cpu 7063 * pagelist can have before it gets flushed back to buddy allocator. 
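 *
 * For example, writing 8 (the smallest value accepted, see
 * MIN_PERCPU_PAGELIST_FRACTION) caps pcp->high at roughly
 * managed_pages / 8 for each zone, with the pcp batch size then derived
 * from that limit inside pageset_set_high_and_batch().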
7064 */ 7065 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, 7066 void __user *buffer, size_t *length, loff_t *ppos) 7067 { 7068 struct zone *zone; 7069 int old_percpu_pagelist_fraction; 7070 int ret; 7071 7072 mutex_lock(&pcp_batch_high_lock); 7073 old_percpu_pagelist_fraction = percpu_pagelist_fraction; 7074 7075 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 7076 if (!write || ret < 0) 7077 goto out; 7078 7079 /* Sanity checking to avoid pcp imbalance */ 7080 if (percpu_pagelist_fraction && 7081 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) { 7082 percpu_pagelist_fraction = old_percpu_pagelist_fraction; 7083 ret = -EINVAL; 7084 goto out; 7085 } 7086 7087 /* No change? */ 7088 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction) 7089 goto out; 7090 7091 for_each_populated_zone(zone) { 7092 unsigned int cpu; 7093 7094 for_each_possible_cpu(cpu) 7095 pageset_set_high_and_batch(zone, 7096 per_cpu_ptr(zone->pageset, cpu)); 7097 } 7098 out: 7099 mutex_unlock(&pcp_batch_high_lock); 7100 return ret; 7101 } 7102 7103 #ifdef CONFIG_NUMA 7104 int hashdist = HASHDIST_DEFAULT; 7105 7106 static int __init set_hashdist(char *str) 7107 { 7108 if (!str) 7109 return 0; 7110 hashdist = simple_strtoul(str, &str, 0); 7111 return 1; 7112 } 7113 __setup("hashdist=", set_hashdist); 7114 #endif 7115 7116 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES 7117 /* 7118 * Returns the number of pages that arch has reserved but 7119 * is not known to alloc_large_system_hash(). 7120 */ 7121 static unsigned long __init arch_reserved_kernel_pages(void) 7122 { 7123 return 0; 7124 } 7125 #endif 7126 7127 /* 7128 * allocate a large system hash table from bootmem 7129 * - it is assumed that the hash table must contain an exact power-of-2 7130 * quantity of entries 7131 * - limit is the number of hash buckets, not the total allocation size 7132 */ 7133 void *__init alloc_large_system_hash(const char *tablename, 7134 unsigned long bucketsize, 7135 unsigned long numentries, 7136 int scale, 7137 int flags, 7138 unsigned int *_hash_shift, 7139 unsigned int *_hash_mask, 7140 unsigned long low_limit, 7141 unsigned long high_limit) 7142 { 7143 unsigned long long max = high_limit; 7144 unsigned long log2qty, size; 7145 void *table = NULL; 7146 7147 /* allow the kernel cmdline to have a say */ 7148 if (!numentries) { 7149 /* round applicable memory size up to nearest megabyte */ 7150 numentries = nr_kernel_pages; 7151 numentries -= arch_reserved_kernel_pages(); 7152 7153 /* It isn't necessary when PAGE_SIZE >= 1MB */ 7154 if (PAGE_SHIFT < 20) 7155 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 7156 7157 /* limit to 1 bucket per 2^scale bytes of low memory */ 7158 if (scale > PAGE_SHIFT) 7159 numentries >>= (scale - PAGE_SHIFT); 7160 else 7161 numentries <<= (PAGE_SHIFT - scale); 7162 7163 /* Make sure we've got at least a 0-order allocation.. 
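 * With HASH_SMALL the floor is 1 << *_hash_shift entries (HASH_SMALL is
 * only meaningful together with HASH_EARLY); otherwise the table is
 * raised to at least one page worth of buckets.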
*/ 7164 if (unlikely(flags & HASH_SMALL)) { 7165 /* Makes no sense without HASH_EARLY */ 7166 WARN_ON(!(flags & HASH_EARLY)); 7167 if (!(numentries >> *_hash_shift)) { 7168 numentries = 1UL << *_hash_shift; 7169 BUG_ON(!numentries); 7170 } 7171 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 7172 numentries = PAGE_SIZE / bucketsize; 7173 } 7174 numentries = roundup_pow_of_two(numentries); 7175 7176 /* limit allocation size to 1/16 total memory by default */ 7177 if (max == 0) { 7178 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 7179 do_div(max, bucketsize); 7180 } 7181 max = min(max, 0x80000000ULL); 7182 7183 if (numentries < low_limit) 7184 numentries = low_limit; 7185 if (numentries > max) 7186 numentries = max; 7187 7188 log2qty = ilog2(numentries); 7189 7190 do { 7191 size = bucketsize << log2qty; 7192 if (flags & HASH_EARLY) 7193 table = memblock_virt_alloc_nopanic(size, 0); 7194 else if (hashdist) 7195 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 7196 else { 7197 /* 7198 * If bucketsize is not a power-of-two, we may free 7199 * some pages at the end of hash table which 7200 * alloc_pages_exact() automatically does 7201 */ 7202 if (get_order(size) < MAX_ORDER) { 7203 table = alloc_pages_exact(size, GFP_ATOMIC); 7204 kmemleak_alloc(table, size, 1, GFP_ATOMIC); 7205 } 7206 } 7207 } while (!table && size > PAGE_SIZE && --log2qty); 7208 7209 if (!table) 7210 panic("Failed to allocate %s hash table\n", tablename); 7211 7212 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n", 7213 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size); 7214 7215 if (_hash_shift) 7216 *_hash_shift = log2qty; 7217 if (_hash_mask) 7218 *_hash_mask = (1 << log2qty) - 1; 7219 7220 return table; 7221 } 7222 7223 /* 7224 * This function checks whether pageblock includes unmovable pages or not. 7225 * If @count is not zero, it is okay to include less @count unmovable pages 7226 * 7227 * PageLRU check without isolation or lru_lock could race so that 7228 * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable 7229 * check without lock_page also may miss some movable non-lru pages at 7230 * race condition. So you can't expect this function should be exact. 7231 */ 7232 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, 7233 bool skip_hwpoisoned_pages) 7234 { 7235 unsigned long pfn, iter, found; 7236 int mt; 7237 7238 /* 7239 * For avoiding noise data, lru_add_drain_all() should be called 7240 * If ZONE_MOVABLE, the zone never contains unmovable pages 7241 */ 7242 if (zone_idx(zone) == ZONE_MOVABLE) 7243 return false; 7244 mt = get_pageblock_migratetype(page); 7245 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt)) 7246 return false; 7247 7248 pfn = page_to_pfn(page); 7249 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { 7250 unsigned long check = pfn + iter; 7251 7252 if (!pfn_valid_within(check)) 7253 continue; 7254 7255 page = pfn_to_page(check); 7256 7257 /* 7258 * Hugepages are not in LRU lists, but they're movable. 7259 * We need not scan over tail pages bacause we don't 7260 * handle each tail page individually in migration. 7261 */ 7262 if (PageHuge(page)) { 7263 iter = round_up(iter + 1, 1<<compound_order(page)) - 1; 7264 continue; 7265 } 7266 7267 /* 7268 * We can't use page_count without pin a page 7269 * because another CPU can free compound page. 7270 * This check already skips compound tails of THP 7271 * because their page->_refcount is zero at all time. 
7272 */ 7273 if (!page_ref_count(page)) { 7274 if (PageBuddy(page)) 7275 iter += (1 << page_order(page)) - 1; 7276 continue; 7277 } 7278 7279 /* 7280 * The HWPoisoned page may be not in buddy system, and 7281 * page_count() is not 0. 7282 */ 7283 if (skip_hwpoisoned_pages && PageHWPoison(page)) 7284 continue; 7285 7286 if (__PageMovable(page)) 7287 continue; 7288 7289 if (!PageLRU(page)) 7290 found++; 7291 /* 7292 * If there are RECLAIMABLE pages, we need to check 7293 * it. But now, memory offline itself doesn't call 7294 * shrink_node_slabs() and it still to be fixed. 7295 */ 7296 /* 7297 * If the page is not RAM, page_count()should be 0. 7298 * we don't need more check. This is an _used_ not-movable page. 7299 * 7300 * The problematic thing here is PG_reserved pages. PG_reserved 7301 * is set to both of a memory hole page and a _used_ kernel 7302 * page at boot. 7303 */ 7304 if (found > count) 7305 return true; 7306 } 7307 return false; 7308 } 7309 7310 bool is_pageblock_removable_nolock(struct page *page) 7311 { 7312 struct zone *zone; 7313 unsigned long pfn; 7314 7315 /* 7316 * We have to be careful here because we are iterating over memory 7317 * sections which are not zone aware so we might end up outside of 7318 * the zone but still within the section. 7319 * We have to take care about the node as well. If the node is offline 7320 * its NODE_DATA will be NULL - see page_zone. 7321 */ 7322 if (!node_online(page_to_nid(page))) 7323 return false; 7324 7325 zone = page_zone(page); 7326 pfn = page_to_pfn(page); 7327 if (!zone_spans_pfn(zone, pfn)) 7328 return false; 7329 7330 return !has_unmovable_pages(zone, page, 0, true); 7331 } 7332 7333 #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) 7334 7335 static unsigned long pfn_max_align_down(unsigned long pfn) 7336 { 7337 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, 7338 pageblock_nr_pages) - 1); 7339 } 7340 7341 static unsigned long pfn_max_align_up(unsigned long pfn) 7342 { 7343 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, 7344 pageblock_nr_pages)); 7345 } 7346 7347 /* [start, end) must belong to a single zone. */ 7348 static int __alloc_contig_migrate_range(struct compact_control *cc, 7349 unsigned long start, unsigned long end) 7350 { 7351 /* This function is based on compact_zone() from compaction.c. */ 7352 unsigned long nr_reclaimed; 7353 unsigned long pfn = start; 7354 unsigned int tries = 0; 7355 int ret = 0; 7356 7357 migrate_prep(); 7358 7359 while (pfn < end || !list_empty(&cc->migratepages)) { 7360 if (fatal_signal_pending(current)) { 7361 ret = -EINTR; 7362 break; 7363 } 7364 7365 if (list_empty(&cc->migratepages)) { 7366 cc->nr_migratepages = 0; 7367 pfn = isolate_migratepages_range(cc, pfn, end); 7368 if (!pfn) { 7369 ret = -EINTR; 7370 break; 7371 } 7372 tries = 0; 7373 } else if (++tries == 5) { 7374 ret = ret < 0 ? 
ret : -EBUSY; 7375 break; 7376 } 7377 7378 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 7379 &cc->migratepages); 7380 cc->nr_migratepages -= nr_reclaimed; 7381 7382 ret = migrate_pages(&cc->migratepages, alloc_migrate_target, 7383 NULL, 0, cc->mode, MR_CMA); 7384 } 7385 if (ret < 0) { 7386 putback_movable_pages(&cc->migratepages); 7387 return ret; 7388 } 7389 return 0; 7390 } 7391 7392 /** 7393 * alloc_contig_range() -- tries to allocate given range of pages 7394 * @start: start PFN to allocate 7395 * @end: one-past-the-last PFN to allocate 7396 * @migratetype: migratetype of the underlaying pageblocks (either 7397 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 7398 * in range must have the same migratetype and it must 7399 * be either of the two. 7400 * @gfp_mask: GFP mask to use during compaction 7401 * 7402 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES 7403 * aligned, however it's the caller's responsibility to guarantee that 7404 * we are the only thread that changes migrate type of pageblocks the 7405 * pages fall in. 7406 * 7407 * The PFN range must belong to a single zone. 7408 * 7409 * Returns zero on success or negative error code. On success all 7410 * pages which PFN is in [start, end) are allocated for the caller and 7411 * need to be freed with free_contig_range(). 7412 */ 7413 int alloc_contig_range(unsigned long start, unsigned long end, 7414 unsigned migratetype, gfp_t gfp_mask) 7415 { 7416 unsigned long outer_start, outer_end; 7417 unsigned int order; 7418 int ret = 0; 7419 7420 struct compact_control cc = { 7421 .nr_migratepages = 0, 7422 .order = -1, 7423 .zone = page_zone(pfn_to_page(start)), 7424 .mode = MIGRATE_SYNC, 7425 .ignore_skip_hint = true, 7426 .gfp_mask = memalloc_noio_flags(gfp_mask), 7427 }; 7428 INIT_LIST_HEAD(&cc.migratepages); 7429 7430 /* 7431 * What we do here is we mark all pageblocks in range as 7432 * MIGRATE_ISOLATE. Because pageblock and max order pages may 7433 * have different sizes, and due to the way page allocator 7434 * work, we align the range to biggest of the two pages so 7435 * that page allocator won't try to merge buddies from 7436 * different pageblocks and change MIGRATE_ISOLATE to some 7437 * other migration type. 7438 * 7439 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 7440 * migrate the pages from an unaligned range (ie. pages that 7441 * we are interested in). This will put all the pages in 7442 * range back to page allocator as MIGRATE_ISOLATE. 7443 * 7444 * When this is done, we take the pages in range from page 7445 * allocator removing them from the buddy system. This way 7446 * page allocator will never consider using them. 7447 * 7448 * This lets us mark the pageblocks back as 7449 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 7450 * aligned range but not in the unaligned, original range are 7451 * put back to page allocator so that buddy can use them. 7452 */ 7453 7454 ret = start_isolate_page_range(pfn_max_align_down(start), 7455 pfn_max_align_up(end), migratetype, 7456 false); 7457 if (ret) 7458 return ret; 7459 7460 /* 7461 * In case of -EBUSY, we'd like to know which page causes problem. 7462 * So, just fall through. We will check it in test_pages_isolated(). 7463 */ 7464 ret = __alloc_contig_migrate_range(&cc, start, end); 7465 if (ret && ret != -EBUSY) 7466 goto done; 7467 7468 /* 7469 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES 7470 * aligned blocks that are marked as MIGRATE_ISOLATE. 
What's 7471 * more, all pages in [start, end) are free in the page allocator. 7472 * What we are going to do is to allocate all pages from 7473 * [start, end) (that is, remove them from the page allocator). 7474 * 7475 * The only problem is that pages at the beginning and at the 7476 * end of the interesting range may not be aligned with pages that 7477 * the page allocator holds, ie. they can be part of higher order 7478 * pages. Because of this, we reserve the bigger range and 7479 * once this is done free the pages we are not interested in. 7480 * 7481 * We don't have to hold zone->lock here because the pages are 7482 * isolated thus they won't get removed from buddy. 7483 */ 7484 7485 lru_add_drain_all(); 7486 drain_all_pages(cc.zone); 7487 7488 order = 0; 7489 outer_start = start; 7490 while (!PageBuddy(pfn_to_page(outer_start))) { 7491 if (++order >= MAX_ORDER) { 7492 outer_start = start; 7493 break; 7494 } 7495 outer_start &= ~0UL << order; 7496 } 7497 7498 if (outer_start != start) { 7499 order = page_order(pfn_to_page(outer_start)); 7500 7501 /* 7502 * outer_start page could be a small order buddy page and 7503 * it doesn't include start page. Adjust outer_start 7504 * in this case to report the failed page properly 7505 * on the tracepoint in test_pages_isolated() 7506 */ 7507 if (outer_start + (1UL << order) <= start) 7508 outer_start = start; 7509 } 7510 7511 /* Make sure the range is really isolated. */ 7512 if (test_pages_isolated(outer_start, end, false)) { 7513 pr_info("%s: [%lx, %lx) PFNs busy\n", 7514 __func__, outer_start, end); 7515 ret = -EBUSY; 7516 goto done; 7517 } 7518 7519 /* Grab isolated pages from freelists. */ 7520 outer_end = isolate_freepages_range(&cc, outer_start, end); 7521 if (!outer_end) { 7522 ret = -EBUSY; 7523 goto done; 7524 } 7525 7526 /* Free head and tail (if any) */ 7527 if (start != outer_start) 7528 free_contig_range(outer_start, start - outer_start); 7529 if (end != outer_end) 7530 free_contig_range(end, outer_end - end); 7531 7532 done: 7533 undo_isolate_page_range(pfn_max_align_down(start), 7534 pfn_max_align_up(end), migratetype); 7535 return ret; 7536 } 7537 7538 void free_contig_range(unsigned long pfn, unsigned nr_pages) 7539 { 7540 unsigned int count = 0; 7541 7542 for (; nr_pages--; pfn++) { 7543 struct page *page = pfn_to_page(pfn); 7544 7545 count += page_count(page) != 1; 7546 __free_page(page); 7547 } 7548 WARN(count != 0, "%d pages are still in use!\n", count); 7549 } 7550 #endif 7551 7552 #ifdef CONFIG_MEMORY_HOTPLUG 7553 /* 7554 * The zone indicated has a new number of managed_pages; batch sizes and percpu 7555 * page high values need to be recalculated.
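 * This is typically invoked from the memory hotplug online/offline paths
 * once zone->managed_pages has been updated.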
7556 */ 7557 void __meminit zone_pcp_update(struct zone *zone) 7558 { 7559 unsigned cpu; 7560 mutex_lock(&pcp_batch_high_lock); 7561 for_each_possible_cpu(cpu) 7562 pageset_set_high_and_batch(zone, 7563 per_cpu_ptr(zone->pageset, cpu)); 7564 mutex_unlock(&pcp_batch_high_lock); 7565 } 7566 #endif 7567 7568 void zone_pcp_reset(struct zone *zone) 7569 { 7570 unsigned long flags; 7571 int cpu; 7572 struct per_cpu_pageset *pset; 7573 7574 /* avoid races with drain_pages() */ 7575 local_irq_save(flags); 7576 if (zone->pageset != &boot_pageset) { 7577 for_each_online_cpu(cpu) { 7578 pset = per_cpu_ptr(zone->pageset, cpu); 7579 drain_zonestat(zone, pset); 7580 } 7581 free_percpu(zone->pageset); 7582 zone->pageset = &boot_pageset; 7583 } 7584 local_irq_restore(flags); 7585 } 7586 7587 #ifdef CONFIG_MEMORY_HOTREMOVE 7588 /* 7589 * All pages in the range must be in a single zone and isolated 7590 * before calling this. 7591 */ 7592 void 7593 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 7594 { 7595 struct page *page; 7596 struct zone *zone; 7597 unsigned int order, i; 7598 unsigned long pfn; 7599 unsigned long flags; 7600 /* find the first valid pfn */ 7601 for (pfn = start_pfn; pfn < end_pfn; pfn++) 7602 if (pfn_valid(pfn)) 7603 break; 7604 if (pfn == end_pfn) 7605 return; 7606 zone = page_zone(pfn_to_page(pfn)); 7607 spin_lock_irqsave(&zone->lock, flags); 7608 pfn = start_pfn; 7609 while (pfn < end_pfn) { 7610 if (!pfn_valid(pfn)) { 7611 pfn++; 7612 continue; 7613 } 7614 page = pfn_to_page(pfn); 7615 /* 7616 * The HWPoisoned page may be not in buddy system, and 7617 * page_count() is not 0. 7618 */ 7619 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 7620 pfn++; 7621 SetPageReserved(page); 7622 continue; 7623 } 7624 7625 BUG_ON(page_count(page)); 7626 BUG_ON(!PageBuddy(page)); 7627 order = page_order(page); 7628 #ifdef CONFIG_DEBUG_VM 7629 pr_info("remove from free list %lx %d %lx\n", 7630 pfn, 1 << order, end_pfn); 7631 #endif 7632 list_del(&page->lru); 7633 rmv_page_order(page); 7634 zone->free_area[order].nr_free--; 7635 for (i = 0; i < (1 << order); i++) 7636 SetPageReserved((page+i)); 7637 pfn += (1 << order); 7638 } 7639 spin_unlock_irqrestore(&zone->lock, flags); 7640 } 7641 #endif 7642 7643 bool is_free_buddy_page(struct page *page) 7644 { 7645 struct zone *zone = page_zone(page); 7646 unsigned long pfn = page_to_pfn(page); 7647 unsigned long flags; 7648 unsigned int order; 7649 7650 spin_lock_irqsave(&zone->lock, flags); 7651 for (order = 0; order < MAX_ORDER; order++) { 7652 struct page *page_head = page - (pfn & ((1 << order) - 1)); 7653 7654 if (PageBuddy(page_head) && page_order(page_head) >= order) 7655 break; 7656 } 7657 spin_unlock_irqrestore(&zone->lock, flags); 7658 7659 return order < MAX_ORDER; 7660 } 7661