/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif /* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	256,
#endif
#ifdef CONFIG_ZONE_DMA32
	256,
#endif
#ifdef CONFIG_HIGHMEM
	32,
#endif
	32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
#ifdef CONFIG_ZONE_DEVICE
	"Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	unsigned long max_initialise;

	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	/*
	 * Initialise at least 2G of a node but also take into account the
	 * two large system hashes that can take up to 1GB for 0.25TB/node.
	 */
	max_initialise = max(2UL << (30 - PAGE_SHIFT),
		(pgdat->node_spanned_pages >> 8));

	(*nr_initialised)++;
	if ((*nr_initialised > max_initialise) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
						bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	if (!debug_guardpage_minorder())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	if (!debug_guardpage_minorder())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length of (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long combined_pfn;
	unsigned long uninitialized_var(buddy_pfn);
	struct page *buddy;
	unsigned int max_order;

	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order - 1) {
		buddy_pfn = __find_buddy_pfn(pfn, order);
		buddy = page + (buddy_pfn - pfn);

		if (!pfn_valid_within(buddy_pfn))
			goto done_merging;
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}
	if (max_order < MAX_ORDER) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order++;
		goto continue_merging;
	}

done_merging:
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In that case,
	 * add the free page to the tail of the list so it's less likely to
	 * be used soon and more likely to be merged as a higher-order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
		struct page *higher_page, *higher_buddy;
		combined_pfn = buddy_pfn & pfn;
		higher_page = page + (combined_pfn - pfn);
		buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
		higher_buddy = higher_page + (buddy_pfn - combined_pfn);
		if (pfn_valid_within(buddy_pfn) &&
		    page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page->mem_cgroup |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static void free_pages_check_bad(struct page *page)
{
	const char *bad_reason;
	unsigned long bad_flags;

	bad_reason = NULL;
	bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	free_pages_check_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on the fact that page->lru.next never has bit 0 set,
	 * unless the page is PageTail(). Let's make sure that's true even
	 * for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping is compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount", 0);
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * page_deferred_list().next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page", 0);
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set", 0);
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent", 0);
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(free_pages_check(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageKmemcg(page))
		memcg_kmem_uncharge(page, order);
	if (check_free)
		bad += free_pages_check(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_poison_pages(page, 1 << order, 0);
	kernel_map_pages(page, 1 << order, 0);
	kasan_free_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
	return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return free_pages_check(page);
}
#endif /* CONFIG_DEBUG_VM */

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	bool isolated_pageblocks;

	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	while (count) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around
		 * empty lists.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = count;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_last_entry(list, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);

			mt = get_pcppage_migratetype(page);
			/* MIGRATE_ISOLATE page should not go to pcplists */
			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
			/* Pageblock could have been isolated meanwhile */
			if (unlikely(isolated_pageblocks))
				mt = get_pageblock_migratetype(page);

			if (bulkfree_pcp_prepare(page))
				continue;

			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--count && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	spin_lock(&zone->lock);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
					int nid)
{
	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_pfn(pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order, true))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

static void __init __free_pages_boot_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
	defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
					struct mminit_pfnnid_cache *state)
{
	int nid;

	nid = __early_pfn_to_nid(pfn, state);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}

/* Only safe to use early in boot when initialisation is single-threaded */
static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
}

#else

static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	return true;
}
static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
					struct mminit_pfnnid_cache *state)
{
	return true;
}
#endif


void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	return __free_pages_boot_core(page, order);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with compaction's migration or free scanner. The scanners then need to use
 * only the pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(struct page *page,
					unsigned long pfn, int nr_pages)
{
	int i;

	if (!page)
		return;

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == pageblock_nr_pages &&
	    (pfn & (pageblock_nr_pages - 1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_boot_core(page, pageblock_order);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if ((pfn & (pageblock_nr_pages - 1)) == 0)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_boot_core(page, 0);
	}
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	int nid = pgdat->node_id;
	struct mminit_pfnnid_cache nid_init_state = { };
	unsigned long start = jiffies;
	unsigned long nr_pages = 0;
	unsigned long walk_start, walk_end;
	int i, zid;
	struct zone *zone;
	unsigned long first_init_pfn = pgdat->first_deferred_pfn;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (first_init_pfn == ULONG_MAX) {
		pgdat_init_report_one_done();
		return 0;
	}

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
		unsigned long pfn, end_pfn;
		struct page *page = NULL;
		struct page *free_base_page = NULL;
		unsigned long free_base_pfn = 0;
		int nr_to_free = 0;

		end_pfn = min(walk_end, zone_end_pfn(zone));
		pfn = first_init_pfn;
		if (pfn < walk_start)
			pfn = walk_start;
		if (pfn < zone->zone_start_pfn)
			pfn = zone->zone_start_pfn;

		for (; pfn < end_pfn; pfn++) {
			if (!pfn_valid_within(pfn))
				goto free_range;

			/*
			 * Ensure pfn_valid is checked every
			 * pageblock_nr_pages for memory holes
			 */
			if ((pfn & (pageblock_nr_pages - 1)) == 0) {
				if (!pfn_valid(pfn)) {
					page = NULL;
					goto free_range;
				}
			}

			if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
				page = NULL;
				goto free_range;
			}

			/* Minimise pfn page lookups and scheduler checks */
			if (page && (pfn & (pageblock_nr_pages - 1)) != 0) {
				page++;
			} else {
				nr_pages += nr_to_free;
				deferred_free_range(free_base_page,
						free_base_pfn, nr_to_free);
				free_base_page = NULL;
				free_base_pfn = nr_to_free = 0;

				page = pfn_to_page(pfn);
				cond_resched();
			}

			if (page->flags) {
				VM_BUG_ON(page_zone(page) != zone);
				goto free_range;
			}

			__init_single_page(page, pfn, zid, nid);
			if (!free_base_page) {
				free_base_page = page;
				free_base_pfn = pfn;
				nr_to_free = 0;
			}
			nr_to_free++;

			/* Where possible, batch up pages for a single free */
			continue;
free_range:
			/* Free the current block of pages to allocator */
			nr_pages += nr_to_free;
			deferred_free_range(free_base_page, free_base_pfn,
						nr_to_free);
			free_base_page = NULL;
			free_base_pfn = nr_to_free = 0;
		}
		/* Free the last block of pages to allocator */
		nr_pages += nr_to_free;
		deferred_free_range(free_base_page, free_base_pfn, nr_to_free);

		first_init_pfn = max(end_pfn, first_init_pfn);
	}

	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
					jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void __init page_alloc_init_late(void)
{
	struct zone *zone;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	int nid;

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), so that they can be merged
		 * back into the allocator when the buddy is freed.
		 * The corresponding page table entries will not be touched;
		 * the pages will stay not-present in the virtual address space.
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

static void check_new_page_bad(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & __PG_HWPOISON)) {
		bad_reason = "HWPoisoned (hardware-corrupted)";
		bad_flags = __PG_HWPOISON;
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	bad_page(page, bad_reason, bad_flags);
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

static inline bool free_pages_prezeroed(void)
{
	return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
		page_poisoning_enabled();
}

#ifdef CONFIG_DEBUG_VM
static bool check_pcp_refill(struct page *page)
{
	return false;
}

static bool check_new_pcp(struct page *page)
{
	return check_new_page(page);
}
#else
static bool check_pcp_refill(struct page *page)
{
	return check_new_page(page);
}
static bool check_new_pcp(struct page *page)
{
	return false;
}
#endif /* CONFIG_DEBUG_VM */

static bool check_new_pages(struct page *page, unsigned int order)
{
	int i;
	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return true;
	}

	return false;
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);
	kernel_poison_pages(page, 1 << order, 1);
	kasan_alloc_pages(page, order);
	set_page_owner(page, order, gfp_flags);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	int i;

	post_alloc_hook(page, order, gfp_flags);

	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
		for (i = 0; i < (1 << order); i++)
			clear_highpage(page + i);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the
	 * page being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = list_first_entry_or_null(&area->free_list[migratetype],
							struct page, lru);
		if (!page)
			continue;
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		set_pcppage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which free lists are fallen back to
 * when the free lists for the desired migratetype are depleted.
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
#ifdef CONFIG_CMA
	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
#endif
};

#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block().
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype, int *num_movable)
{
	struct page *page;
	unsigned int order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	if (num_movable)
		*num_movable = 0;

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!PageBuddy(page)) {
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
1871 */ 1872 if (num_movable && 1873 (PageLRU(page) || __PageMovable(page))) 1874 (*num_movable)++; 1875 1876 page++; 1877 continue; 1878 } 1879 1880 order = page_order(page); 1881 list_move(&page->lru, 1882 &zone->free_area[order].free_list[migratetype]); 1883 page += 1 << order; 1884 pages_moved += 1 << order; 1885 } 1886 1887 return pages_moved; 1888 } 1889 1890 int move_freepages_block(struct zone *zone, struct page *page, 1891 int migratetype, int *num_movable) 1892 { 1893 unsigned long start_pfn, end_pfn; 1894 struct page *start_page, *end_page; 1895 1896 start_pfn = page_to_pfn(page); 1897 start_pfn = start_pfn & ~(pageblock_nr_pages-1); 1898 start_page = pfn_to_page(start_pfn); 1899 end_page = start_page + pageblock_nr_pages - 1; 1900 end_pfn = start_pfn + pageblock_nr_pages - 1; 1901 1902 /* Do not cross zone boundaries */ 1903 if (!zone_spans_pfn(zone, start_pfn)) 1904 start_page = page; 1905 if (!zone_spans_pfn(zone, end_pfn)) 1906 return 0; 1907 1908 return move_freepages(zone, start_page, end_page, migratetype, 1909 num_movable); 1910 } 1911 1912 static void change_pageblock_range(struct page *pageblock_page, 1913 int start_order, int migratetype) 1914 { 1915 int nr_pageblocks = 1 << (start_order - pageblock_order); 1916 1917 while (nr_pageblocks--) { 1918 set_pageblock_migratetype(pageblock_page, migratetype); 1919 pageblock_page += pageblock_nr_pages; 1920 } 1921 } 1922 1923 /* 1924 * When we are falling back to another migratetype during allocation, try to 1925 * steal extra free pages from the same pageblocks to satisfy further 1926 * allocations, instead of polluting multiple pageblocks. 1927 * 1928 * If we are stealing a relatively large buddy page, it is likely there will 1929 * be more free pages in the pageblock, so try to steal them all. For 1930 * reclaimable and unmovable allocations, we steal regardless of page size, 1931 * as fragmentation caused by those allocations polluting movable pageblocks 1932 * is worse than movable allocations stealing from unmovable and reclaimable 1933 * pageblocks. 1934 */ 1935 static bool can_steal_fallback(unsigned int order, int start_mt) 1936 { 1937 /* 1938 * Leaving this order check is intended, although there is 1939 * relaxed order check in next check. The reason is that 1940 * we can actually steal whole pageblock if this condition met, 1941 * but, below check doesn't guarantee it and that is just heuristic 1942 * so could be changed anytime. 1943 */ 1944 if (order >= pageblock_order) 1945 return true; 1946 1947 if (order >= pageblock_order / 2 || 1948 start_mt == MIGRATE_RECLAIMABLE || 1949 start_mt == MIGRATE_UNMOVABLE || 1950 page_group_by_mobility_disabled) 1951 return true; 1952 1953 return false; 1954 } 1955 1956 /* 1957 * This function implements actual steal behaviour. If order is large enough, 1958 * we can steal whole pageblock. If not, we first move freepages in this 1959 * pageblock to our migratetype and determine how many already-allocated pages 1960 * are there in the pageblock with a compatible migratetype. If at least half 1961 * of pages are free or compatible, we can change migratetype of the pageblock 1962 * itself, so pages freed in the future will be put on the correct free list. 
1963 */ 1964 static void steal_suitable_fallback(struct zone *zone, struct page *page, 1965 int start_type, bool whole_block) 1966 { 1967 unsigned int current_order = page_order(page); 1968 struct free_area *area; 1969 int free_pages, movable_pages, alike_pages; 1970 int old_block_type; 1971 1972 old_block_type = get_pageblock_migratetype(page); 1973 1974 /* 1975 * This can happen due to races and we want to prevent broken 1976 * highatomic accounting. 1977 */ 1978 if (is_migrate_highatomic(old_block_type)) 1979 goto single_page; 1980 1981 /* Take ownership for orders >= pageblock_order */ 1982 if (current_order >= pageblock_order) { 1983 change_pageblock_range(page, current_order, start_type); 1984 goto single_page; 1985 } 1986 1987 /* We are not allowed to try stealing from the whole block */ 1988 if (!whole_block) 1989 goto single_page; 1990 1991 free_pages = move_freepages_block(zone, page, start_type, 1992 &movable_pages); 1993 /* 1994 * Determine how many pages are compatible with our allocation. 1995 * For movable allocation, it's the number of movable pages which 1996 * we just obtained. For other types it's a bit more tricky. 1997 */ 1998 if (start_type == MIGRATE_MOVABLE) { 1999 alike_pages = movable_pages; 2000 } else { 2001 /* 2002 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation 2003 * to MOVABLE pageblock, consider all non-movable pages as 2004 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or 2005 * vice versa, be conservative since we can't distinguish the 2006 * exact migratetype of non-movable pages. 2007 */ 2008 if (old_block_type == MIGRATE_MOVABLE) 2009 alike_pages = pageblock_nr_pages 2010 - (free_pages + movable_pages); 2011 else 2012 alike_pages = 0; 2013 } 2014 2015 /* moving whole block can fail due to zone boundary conditions */ 2016 if (!free_pages) 2017 goto single_page; 2018 2019 /* 2020 * If a sufficient number of pages in the block are either free or of 2021 * comparable migratability as our allocation, claim the whole block. 2022 */ 2023 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || 2024 page_group_by_mobility_disabled) 2025 set_pageblock_migratetype(page, start_type); 2026 2027 return; 2028 2029 single_page: 2030 area = &zone->free_area[current_order]; 2031 list_move(&page->lru, &area->free_list[start_type]); 2032 } 2033 2034 /* 2035 * Check whether there is a suitable fallback freepage with requested order. 2036 * If only_stealable is true, this function returns fallback_mt only if 2037 * we can steal other freepages all together. This would help to reduce 2038 * fragmentation due to mixed migratetype pages in one pageblock. 
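 * * Note that the scan below walks the fallbacks[] table defined above for the requested migratetype and stops at the MIGRATE_TYPES sentinel, so CMA and ISOLATE are never handed out as fallback targets.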
2039 */ 2040 int find_suitable_fallback(struct free_area *area, unsigned int order, 2041 int migratetype, bool only_stealable, bool *can_steal) 2042 { 2043 int i; 2044 int fallback_mt; 2045 2046 if (area->nr_free == 0) 2047 return -1; 2048 2049 *can_steal = false; 2050 for (i = 0;; i++) { 2051 fallback_mt = fallbacks[migratetype][i]; 2052 if (fallback_mt == MIGRATE_TYPES) 2053 break; 2054 2055 if (list_empty(&area->free_list[fallback_mt])) 2056 continue; 2057 2058 if (can_steal_fallback(order, migratetype)) 2059 *can_steal = true; 2060 2061 if (!only_stealable) 2062 return fallback_mt; 2063 2064 if (*can_steal) 2065 return fallback_mt; 2066 } 2067 2068 return -1; 2069 } 2070 2071 /* 2072 * Reserve a pageblock for exclusive use of high-order atomic allocations if 2073 * there are no empty page blocks that contain a page with a suitable order 2074 */ 2075 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, 2076 unsigned int alloc_order) 2077 { 2078 int mt; 2079 unsigned long max_managed, flags; 2080 2081 /* 2082 * Limit the number reserved to 1 pageblock or roughly 1% of a zone. 2083 * Check is race-prone but harmless. 2084 */ 2085 max_managed = (zone->managed_pages / 100) + pageblock_nr_pages; 2086 if (zone->nr_reserved_highatomic >= max_managed) 2087 return; 2088 2089 spin_lock_irqsave(&zone->lock, flags); 2090 2091 /* Recheck the nr_reserved_highatomic limit under the lock */ 2092 if (zone->nr_reserved_highatomic >= max_managed) 2093 goto out_unlock; 2094 2095 /* Yoink! */ 2096 mt = get_pageblock_migratetype(page); 2097 if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt) 2098 && !is_migrate_cma(mt)) { 2099 zone->nr_reserved_highatomic += pageblock_nr_pages; 2100 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); 2101 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); 2102 } 2103 2104 out_unlock: 2105 spin_unlock_irqrestore(&zone->lock, flags); 2106 } 2107 2108 /* 2109 * Used when an allocation is about to fail under memory pressure. This 2110 * potentially hurts the reliability of high-order allocations when under 2111 * intense memory pressure but failed atomic allocations should be easier 2112 * to recover from than an OOM. 2113 * 2114 * If @force is true, try to unreserve a pageblock even though highatomic 2115 * pageblock is exhausted. 2116 */ 2117 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, 2118 bool force) 2119 { 2120 struct zonelist *zonelist = ac->zonelist; 2121 unsigned long flags; 2122 struct zoneref *z; 2123 struct zone *zone; 2124 struct page *page; 2125 int order; 2126 bool ret; 2127 2128 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx, 2129 ac->nodemask) { 2130 /* 2131 * Preserve at least one pageblock unless memory pressure 2132 * is really high. 2133 */ 2134 if (!force && zone->nr_reserved_highatomic <= 2135 pageblock_nr_pages) 2136 continue; 2137 2138 spin_lock_irqsave(&zone->lock, flags); 2139 for (order = 0; order < MAX_ORDER; order++) { 2140 struct free_area *area = &(zone->free_area[order]); 2141 2142 page = list_first_entry_or_null( 2143 &area->free_list[MIGRATE_HIGHATOMIC], 2144 struct page, lru); 2145 if (!page) 2146 continue; 2147 2148 /* 2149 * In page freeing path, migratetype change is racy so 2150 * we can counter several free pages in a pageblock 2151 * in this loop althoug we changed the pageblock type 2152 * from highatomic to ac->migratetype. So we should 2153 * adjust the count once. 
2154 */ 2155 if (is_migrate_highatomic_page(page)) { 2156 /* 2157 * It should never happen but changes to 2158 * locking could inadvertently allow a per-cpu 2159 * drain to add pages to MIGRATE_HIGHATOMIC 2160 * while unreserving so be safe and watch for 2161 * underflows. 2162 */ 2163 zone->nr_reserved_highatomic -= min( 2164 pageblock_nr_pages, 2165 zone->nr_reserved_highatomic); 2166 } 2167 2168 /* 2169 * Convert to ac->migratetype and avoid the normal 2170 * pageblock stealing heuristics. Minimally, the caller 2171 * is doing the work and needs the pages. More 2172 * importantly, if the block was always converted to 2173 * MIGRATE_UNMOVABLE or another type then the number 2174 * of pageblocks that cannot be completely freed 2175 * may increase. 2176 */ 2177 set_pageblock_migratetype(page, ac->migratetype); 2178 ret = move_freepages_block(zone, page, ac->migratetype, 2179 NULL); 2180 if (ret) { 2181 spin_unlock_irqrestore(&zone->lock, flags); 2182 return ret; 2183 } 2184 } 2185 spin_unlock_irqrestore(&zone->lock, flags); 2186 } 2187 2188 return false; 2189 } 2190 2191 /* 2192 * Try finding a free buddy page on the fallback list and put it on the free 2193 * list of requested migratetype, possibly along with other pages from the same 2194 * block, depending on fragmentation avoidance heuristics. Returns true if 2195 * fallback was found so that __rmqueue_smallest() can grab it. 2196 */ 2197 static inline bool 2198 __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype) 2199 { 2200 struct free_area *area; 2201 unsigned int current_order; 2202 struct page *page; 2203 int fallback_mt; 2204 bool can_steal; 2205 2206 /* Find the largest possible block of pages in the other list */ 2207 for (current_order = MAX_ORDER-1; 2208 current_order >= order && current_order <= MAX_ORDER-1; 2209 --current_order) { 2210 area = &(zone->free_area[current_order]); 2211 fallback_mt = find_suitable_fallback(area, current_order, 2212 start_migratetype, false, &can_steal); 2213 if (fallback_mt == -1) 2214 continue; 2215 2216 page = list_first_entry(&area->free_list[fallback_mt], 2217 struct page, lru); 2218 2219 steal_suitable_fallback(zone, page, start_migratetype, 2220 can_steal); 2221 2222 trace_mm_page_alloc_extfrag(page, order, current_order, 2223 start_migratetype, fallback_mt); 2224 2225 return true; 2226 } 2227 2228 return false; 2229 } 2230 2231 /* 2232 * Do the hard work of removing an element from the buddy allocator. 2233 * Call me with the zone->lock already held. 2234 */ 2235 static struct page *__rmqueue(struct zone *zone, unsigned int order, 2236 int migratetype) 2237 { 2238 struct page *page; 2239 2240 retry: 2241 page = __rmqueue_smallest(zone, order, migratetype); 2242 if (unlikely(!page)) { 2243 if (migratetype == MIGRATE_MOVABLE) 2244 page = __rmqueue_cma_fallback(zone, order); 2245 2246 if (!page && __rmqueue_fallback(zone, order, migratetype)) 2247 goto retry; 2248 } 2249 2250 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2251 return page; 2252 } 2253 2254 /* 2255 * Obtain a specified number of elements from the buddy allocator, all under 2256 * a single hold of the lock, for efficiency. Add them to the supplied list. 2257 * Returns the number of new pages which were placed at *list. 
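 * * The typical caller in this file is __rmqueue_pcplist() below, which uses this to refill an empty per-cpu list with pcp->batch order-0 pages.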
2258 */ 2259 static int rmqueue_bulk(struct zone *zone, unsigned int order, 2260 unsigned long count, struct list_head *list, 2261 int migratetype, bool cold) 2262 { 2263 int i, alloced = 0; 2264 2265 spin_lock(&zone->lock); 2266 for (i = 0; i < count; ++i) { 2267 struct page *page = __rmqueue(zone, order, migratetype); 2268 if (unlikely(page == NULL)) 2269 break; 2270 2271 if (unlikely(check_pcp_refill(page))) 2272 continue; 2273 2274 /* 2275 * Split buddy pages returned by expand() are received here 2276 * in physical page order. The page is added to the callers and 2277 * list and the list head then moves forward. From the callers 2278 * perspective, the linked list is ordered by page number in 2279 * some conditions. This is useful for IO devices that can 2280 * merge IO requests if the physical pages are ordered 2281 * properly. 2282 */ 2283 if (likely(!cold)) 2284 list_add(&page->lru, list); 2285 else 2286 list_add_tail(&page->lru, list); 2287 list = &page->lru; 2288 alloced++; 2289 if (is_migrate_cma(get_pcppage_migratetype(page))) 2290 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 2291 -(1 << order)); 2292 } 2293 2294 /* 2295 * i pages were removed from the buddy list even if some leak due 2296 * to check_pcp_refill failing so adjust NR_FREE_PAGES based 2297 * on i. Do not confuse with 'alloced' which is the number of 2298 * pages added to the pcp list. 2299 */ 2300 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 2301 spin_unlock(&zone->lock); 2302 return alloced; 2303 } 2304 2305 #ifdef CONFIG_NUMA 2306 /* 2307 * Called from the vmstat counter updater to drain pagesets of this 2308 * currently executing processor on remote nodes after they have 2309 * expired. 2310 * 2311 * Note that this function must be called with the thread pinned to 2312 * a single processor. 2313 */ 2314 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) 2315 { 2316 unsigned long flags; 2317 int to_drain, batch; 2318 2319 local_irq_save(flags); 2320 batch = READ_ONCE(pcp->batch); 2321 to_drain = min(pcp->count, batch); 2322 if (to_drain > 0) { 2323 free_pcppages_bulk(zone, to_drain, pcp); 2324 pcp->count -= to_drain; 2325 } 2326 local_irq_restore(flags); 2327 } 2328 #endif 2329 2330 /* 2331 * Drain pcplists of the indicated processor and zone. 2332 * 2333 * The processor must either be the current processor and the 2334 * thread pinned to the current processor or a processor that 2335 * is not online. 2336 */ 2337 static void drain_pages_zone(unsigned int cpu, struct zone *zone) 2338 { 2339 unsigned long flags; 2340 struct per_cpu_pageset *pset; 2341 struct per_cpu_pages *pcp; 2342 2343 local_irq_save(flags); 2344 pset = per_cpu_ptr(zone->pageset, cpu); 2345 2346 pcp = &pset->pcp; 2347 if (pcp->count) { 2348 free_pcppages_bulk(zone, pcp->count, pcp); 2349 pcp->count = 0; 2350 } 2351 local_irq_restore(flags); 2352 } 2353 2354 /* 2355 * Drain pcplists of all zones on the indicated processor. 2356 * 2357 * The processor must either be the current processor and the 2358 * thread pinned to the current processor or a processor that 2359 * is not online. 2360 */ 2361 static void drain_pages(unsigned int cpu) 2362 { 2363 struct zone *zone; 2364 2365 for_each_populated_zone(zone) { 2366 drain_pages_zone(cpu, zone); 2367 } 2368 } 2369 2370 /* 2371 * Spill all of this CPU's per-cpu pages back into the buddy allocator. 2372 * 2373 * The CPU has to be pinned. When zone parameter is non-NULL, spill just 2374 * the single zone's pages. 
2375 */ 2376 void drain_local_pages(struct zone *zone) 2377 { 2378 int cpu = smp_processor_id(); 2379 2380 if (zone) 2381 drain_pages_zone(cpu, zone); 2382 else 2383 drain_pages(cpu); 2384 } 2385 2386 static void drain_local_pages_wq(struct work_struct *work) 2387 { 2388 /* 2389 * drain_all_pages doesn't use proper cpu hotplug protection so 2390 * we can race with cpu offline when the WQ can move this from 2391 * a cpu pinned worker to an unbound one. We can operate on a different 2392 * cpu which is allright but we also have to make sure to not move to 2393 * a different one. 2394 */ 2395 preempt_disable(); 2396 drain_local_pages(NULL); 2397 preempt_enable(); 2398 } 2399 2400 /* 2401 * Spill all the per-cpu pages from all CPUs back into the buddy allocator. 2402 * 2403 * When zone parameter is non-NULL, spill just the single zone's pages. 2404 * 2405 * Note that this can be extremely slow as the draining happens in a workqueue. 2406 */ 2407 void drain_all_pages(struct zone *zone) 2408 { 2409 int cpu; 2410 2411 /* 2412 * Allocate in the BSS so we wont require allocation in 2413 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y 2414 */ 2415 static cpumask_t cpus_with_pcps; 2416 2417 /* 2418 * Make sure nobody triggers this path before mm_percpu_wq is fully 2419 * initialized. 2420 */ 2421 if (WARN_ON_ONCE(!mm_percpu_wq)) 2422 return; 2423 2424 /* Workqueues cannot recurse */ 2425 if (current->flags & PF_WQ_WORKER) 2426 return; 2427 2428 /* 2429 * Do not drain if one is already in progress unless it's specific to 2430 * a zone. Such callers are primarily CMA and memory hotplug and need 2431 * the drain to be complete when the call returns. 2432 */ 2433 if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) { 2434 if (!zone) 2435 return; 2436 mutex_lock(&pcpu_drain_mutex); 2437 } 2438 2439 /* 2440 * We don't care about racing with CPU hotplug event 2441 * as offline notification will cause the notified 2442 * cpu to drain that CPU pcps and on_each_cpu_mask 2443 * disables preemption as part of its processing 2444 */ 2445 for_each_online_cpu(cpu) { 2446 struct per_cpu_pageset *pcp; 2447 struct zone *z; 2448 bool has_pcps = false; 2449 2450 if (zone) { 2451 pcp = per_cpu_ptr(zone->pageset, cpu); 2452 if (pcp->pcp.count) 2453 has_pcps = true; 2454 } else { 2455 for_each_populated_zone(z) { 2456 pcp = per_cpu_ptr(z->pageset, cpu); 2457 if (pcp->pcp.count) { 2458 has_pcps = true; 2459 break; 2460 } 2461 } 2462 } 2463 2464 if (has_pcps) 2465 cpumask_set_cpu(cpu, &cpus_with_pcps); 2466 else 2467 cpumask_clear_cpu(cpu, &cpus_with_pcps); 2468 } 2469 2470 for_each_cpu(cpu, &cpus_with_pcps) { 2471 struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu); 2472 INIT_WORK(work, drain_local_pages_wq); 2473 queue_work_on(cpu, mm_percpu_wq, work); 2474 } 2475 for_each_cpu(cpu, &cpus_with_pcps) 2476 flush_work(per_cpu_ptr(&pcpu_drain, cpu)); 2477 2478 mutex_unlock(&pcpu_drain_mutex); 2479 } 2480 2481 #ifdef CONFIG_HIBERNATION 2482 2483 void mark_free_pages(struct zone *zone) 2484 { 2485 unsigned long pfn, max_zone_pfn; 2486 unsigned long flags; 2487 unsigned int order, t; 2488 struct page *page; 2489 2490 if (zone_is_empty(zone)) 2491 return; 2492 2493 spin_lock_irqsave(&zone->lock, flags); 2494 2495 max_zone_pfn = zone_end_pfn(zone); 2496 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) 2497 if (pfn_valid(pfn)) { 2498 page = pfn_to_page(pfn); 2499 2500 if (page_zone(page) != zone) 2501 continue; 2502 2503 if (!swsusp_page_is_forbidden(page)) 2504 swsusp_unset_page_free(page); 2505 } 2506 2507 
for_each_migratetype_order(order, t) { 2508 list_for_each_entry(page, 2509 &zone->free_area[order].free_list[t], lru) { 2510 unsigned long i; 2511 2512 pfn = page_to_pfn(page); 2513 for (i = 0; i < (1UL << order); i++) 2514 swsusp_set_page_free(pfn_to_page(pfn + i)); 2515 } 2516 } 2517 spin_unlock_irqrestore(&zone->lock, flags); 2518 } 2519 #endif /* CONFIG_PM */ 2520 2521 /* 2522 * Free a 0-order page 2523 * cold == true ? free a cold page : free a hot page 2524 */ 2525 void free_hot_cold_page(struct page *page, bool cold) 2526 { 2527 struct zone *zone = page_zone(page); 2528 struct per_cpu_pages *pcp; 2529 unsigned long flags; 2530 unsigned long pfn = page_to_pfn(page); 2531 int migratetype; 2532 2533 if (!free_pcp_prepare(page)) 2534 return; 2535 2536 migratetype = get_pfnblock_migratetype(page, pfn); 2537 set_pcppage_migratetype(page, migratetype); 2538 local_irq_save(flags); 2539 __count_vm_event(PGFREE); 2540 2541 /* 2542 * We only track unmovable, reclaimable and movable on pcp lists. 2543 * Free ISOLATE pages back to the allocator because they are being 2544 * offlined but treat HIGHATOMIC as movable pages so we can get those 2545 * areas back if necessary. Otherwise, we may have to free 2546 * excessively into the page allocator 2547 */ 2548 if (migratetype >= MIGRATE_PCPTYPES) { 2549 if (unlikely(is_migrate_isolate(migratetype))) { 2550 free_one_page(zone, page, pfn, 0, migratetype); 2551 goto out; 2552 } 2553 migratetype = MIGRATE_MOVABLE; 2554 } 2555 2556 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2557 if (!cold) 2558 list_add(&page->lru, &pcp->lists[migratetype]); 2559 else 2560 list_add_tail(&page->lru, &pcp->lists[migratetype]); 2561 pcp->count++; 2562 if (pcp->count >= pcp->high) { 2563 unsigned long batch = READ_ONCE(pcp->batch); 2564 free_pcppages_bulk(zone, batch, pcp); 2565 pcp->count -= batch; 2566 } 2567 2568 out: 2569 local_irq_restore(flags); 2570 } 2571 2572 /* 2573 * Free a list of 0-order pages 2574 */ 2575 void free_hot_cold_page_list(struct list_head *list, bool cold) 2576 { 2577 struct page *page, *next; 2578 2579 list_for_each_entry_safe(page, next, list, lru) { 2580 trace_mm_page_free_batched(page, cold); 2581 free_hot_cold_page(page, cold); 2582 } 2583 } 2584 2585 /* 2586 * split_page takes a non-compound higher-order page, and splits it into 2587 * n (1<<order) sub-pages: page[0..n] 2588 * Each sub-page must be freed individually. 2589 * 2590 * Note: this is probably too low level an operation for use in drivers. 2591 * Please consult with lkml before using this in your driver. 2592 */ 2593 void split_page(struct page *page, unsigned int order) 2594 { 2595 int i; 2596 2597 VM_BUG_ON_PAGE(PageCompound(page), page); 2598 VM_BUG_ON_PAGE(!page_count(page), page); 2599 2600 #ifdef CONFIG_KMEMCHECK 2601 /* 2602 * Split shadow pages too, because free(page[0]) would 2603 * otherwise free the whole shadow. 2604 */ 2605 if (kmemcheck_page_is_tracked(page)) 2606 split_page(virt_to_page(page[0].shadow), order); 2607 #endif 2608 2609 for (i = 1; i < (1 << order); i++) 2610 set_page_refcounted(page + i); 2611 split_page_owner(page, order); 2612 } 2613 EXPORT_SYMBOL_GPL(split_page); 2614 2615 int __isolate_free_page(struct page *page, unsigned int order) 2616 { 2617 unsigned long watermark; 2618 struct zone *zone; 2619 int mt; 2620 2621 BUG_ON(!PageBuddy(page)); 2622 2623 zone = page_zone(page); 2624 mt = get_pageblock_migratetype(page); 2625 2626 if (!is_migrate_isolate(mt)) { 2627 /* 2628 * Obey watermarks as if the page was being allocated. 
We can 2629 * emulate a high-order watermark check with a raised order-0 2630 * watermark, because we already know our high-order page 2631 * exists. 2632 */ 2633 watermark = min_wmark_pages(zone) + (1UL << order); 2634 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) 2635 return 0; 2636 2637 __mod_zone_freepage_state(zone, -(1UL << order), mt); 2638 } 2639 2640 /* Remove page from free list */ 2641 list_del(&page->lru); 2642 zone->free_area[order].nr_free--; 2643 rmv_page_order(page); 2644 2645 /* 2646 * Set the pageblock if the isolated page is at least half of a 2647 * pageblock 2648 */ 2649 if (order >= pageblock_order - 1) { 2650 struct page *endpage = page + (1 << order) - 1; 2651 for (; page < endpage; page += pageblock_nr_pages) { 2652 int mt = get_pageblock_migratetype(page); 2653 if (!is_migrate_isolate(mt) && !is_migrate_cma(mt) 2654 && !is_migrate_highatomic(mt)) 2655 set_pageblock_migratetype(page, 2656 MIGRATE_MOVABLE); 2657 } 2658 } 2659 2660 2661 return 1UL << order; 2662 } 2663 2664 /* 2665 * Update NUMA hit/miss statistics 2666 * 2667 * Must be called with interrupts disabled. 2668 */ 2669 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) 2670 { 2671 #ifdef CONFIG_NUMA 2672 enum zone_stat_item local_stat = NUMA_LOCAL; 2673 2674 if (z->node != numa_node_id()) 2675 local_stat = NUMA_OTHER; 2676 2677 if (z->node == preferred_zone->node) 2678 __inc_zone_state(z, NUMA_HIT); 2679 else { 2680 __inc_zone_state(z, NUMA_MISS); 2681 __inc_zone_state(preferred_zone, NUMA_FOREIGN); 2682 } 2683 __inc_zone_state(z, local_stat); 2684 #endif 2685 } 2686 2687 /* Remove page from the per-cpu list, caller must protect the list */ 2688 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, 2689 bool cold, struct per_cpu_pages *pcp, 2690 struct list_head *list) 2691 { 2692 struct page *page; 2693 2694 do { 2695 if (list_empty(list)) { 2696 pcp->count += rmqueue_bulk(zone, 0, 2697 pcp->batch, list, 2698 migratetype, cold); 2699 if (unlikely(list_empty(list))) 2700 return NULL; 2701 } 2702 2703 if (cold) 2704 page = list_last_entry(list, struct page, lru); 2705 else 2706 page = list_first_entry(list, struct page, lru); 2707 2708 list_del(&page->lru); 2709 pcp->count--; 2710 } while (check_new_pcp(page)); 2711 2712 return page; 2713 } 2714 2715 /* Lock and remove page from the per-cpu list */ 2716 static struct page *rmqueue_pcplist(struct zone *preferred_zone, 2717 struct zone *zone, unsigned int order, 2718 gfp_t gfp_flags, int migratetype) 2719 { 2720 struct per_cpu_pages *pcp; 2721 struct list_head *list; 2722 bool cold = ((gfp_flags & __GFP_COLD) != 0); 2723 struct page *page; 2724 unsigned long flags; 2725 2726 local_irq_save(flags); 2727 pcp = &this_cpu_ptr(zone->pageset)->pcp; 2728 list = &pcp->lists[migratetype]; 2729 page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list); 2730 if (page) { 2731 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2732 zone_statistics(preferred_zone, zone); 2733 } 2734 local_irq_restore(flags); 2735 return page; 2736 } 2737 2738 /* 2739 * Allocate a page from the given zone. Use pcplists for order-0 allocations. 
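 * Higher-order requests skip the pcplists and instead take zone->lock and pull pages straight off the buddy free lists via __rmqueue().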
2740 */ 2741 static inline 2742 struct page *rmqueue(struct zone *preferred_zone, 2743 struct zone *zone, unsigned int order, 2744 gfp_t gfp_flags, unsigned int alloc_flags, 2745 int migratetype) 2746 { 2747 unsigned long flags; 2748 struct page *page; 2749 2750 if (likely(order == 0)) { 2751 page = rmqueue_pcplist(preferred_zone, zone, order, 2752 gfp_flags, migratetype); 2753 goto out; 2754 } 2755 2756 /* 2757 * We most definitely don't want callers attempting to 2758 * allocate greater than order-1 page units with __GFP_NOFAIL. 2759 */ 2760 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); 2761 spin_lock_irqsave(&zone->lock, flags); 2762 2763 do { 2764 page = NULL; 2765 if (alloc_flags & ALLOC_HARDER) { 2766 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); 2767 if (page) 2768 trace_mm_page_alloc_zone_locked(page, order, migratetype); 2769 } 2770 if (!page) 2771 page = __rmqueue(zone, order, migratetype); 2772 } while (page && check_new_pages(page, order)); 2773 spin_unlock(&zone->lock); 2774 if (!page) 2775 goto failed; 2776 __mod_zone_freepage_state(zone, -(1 << order), 2777 get_pcppage_migratetype(page)); 2778 2779 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 2780 zone_statistics(preferred_zone, zone); 2781 local_irq_restore(flags); 2782 2783 out: 2784 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); 2785 return page; 2786 2787 failed: 2788 local_irq_restore(flags); 2789 return NULL; 2790 } 2791 2792 #ifdef CONFIG_FAIL_PAGE_ALLOC 2793 2794 static struct { 2795 struct fault_attr attr; 2796 2797 bool ignore_gfp_highmem; 2798 bool ignore_gfp_reclaim; 2799 u32 min_order; 2800 } fail_page_alloc = { 2801 .attr = FAULT_ATTR_INITIALIZER, 2802 .ignore_gfp_reclaim = true, 2803 .ignore_gfp_highmem = true, 2804 .min_order = 1, 2805 }; 2806 2807 static int __init setup_fail_page_alloc(char *str) 2808 { 2809 return setup_fault_attr(&fail_page_alloc.attr, str); 2810 } 2811 __setup("fail_page_alloc=", setup_fail_page_alloc); 2812 2813 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2814 { 2815 if (order < fail_page_alloc.min_order) 2816 return false; 2817 if (gfp_mask & __GFP_NOFAIL) 2818 return false; 2819 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) 2820 return false; 2821 if (fail_page_alloc.ignore_gfp_reclaim && 2822 (gfp_mask & __GFP_DIRECT_RECLAIM)) 2823 return false; 2824 2825 return should_fail(&fail_page_alloc.attr, 1 << order); 2826 } 2827 2828 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS 2829 2830 static int __init fail_page_alloc_debugfs(void) 2831 { 2832 umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 2833 struct dentry *dir; 2834 2835 dir = fault_create_debugfs_attr("fail_page_alloc", NULL, 2836 &fail_page_alloc.attr); 2837 if (IS_ERR(dir)) 2838 return PTR_ERR(dir); 2839 2840 if (!debugfs_create_bool("ignore-gfp-wait", mode, dir, 2841 &fail_page_alloc.ignore_gfp_reclaim)) 2842 goto fail; 2843 if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir, 2844 &fail_page_alloc.ignore_gfp_highmem)) 2845 goto fail; 2846 if (!debugfs_create_u32("min-order", mode, dir, 2847 &fail_page_alloc.min_order)) 2848 goto fail; 2849 2850 return 0; 2851 fail: 2852 debugfs_remove_recursive(dir); 2853 2854 return -ENOMEM; 2855 } 2856 2857 late_initcall(fail_page_alloc_debugfs); 2858 2859 #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ 2860 2861 #else /* CONFIG_FAIL_PAGE_ALLOC */ 2862 2863 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) 2864 { 2865 return false; 2866 } 2867 2868 #endif /* 
CONFIG_FAIL_PAGE_ALLOC */ 2869 2870 /* 2871 * Return true if free base pages are above 'mark'. For high-order checks it 2872 * will return true if the order-0 watermark is reached and there is at least 2873 * one free page of a suitable size. Checking now avoids taking the zone lock 2874 * to check in the allocation paths if no pages are free. 2875 */ 2876 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2877 int classzone_idx, unsigned int alloc_flags, 2878 long free_pages) 2879 { 2880 long min = mark; 2881 int o; 2882 const bool alloc_harder = (alloc_flags & ALLOC_HARDER); 2883 2884 /* free_pages may go negative - that's OK */ 2885 free_pages -= (1 << order) - 1; 2886 2887 if (alloc_flags & ALLOC_HIGH) 2888 min -= min / 2; 2889 2890 /* 2891 * If the caller does not have rights to ALLOC_HARDER then subtract 2892 * the high-atomic reserves. This will over-estimate the size of the 2893 * atomic reserve but it avoids a search. 2894 */ 2895 if (likely(!alloc_harder)) 2896 free_pages -= z->nr_reserved_highatomic; 2897 else 2898 min -= min / 4; 2899 2900 #ifdef CONFIG_CMA 2901 /* If allocation can't use CMA areas don't use free CMA pages */ 2902 if (!(alloc_flags & ALLOC_CMA)) 2903 free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES); 2904 #endif 2905 2906 /* 2907 * Check watermarks for an order-0 allocation request. If these 2908 * are not met, then a high-order request also cannot go ahead 2909 * even if a suitable page happened to be free. 2910 */ 2911 if (free_pages <= min + z->lowmem_reserve[classzone_idx]) 2912 return false; 2913 2914 /* If this is an order-0 request then the watermark is fine */ 2915 if (!order) 2916 return true; 2917 2918 /* For a high-order request, check at least one suitable page is free */ 2919 for (o = order; o < MAX_ORDER; o++) { 2920 struct free_area *area = &z->free_area[o]; 2921 int mt; 2922 2923 if (!area->nr_free) 2924 continue; 2925 2926 if (alloc_harder) 2927 return true; 2928 2929 for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) { 2930 if (!list_empty(&area->free_list[mt])) 2931 return true; 2932 } 2933 2934 #ifdef CONFIG_CMA 2935 if ((alloc_flags & ALLOC_CMA) && 2936 !list_empty(&area->free_list[MIGRATE_CMA])) { 2937 return true; 2938 } 2939 #endif 2940 } 2941 return false; 2942 } 2943 2944 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, 2945 int classzone_idx, unsigned int alloc_flags) 2946 { 2947 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2948 zone_page_state(z, NR_FREE_PAGES)); 2949 } 2950 2951 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, 2952 unsigned long mark, int classzone_idx, unsigned int alloc_flags) 2953 { 2954 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2955 long cma_pages = 0; 2956 2957 #ifdef CONFIG_CMA 2958 /* If allocation can't use CMA areas don't use free CMA pages */ 2959 if (!(alloc_flags & ALLOC_CMA)) 2960 cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES); 2961 #endif 2962 2963 /* 2964 * Fast check for order-0 only. If this fails then the reserves 2965 * need to be calculated. There is a corner case where the check 2966 * passes but only the high-order atomic reserves are free. If 2967 * the caller is !atomic then it'll uselessly search the free 2968 * list. That corner case is then slower but it is harmless.
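 * * Purely illustrative numbers (not derived from this code): with mark = 1024, lowmem_reserve = 256 and 2048 free non-CMA pages, 2048 > 1024 + 256 holds, so an order-0 request succeeds here without the full __zone_watermark_ok() calculation.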
2969 */ 2970 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx]) 2971 return true; 2972 2973 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, 2974 free_pages); 2975 } 2976 2977 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, 2978 unsigned long mark, int classzone_idx) 2979 { 2980 long free_pages = zone_page_state(z, NR_FREE_PAGES); 2981 2982 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) 2983 free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES); 2984 2985 return __zone_watermark_ok(z, order, mark, classzone_idx, 0, 2986 free_pages); 2987 } 2988 2989 #ifdef CONFIG_NUMA 2990 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2991 { 2992 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= 2993 RECLAIM_DISTANCE; 2994 } 2995 #else /* CONFIG_NUMA */ 2996 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2997 { 2998 return true; 2999 } 3000 #endif /* CONFIG_NUMA */ 3001 3002 /* 3003 * get_page_from_freelist goes through the zonelist trying to allocate 3004 * a page. 3005 */ 3006 static struct page * 3007 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, 3008 const struct alloc_context *ac) 3009 { 3010 struct zoneref *z = ac->preferred_zoneref; 3011 struct zone *zone; 3012 struct pglist_data *last_pgdat_dirty_limit = NULL; 3013 3014 /* 3015 * Scan zonelist, looking for a zone with enough free. 3016 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 3017 */ 3018 for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 3019 ac->nodemask) { 3020 struct page *page; 3021 unsigned long mark; 3022 3023 if (cpusets_enabled() && 3024 (alloc_flags & ALLOC_CPUSET) && 3025 !__cpuset_zone_allowed(zone, gfp_mask)) 3026 continue; 3027 /* 3028 * When allocating a page cache page for writing, we 3029 * want to get it from a node that is within its dirty 3030 * limit, such that no single node holds more than its 3031 * proportional share of globally allowed dirty pages. 3032 * The dirty limits take into account the node's 3033 * lowmem reserves and high watermark so that kswapd 3034 * should be able to balance it without having to 3035 * write pages from its LRU list. 3036 * 3037 * XXX: For now, allow allocations to potentially 3038 * exceed the per-node dirty limit in the slowpath 3039 * (spread_dirty_pages unset) before going into reclaim, 3040 * which is important when on a NUMA setup the allowed 3041 * nodes are together not big enough to reach the 3042 * global limit. The proper fix for these situations 3043 * will require awareness of nodes in the 3044 * dirty-throttling and the flusher threads. 
3045 */ 3046 if (ac->spread_dirty_pages) { 3047 if (last_pgdat_dirty_limit == zone->zone_pgdat) 3048 continue; 3049 3050 if (!node_dirty_ok(zone->zone_pgdat)) { 3051 last_pgdat_dirty_limit = zone->zone_pgdat; 3052 continue; 3053 } 3054 } 3055 3056 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK]; 3057 if (!zone_watermark_fast(zone, order, mark, 3058 ac_classzone_idx(ac), alloc_flags)) { 3059 int ret; 3060 3061 /* Checked here to keep the fast path fast */ 3062 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK); 3063 if (alloc_flags & ALLOC_NO_WATERMARKS) 3064 goto try_this_zone; 3065 3066 if (node_reclaim_mode == 0 || 3067 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) 3068 continue; 3069 3070 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); 3071 switch (ret) { 3072 case NODE_RECLAIM_NOSCAN: 3073 /* did not scan */ 3074 continue; 3075 case NODE_RECLAIM_FULL: 3076 /* scanned but unreclaimable */ 3077 continue; 3078 default: 3079 /* did we reclaim enough */ 3080 if (zone_watermark_ok(zone, order, mark, 3081 ac_classzone_idx(ac), alloc_flags)) 3082 goto try_this_zone; 3083 3084 continue; 3085 } 3086 } 3087 3088 try_this_zone: 3089 page = rmqueue(ac->preferred_zoneref->zone, zone, order, 3090 gfp_mask, alloc_flags, ac->migratetype); 3091 if (page) { 3092 prep_new_page(page, order, gfp_mask, alloc_flags); 3093 3094 /* 3095 * If this is a high-order atomic allocation then check 3096 * if the pageblock should be reserved for the future 3097 */ 3098 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) 3099 reserve_highatomic_pageblock(page, zone, order); 3100 3101 return page; 3102 } 3103 } 3104 3105 return NULL; 3106 } 3107 3108 /* 3109 * Large machines with many possible nodes should not always dump per-node 3110 * meminfo in irq context. 3111 */ 3112 static inline bool should_suppress_show_mem(void) 3113 { 3114 bool ret = false; 3115 3116 #if NODES_SHIFT > 8 3117 ret = in_interrupt(); 3118 #endif 3119 return ret; 3120 } 3121 3122 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) 3123 { 3124 unsigned int filter = SHOW_MEM_FILTER_NODES; 3125 static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1); 3126 3127 if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs)) 3128 return; 3129 3130 /* 3131 * This documents exceptions given to allocations in certain 3132 * contexts that are allowed to allocate outside current's set 3133 * of allowed nodes. 3134 */ 3135 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3136 if (test_thread_flag(TIF_MEMDIE) || 3137 (current->flags & (PF_MEMALLOC | PF_EXITING))) 3138 filter &= ~SHOW_MEM_FILTER_NODES; 3139 if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) 3140 filter &= ~SHOW_MEM_FILTER_NODES; 3141 3142 show_mem(filter, nodemask); 3143 } 3144 3145 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) 
3146 { 3147 struct va_format vaf; 3148 va_list args; 3149 static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL, 3150 DEFAULT_RATELIMIT_BURST); 3151 3152 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs)) 3153 return; 3154 3155 pr_warn("%s: ", current->comm); 3156 3157 va_start(args, fmt); 3158 vaf.fmt = fmt; 3159 vaf.va = &args; 3160 pr_cont("%pV", &vaf); 3161 va_end(args); 3162 3163 pr_cont(", mode:%#x(%pGg), nodemask=", gfp_mask, &gfp_mask); 3164 if (nodemask) 3165 pr_cont("%*pbl\n", nodemask_pr_args(nodemask)); 3166 else 3167 pr_cont("(null)\n"); 3168 3169 cpuset_print_current_mems_allowed(); 3170 3171 dump_stack(); 3172 warn_alloc_show_mem(gfp_mask, nodemask); 3173 } 3174 3175 static inline struct page * 3176 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, 3177 unsigned int alloc_flags, 3178 const struct alloc_context *ac) 3179 { 3180 struct page *page; 3181 3182 page = get_page_from_freelist(gfp_mask, order, 3183 alloc_flags|ALLOC_CPUSET, ac); 3184 /* 3185 * fallback to ignore cpuset restriction if our nodes 3186 * are depleted 3187 */ 3188 if (!page) 3189 page = get_page_from_freelist(gfp_mask, order, 3190 alloc_flags, ac); 3191 3192 return page; 3193 } 3194 3195 static inline struct page * 3196 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 3197 const struct alloc_context *ac, unsigned long *did_some_progress) 3198 { 3199 struct oom_control oc = { 3200 .zonelist = ac->zonelist, 3201 .nodemask = ac->nodemask, 3202 .memcg = NULL, 3203 .gfp_mask = gfp_mask, 3204 .order = order, 3205 }; 3206 struct page *page; 3207 3208 *did_some_progress = 0; 3209 3210 /* 3211 * Acquire the oom lock. If that fails, somebody else is 3212 * making progress for us. 3213 */ 3214 if (!mutex_trylock(&oom_lock)) { 3215 *did_some_progress = 1; 3216 schedule_timeout_uninterruptible(1); 3217 return NULL; 3218 } 3219 3220 /* 3221 * Go through the zonelist yet one more time, keep very high watermark 3222 * here, this is only to catch a parallel oom killing, we must fail if 3223 * we're still under heavy pressure. 3224 */ 3225 page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order, 3226 ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); 3227 if (page) 3228 goto out; 3229 3230 /* Coredumps can quickly deplete all memory reserves */ 3231 if (current->flags & PF_DUMPCORE) 3232 goto out; 3233 /* The OOM killer will not help higher order allocs */ 3234 if (order > PAGE_ALLOC_COSTLY_ORDER) 3235 goto out; 3236 /* The OOM killer does not needlessly kill tasks for lowmem */ 3237 if (ac->high_zoneidx < ZONE_NORMAL) 3238 goto out; 3239 if (pm_suspended_storage()) 3240 goto out; 3241 /* 3242 * XXX: GFP_NOFS allocations should rather fail than rely on 3243 * other request to make a forward progress. 3244 * We are in an unfortunate situation where out_of_memory cannot 3245 * do much for this context but let's try it to at least get 3246 * access to memory reserved if the current task is killed (see 3247 * out_of_memory). Once filesystems are ready to handle allocation 3248 * failures more gracefully we should just bail out here. 
3249 */ 3250 3251 /* The OOM killer may not free memory on a specific node */ 3252 if (gfp_mask & __GFP_THISNODE) 3253 goto out; 3254 3255 /* Exhausted what can be done so it's blamo time */ 3256 if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) { 3257 *did_some_progress = 1; 3258 3259 /* 3260 * Help non-failing allocations by giving them access to memory 3261 * reserves 3262 */ 3263 if (gfp_mask & __GFP_NOFAIL) 3264 page = __alloc_pages_cpuset_fallback(gfp_mask, order, 3265 ALLOC_NO_WATERMARKS, ac); 3266 } 3267 out: 3268 mutex_unlock(&oom_lock); 3269 return page; 3270 } 3271 3272 /* 3273 * Maximum number of compaction retries wit a progress before OOM 3274 * killer is consider as the only way to move forward. 3275 */ 3276 #define MAX_COMPACT_RETRIES 16 3277 3278 #ifdef CONFIG_COMPACTION 3279 /* Try memory compaction for high-order allocations before reclaim */ 3280 static struct page * 3281 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3282 unsigned int alloc_flags, const struct alloc_context *ac, 3283 enum compact_priority prio, enum compact_result *compact_result) 3284 { 3285 struct page *page; 3286 unsigned int noreclaim_flag; 3287 3288 if (!order) 3289 return NULL; 3290 3291 noreclaim_flag = memalloc_noreclaim_save(); 3292 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3293 prio); 3294 memalloc_noreclaim_restore(noreclaim_flag); 3295 3296 if (*compact_result <= COMPACT_INACTIVE) 3297 return NULL; 3298 3299 /* 3300 * At least in one zone compaction wasn't deferred or skipped, so let's 3301 * count a compaction stall 3302 */ 3303 count_vm_event(COMPACTSTALL); 3304 3305 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3306 3307 if (page) { 3308 struct zone *zone = page_zone(page); 3309 3310 zone->compact_blockskip_flush = false; 3311 compaction_defer_reset(zone, order, true); 3312 count_vm_event(COMPACTSUCCESS); 3313 return page; 3314 } 3315 3316 /* 3317 * It's bad if compaction run occurs and fails. The most likely reason 3318 * is that pages exist, but not enough to satisfy watermarks. 3319 */ 3320 count_vm_event(COMPACTFAIL); 3321 3322 cond_resched(); 3323 3324 return NULL; 3325 } 3326 3327 static inline bool 3328 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3329 enum compact_result compact_result, 3330 enum compact_priority *compact_priority, 3331 int *compaction_retries) 3332 { 3333 int max_retries = MAX_COMPACT_RETRIES; 3334 int min_priority; 3335 bool ret = false; 3336 int retries = *compaction_retries; 3337 enum compact_priority priority = *compact_priority; 3338 3339 if (!order) 3340 return false; 3341 3342 if (compaction_made_progress(compact_result)) 3343 (*compaction_retries)++; 3344 3345 /* 3346 * compaction considers all the zone as desperately out of memory 3347 * so it doesn't really make much sense to retry except when the 3348 * failure could be caused by insufficient priority 3349 */ 3350 if (compaction_failed(compact_result)) 3351 goto check_priority; 3352 3353 /* 3354 * make sure the compaction wasn't deferred or didn't bail out early 3355 * due to locks contention before we declare that we should give up. 3356 * But do not retry if the given zonelist is not suitable for 3357 * compaction. 
3358 */ 3359 if (compaction_withdrawn(compact_result)) { 3360 ret = compaction_zonelist_suitable(ac, order, alloc_flags); 3361 goto out; 3362 } 3363 3364 /* 3365 * !costly requests are much more important than __GFP_REPEAT 3366 * costly ones because they are de facto nofail and invoke OOM 3367 * killer to move on while costly can fail and users are ready 3368 * to cope with that. 1/4 retries is rather arbitrary but we 3369 * would need much more detailed feedback from compaction to 3370 * make a better decision. 3371 */ 3372 if (order > PAGE_ALLOC_COSTLY_ORDER) 3373 max_retries /= 4; 3374 if (*compaction_retries <= max_retries) { 3375 ret = true; 3376 goto out; 3377 } 3378 3379 /* 3380 * Make sure there are attempts at the highest priority if we exhausted 3381 * all retries or failed at the lower priorities. 3382 */ 3383 check_priority: 3384 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? 3385 MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; 3386 3387 if (*compact_priority > min_priority) { 3388 (*compact_priority)--; 3389 *compaction_retries = 0; 3390 ret = true; 3391 } 3392 out: 3393 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); 3394 return ret; 3395 } 3396 #else 3397 static inline struct page * 3398 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3399 unsigned int alloc_flags, const struct alloc_context *ac, 3400 enum compact_priority prio, enum compact_result *compact_result) 3401 { 3402 *compact_result = COMPACT_SKIPPED; 3403 return NULL; 3404 } 3405 3406 static inline bool 3407 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3408 enum compact_result compact_result, 3409 enum compact_priority *compact_priority, 3410 int *compaction_retries) 3411 { 3412 struct zone *zone; 3413 struct zoneref *z; 3414 3415 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) 3416 return false; 3417 3418 /* 3419 * There are setups with compaction disabled which would prefer to loop 3420 * inside the allocator rather than hit the oom killer prematurely. 3421 * Let's give them a good hope and keep retrying while the order-0 3422 * watermarks are OK. 
3423 */ 3424 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 3425 ac->nodemask) { 3426 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), 3427 ac_classzone_idx(ac), alloc_flags)) 3428 return true; 3429 } 3430 return false; 3431 } 3432 #endif /* CONFIG_COMPACTION */ 3433 3434 /* Perform direct synchronous page reclaim */ 3435 static int 3436 __perform_reclaim(gfp_t gfp_mask, unsigned int order, 3437 const struct alloc_context *ac) 3438 { 3439 struct reclaim_state reclaim_state; 3440 int progress; 3441 unsigned int noreclaim_flag; 3442 3443 cond_resched(); 3444 3445 /* We now go into synchronous reclaim */ 3446 cpuset_memory_pressure_bump(); 3447 noreclaim_flag = memalloc_noreclaim_save(); 3448 lockdep_set_current_reclaim_state(gfp_mask); 3449 reclaim_state.reclaimed_slab = 0; 3450 current->reclaim_state = &reclaim_state; 3451 3452 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, 3453 ac->nodemask); 3454 3455 current->reclaim_state = NULL; 3456 lockdep_clear_current_reclaim_state(); 3457 memalloc_noreclaim_restore(noreclaim_flag); 3458 3459 cond_resched(); 3460 3461 return progress; 3462 } 3463 3464 /* The really slow allocator path where we enter direct reclaim */ 3465 static inline struct page * 3466 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 3467 unsigned int alloc_flags, const struct alloc_context *ac, 3468 unsigned long *did_some_progress) 3469 { 3470 struct page *page = NULL; 3471 bool drained = false; 3472 3473 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); 3474 if (unlikely(!(*did_some_progress))) 3475 return NULL; 3476 3477 retry: 3478 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3479 3480 /* 3481 * If an allocation failed after direct reclaim, it could be because 3482 * pages are pinned on the per-cpu lists or in high alloc reserves. 3483 * Shrink them them and try again 3484 */ 3485 if (!page && !drained) { 3486 unreserve_highatomic_pageblock(ac, false); 3487 drain_all_pages(NULL); 3488 drained = true; 3489 goto retry; 3490 } 3491 3492 return page; 3493 } 3494 3495 static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac) 3496 { 3497 struct zoneref *z; 3498 struct zone *zone; 3499 pg_data_t *last_pgdat = NULL; 3500 3501 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 3502 ac->high_zoneidx, ac->nodemask) { 3503 if (last_pgdat != zone->zone_pgdat) 3504 wakeup_kswapd(zone, order, ac->high_zoneidx); 3505 last_pgdat = zone->zone_pgdat; 3506 } 3507 } 3508 3509 static inline unsigned int 3510 gfp_to_alloc_flags(gfp_t gfp_mask) 3511 { 3512 unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 3513 3514 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 3515 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 3516 3517 /* 3518 * The caller may dip into page reserves a bit more if the caller 3519 * cannot run direct reclaim, or if the caller has realtime scheduling 3520 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 3521 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH). 3522 */ 3523 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 3524 3525 if (gfp_mask & __GFP_ATOMIC) { 3526 /* 3527 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 3528 * if it can't schedule. 3529 */ 3530 if (!(gfp_mask & __GFP_NOMEMALLOC)) 3531 alloc_flags |= ALLOC_HARDER; 3532 /* 3533 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 3534 * comment for __cpuset_node_allowed(). 
3535 */ 3536 alloc_flags &= ~ALLOC_CPUSET; 3537 } else if (unlikely(rt_task(current)) && !in_interrupt()) 3538 alloc_flags |= ALLOC_HARDER; 3539 3540 #ifdef CONFIG_CMA 3541 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 3542 alloc_flags |= ALLOC_CMA; 3543 #endif 3544 return alloc_flags; 3545 } 3546 3547 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 3548 { 3549 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) 3550 return false; 3551 3552 if (gfp_mask & __GFP_MEMALLOC) 3553 return true; 3554 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 3555 return true; 3556 if (!in_interrupt() && 3557 ((current->flags & PF_MEMALLOC) || 3558 unlikely(test_thread_flag(TIF_MEMDIE)))) 3559 return true; 3560 3561 return false; 3562 } 3563 3564 /* 3565 * Checks whether it makes sense to retry the reclaim to make forward progress 3566 * for the given allocation request. 3567 * 3568 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row 3569 * without success, or when we couldn't even meet the watermark if we 3570 * reclaimed all remaining pages on the LRU lists. 3571 * 3572 * Returns true if a retry is viable or false to enter the oom path. 3573 */ 3574 static inline bool 3575 should_reclaim_retry(gfp_t gfp_mask, unsigned order, 3576 struct alloc_context *ac, int alloc_flags, 3577 bool did_some_progress, int *no_progress_loops) 3578 { 3579 struct zone *zone; 3580 struct zoneref *z; 3581 3582 /* 3583 * Costly allocations might have made progress but this doesn't mean 3584 * their order will become available due to high fragmentation so 3585 * always increment the no progress counter for them 3586 */ 3587 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) 3588 *no_progress_loops = 0; 3589 else 3590 (*no_progress_loops)++; 3591 3592 /* 3593 * Make sure we converge to OOM if we cannot make any progress 3594 * several times in a row. 3595 */ 3596 if (*no_progress_loops > MAX_RECLAIM_RETRIES) { 3597 /* Before OOM, exhaust highatomic_reserve */ 3598 return unreserve_highatomic_pageblock(ac, true); 3599 } 3600 3601 /* 3602 * Keep reclaiming pages while there is a chance this will lead 3603 * somewhere. If none of the target zones can satisfy our allocation 3604 * request even if all reclaimable pages are considered then we are 3605 * screwed and have to go OOM. 3606 */ 3607 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 3608 ac->nodemask) { 3609 unsigned long available; 3610 unsigned long reclaimable; 3611 unsigned long min_wmark = min_wmark_pages(zone); 3612 bool wmark; 3613 3614 available = reclaimable = zone_reclaimable_pages(zone); 3615 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 3616 3617 /* 3618 * Would the allocation succeed if we reclaimed all 3619 * reclaimable pages?
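 * (The call below answers this by passing 'available', the free pages plus everything reclaimable, as the free-page count to __zone_watermark_ok().)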
3620 */ 3621 wmark = __zone_watermark_ok(zone, order, min_wmark, 3622 ac_classzone_idx(ac), alloc_flags, available); 3623 trace_reclaim_retry_zone(z, order, reclaimable, 3624 available, min_wmark, *no_progress_loops, wmark); 3625 if (wmark) { 3626 /* 3627 * If we didn't make any progress and have a lot of 3628 * dirty + writeback pages then we should wait for 3629 * an IO to complete to slow down the reclaim and 3630 * prevent from pre mature OOM 3631 */ 3632 if (!did_some_progress) { 3633 unsigned long write_pending; 3634 3635 write_pending = zone_page_state_snapshot(zone, 3636 NR_ZONE_WRITE_PENDING); 3637 3638 if (2 * write_pending > reclaimable) { 3639 congestion_wait(BLK_RW_ASYNC, HZ/10); 3640 return true; 3641 } 3642 } 3643 3644 /* 3645 * Memory allocation/reclaim might be called from a WQ 3646 * context and the current implementation of the WQ 3647 * concurrency control doesn't recognize that 3648 * a particular WQ is congested if the worker thread is 3649 * looping without ever sleeping. Therefore we have to 3650 * do a short sleep here rather than calling 3651 * cond_resched(). 3652 */ 3653 if (current->flags & PF_WQ_WORKER) 3654 schedule_timeout_uninterruptible(1); 3655 else 3656 cond_resched(); 3657 3658 return true; 3659 } 3660 } 3661 3662 return false; 3663 } 3664 3665 static inline struct page * 3666 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 3667 struct alloc_context *ac) 3668 { 3669 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; 3670 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; 3671 struct page *page = NULL; 3672 unsigned int alloc_flags; 3673 unsigned long did_some_progress; 3674 enum compact_priority compact_priority; 3675 enum compact_result compact_result; 3676 int compaction_retries; 3677 int no_progress_loops; 3678 unsigned long alloc_start = jiffies; 3679 unsigned int stall_timeout = 10 * HZ; 3680 unsigned int cpuset_mems_cookie; 3681 3682 /* 3683 * In the slowpath, we sanity check order to avoid ever trying to 3684 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 3685 * be using allocators in order of preference for an area that is 3686 * too large. 3687 */ 3688 if (order >= MAX_ORDER) { 3689 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 3690 return NULL; 3691 } 3692 3693 /* 3694 * We also sanity check to catch abuse of atomic reserves being used by 3695 * callers that are not in atomic context. 3696 */ 3697 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == 3698 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM))) 3699 gfp_mask &= ~__GFP_ATOMIC; 3700 3701 retry_cpuset: 3702 compaction_retries = 0; 3703 no_progress_loops = 0; 3704 compact_priority = DEF_COMPACT_PRIORITY; 3705 cpuset_mems_cookie = read_mems_allowed_begin(); 3706 3707 /* 3708 * The fast path uses conservative alloc_flags to succeed only until 3709 * kswapd needs to be woken up, and to avoid the cost of setting up 3710 * alloc_flags precisely. So we do that now. 3711 */ 3712 alloc_flags = gfp_to_alloc_flags(gfp_mask); 3713 3714 /* 3715 * We need to recalculate the starting point for the zonelist iterator 3716 * because we might have used different nodemask in the fast path, or 3717 * there was a cpuset modification and we are retrying - otherwise we 3718 * could end up iterating over non-eligible zones endlessly. 
3719 */ 3720 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 3721 ac->high_zoneidx, ac->nodemask); 3722 if (!ac->preferred_zoneref->zone) 3723 goto nopage; 3724 3725 if (gfp_mask & __GFP_KSWAPD_RECLAIM) 3726 wake_all_kswapds(order, ac); 3727 3728 /* 3729 * The adjusted alloc_flags might result in immediate success, so try 3730 * that first 3731 */ 3732 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3733 if (page) 3734 goto got_pg; 3735 3736 /* 3737 * For costly allocations, try direct compaction first, as it's likely 3738 * that we have enough base pages and don't need to reclaim. For non- 3739 * movable high-order allocations, do that as well, as compaction will 3740 * try to prevent permanent fragmentation by migrating from blocks of the 3741 * same migratetype. 3742 * Don't try this for allocations that are allowed to ignore 3743 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen. 3744 */ 3745 if (can_direct_reclaim && 3746 (costly_order || 3747 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) 3748 && !gfp_pfmemalloc_allowed(gfp_mask)) { 3749 page = __alloc_pages_direct_compact(gfp_mask, order, 3750 alloc_flags, ac, 3751 INIT_COMPACT_PRIORITY, 3752 &compact_result); 3753 if (page) 3754 goto got_pg; 3755 3756 /* 3757 * Checks for costly allocations with __GFP_NORETRY, which 3758 * includes THP page fault allocations 3759 */ 3760 if (costly_order && (gfp_mask & __GFP_NORETRY)) { 3761 /* 3762 * If compaction is deferred for high-order allocations, 3763 * it is because sync compaction recently failed. If 3764 * this is the case and the caller requested a THP 3765 * allocation, we do not want to heavily disrupt the 3766 * system, so we fail the allocation instead of entering 3767 * direct reclaim. 3768 */ 3769 if (compact_result == COMPACT_DEFERRED) 3770 goto nopage; 3771 3772 /* 3773 * Looks like reclaim/compaction is worth trying, but 3774 * sync compaction could be very expensive, so keep 3775 * using async compaction. 3776 */ 3777 compact_priority = INIT_COMPACT_PRIORITY; 3778 } 3779 } 3780 3781 retry: 3782 /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ 3783 if (gfp_mask & __GFP_KSWAPD_RECLAIM) 3784 wake_all_kswapds(order, ac); 3785 3786 if (gfp_pfmemalloc_allowed(gfp_mask)) 3787 alloc_flags = ALLOC_NO_WATERMARKS; 3788 3789 /* 3790 * Reset the zonelist iterators if memory policies can be ignored. 3791 * These allocations are high priority and system rather than user 3792 * oriented.
3793 */ 3794 if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) { 3795 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask); 3796 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 3797 ac->high_zoneidx, ac->nodemask); 3798 } 3799 3800 /* Attempt with potentially adjusted zonelist and alloc_flags */ 3801 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); 3802 if (page) 3803 goto got_pg; 3804 3805 /* Caller is not willing to reclaim, we can't balance anything */ 3806 if (!can_direct_reclaim) 3807 goto nopage; 3808 3809 /* Make sure we know about allocations which stall for too long */ 3810 if (time_after(jiffies, alloc_start + stall_timeout)) { 3811 warn_alloc(gfp_mask & ~__GFP_NOWARN, ac->nodemask, 3812 "page allocation stalls for %ums, order:%u", 3813 jiffies_to_msecs(jiffies-alloc_start), order); 3814 stall_timeout += 10 * HZ; 3815 } 3816 3817 /* Avoid recursion of direct reclaim */ 3818 if (current->flags & PF_MEMALLOC) 3819 goto nopage; 3820 3821 /* Try direct reclaim and then allocating */ 3822 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, 3823 &did_some_progress); 3824 if (page) 3825 goto got_pg; 3826 3827 /* Try direct compaction and then allocating */ 3828 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 3829 compact_priority, &compact_result); 3830 if (page) 3831 goto got_pg; 3832 3833 /* Do not loop if specifically requested */ 3834 if (gfp_mask & __GFP_NORETRY) 3835 goto nopage; 3836 3837 /* 3838 * Do not retry costly high order allocations unless they are 3839 * __GFP_REPEAT 3840 */ 3841 if (costly_order && !(gfp_mask & __GFP_REPEAT)) 3842 goto nopage; 3843 3844 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, 3845 did_some_progress > 0, &no_progress_loops)) 3846 goto retry; 3847 3848 /* 3849 * It doesn't make any sense to retry for the compaction if the order-0 3850 * reclaim is not able to make any progress because the current 3851 * implementation of the compaction depends on the sufficient amount 3852 * of free memory (see __compaction_suitable) 3853 */ 3854 if (did_some_progress > 0 && 3855 should_compact_retry(ac, order, alloc_flags, 3856 compact_result, &compact_priority, 3857 &compaction_retries)) 3858 goto retry; 3859 3860 /* 3861 * It's possible we raced with cpuset update so the OOM would be 3862 * premature (see below the nopage: label for full explanation). 3863 */ 3864 if (read_mems_allowed_retry(cpuset_mems_cookie)) 3865 goto retry_cpuset; 3866 3867 /* Reclaim has failed us, start killing things */ 3868 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); 3869 if (page) 3870 goto got_pg; 3871 3872 /* Avoid allocations with no watermarks from looping endlessly */ 3873 if (test_thread_flag(TIF_MEMDIE)) 3874 goto nopage; 3875 3876 /* Retry as long as the OOM killer is making progress */ 3877 if (did_some_progress) { 3878 no_progress_loops = 0; 3879 goto retry; 3880 } 3881 3882 nopage: 3883 /* 3884 * When updating a task's mems_allowed or mempolicy nodemask, it is 3885 * possible to race with parallel threads in such a way that our 3886 * allocation can fail while the mask is being updated. If we are about 3887 * to fail, check if the cpuset changed during allocation and if so, 3888 * retry. 
3889 */ 3890 if (read_mems_allowed_retry(cpuset_mems_cookie)) 3891 goto retry_cpuset; 3892 3893 /* 3894 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure 3895 * we always retry 3896 */ 3897 if (gfp_mask & __GFP_NOFAIL) { 3898 /* 3899 * All existing users of the __GFP_NOFAIL are blockable, so warn 3900 * of any new users that actually require GFP_NOWAIT 3901 */ 3902 if (WARN_ON_ONCE(!can_direct_reclaim)) 3903 goto fail; 3904 3905 /* 3906 * PF_MEMALLOC request from this context is rather bizarre 3907 * because we cannot reclaim anything and only can loop waiting 3908 * for somebody to do a work for us 3909 */ 3910 WARN_ON_ONCE(current->flags & PF_MEMALLOC); 3911 3912 /* 3913 * non failing costly orders are a hard requirement which we 3914 * are not prepared for much so let's warn about these users 3915 * so that we can identify them and convert them to something 3916 * else. 3917 */ 3918 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); 3919 3920 /* 3921 * Help non-failing allocations by giving them access to memory 3922 * reserves but do not use ALLOC_NO_WATERMARKS because this 3923 * could deplete whole memory reserves which would just make 3924 * the situation worse 3925 */ 3926 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); 3927 if (page) 3928 goto got_pg; 3929 3930 cond_resched(); 3931 goto retry; 3932 } 3933 fail: 3934 warn_alloc(gfp_mask, ac->nodemask, 3935 "page allocation failure: order:%u", order); 3936 got_pg: 3937 return page; 3938 } 3939 3940 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, 3941 struct zonelist *zonelist, nodemask_t *nodemask, 3942 struct alloc_context *ac, gfp_t *alloc_mask, 3943 unsigned int *alloc_flags) 3944 { 3945 ac->high_zoneidx = gfp_zone(gfp_mask); 3946 ac->zonelist = zonelist; 3947 ac->nodemask = nodemask; 3948 ac->migratetype = gfpflags_to_migratetype(gfp_mask); 3949 3950 if (cpusets_enabled()) { 3951 *alloc_mask |= __GFP_HARDWALL; 3952 if (!ac->nodemask) 3953 ac->nodemask = &cpuset_current_mems_allowed; 3954 else 3955 *alloc_flags |= ALLOC_CPUSET; 3956 } 3957 3958 lockdep_trace_alloc(gfp_mask); 3959 3960 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); 3961 3962 if (should_fail_alloc_page(gfp_mask, order)) 3963 return false; 3964 3965 if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE) 3966 *alloc_flags |= ALLOC_CMA; 3967 3968 return true; 3969 } 3970 3971 /* Determine whether to spread dirty pages and what the first usable zone */ 3972 static inline void finalise_ac(gfp_t gfp_mask, 3973 unsigned int order, struct alloc_context *ac) 3974 { 3975 /* Dirty zone balancing only done in the fast path */ 3976 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); 3977 3978 /* 3979 * The preferred zone is used for statistics but crucially it is 3980 * also used as the starting point for the zonelist iterator. It 3981 * may get reset for allocations that ignore memory policies. 3982 */ 3983 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, 3984 ac->high_zoneidx, ac->nodemask); 3985 } 3986 3987 /* 3988 * This is the 'heart' of the zoned buddy allocator. 
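 *
 * Most users reach it through wrappers such as alloc_pages(). A minimal
 * illustrative round trip for an order-2 (four page) allocation:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (page) {
 *		void *addr = page_address(page);
 *		...
 *		__free_pages(page, 2);
 *	}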
3989 */ 3990 struct page * 3991 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 3992 struct zonelist *zonelist, nodemask_t *nodemask) 3993 { 3994 struct page *page; 3995 unsigned int alloc_flags = ALLOC_WMARK_LOW; 3996 gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */ 3997 struct alloc_context ac = { }; 3998 3999 gfp_mask &= gfp_allowed_mask; 4000 if (!prepare_alloc_pages(gfp_mask, order, zonelist, nodemask, &ac, &alloc_mask, &alloc_flags)) 4001 return NULL; 4002 4003 finalise_ac(gfp_mask, order, &ac); 4004 4005 /* First allocation attempt */ 4006 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); 4007 if (likely(page)) 4008 goto out; 4009 4010 /* 4011 * Apply scoped allocation constraints. This is mainly about GFP_NOFS 4012 * resp. GFP_NOIO which has to be inherited for all allocation requests 4013 * from a particular context which has been marked by 4014 * memalloc_no{fs,io}_{save,restore}. 4015 */ 4016 alloc_mask = current_gfp_context(gfp_mask); 4017 ac.spread_dirty_pages = false; 4018 4019 /* 4020 * Restore the original nodemask if it was potentially replaced with 4021 * &cpuset_current_mems_allowed to optimize the fast-path attempt. 4022 */ 4023 if (unlikely(ac.nodemask != nodemask)) 4024 ac.nodemask = nodemask; 4025 4026 page = __alloc_pages_slowpath(alloc_mask, order, &ac); 4027 4028 out: 4029 if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page && 4030 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) { 4031 __free_pages(page, order); 4032 page = NULL; 4033 } 4034 4035 if (kmemcheck_enabled && page) 4036 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 4037 4038 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); 4039 4040 return page; 4041 } 4042 EXPORT_SYMBOL(__alloc_pages_nodemask); 4043 4044 /* 4045 * Common helper functions. 4046 */ 4047 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 4048 { 4049 struct page *page; 4050 4051 /* 4052 * __get_free_pages() returns a 32-bit address, which cannot represent 4053 * a highmem page 4054 */ 4055 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 4056 4057 page = alloc_pages(gfp_mask, order); 4058 if (!page) 4059 return 0; 4060 return (unsigned long) page_address(page); 4061 } 4062 EXPORT_SYMBOL(__get_free_pages); 4063 4064 unsigned long get_zeroed_page(gfp_t gfp_mask) 4065 { 4066 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 4067 } 4068 EXPORT_SYMBOL(get_zeroed_page); 4069 4070 void __free_pages(struct page *page, unsigned int order) 4071 { 4072 if (put_page_testzero(page)) { 4073 if (order == 0) 4074 free_hot_cold_page(page, false); 4075 else 4076 __free_pages_ok(page, order); 4077 } 4078 } 4079 4080 EXPORT_SYMBOL(__free_pages); 4081 4082 void free_pages(unsigned long addr, unsigned int order) 4083 { 4084 if (addr != 0) { 4085 VM_BUG_ON(!virt_addr_valid((void *)addr)); 4086 __free_pages(virt_to_page((void *)addr), order); 4087 } 4088 } 4089 4090 EXPORT_SYMBOL(free_pages); 4091 4092 /* 4093 * Page Fragment: 4094 * An arbitrary-length arbitrary-offset area of memory which resides 4095 * within a 0 or higher order page. Multiple fragments within that page 4096 * are individually refcounted, in the page's reference counter. 4097 * 4098 * The page_frag functions below provide a simple allocation framework for 4099 * page fragments. This is used by the network stack and network device 4100 * drivers to provide a backing region of memory for use as either an 4101 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info. 
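 *
 * A minimal usage sketch (illustrative; 'my_cache' is a hypothetical
 * caller-owned cache and the caller must serialise access to it):
 *
 *	static struct page_frag_cache my_cache;
 *
 *	void *buf = page_frag_alloc(&my_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		...
 *		page_frag_free(buf);
 *	}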
4102 */ 4103 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, 4104 gfp_t gfp_mask) 4105 { 4106 struct page *page = NULL; 4107 gfp_t gfp = gfp_mask; 4108 4109 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4110 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | 4111 __GFP_NOMEMALLOC; 4112 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, 4113 PAGE_FRAG_CACHE_MAX_ORDER); 4114 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; 4115 #endif 4116 if (unlikely(!page)) 4117 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); 4118 4119 nc->va = page ? page_address(page) : NULL; 4120 4121 return page; 4122 } 4123 4124 void __page_frag_cache_drain(struct page *page, unsigned int count) 4125 { 4126 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); 4127 4128 if (page_ref_sub_and_test(page, count)) { 4129 unsigned int order = compound_order(page); 4130 4131 if (order == 0) 4132 free_hot_cold_page(page, false); 4133 else 4134 __free_pages_ok(page, order); 4135 } 4136 } 4137 EXPORT_SYMBOL(__page_frag_cache_drain); 4138 4139 void *page_frag_alloc(struct page_frag_cache *nc, 4140 unsigned int fragsz, gfp_t gfp_mask) 4141 { 4142 unsigned int size = PAGE_SIZE; 4143 struct page *page; 4144 int offset; 4145 4146 if (unlikely(!nc->va)) { 4147 refill: 4148 page = __page_frag_cache_refill(nc, gfp_mask); 4149 if (!page) 4150 return NULL; 4151 4152 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4153 /* if size can vary use size else just use PAGE_SIZE */ 4154 size = nc->size; 4155 #endif 4156 /* Even if we own the page, we do not use atomic_set(). 4157 * This would break get_page_unless_zero() users. 4158 */ 4159 page_ref_add(page, size - 1); 4160 4161 /* reset page count bias and offset to start of new frag */ 4162 nc->pfmemalloc = page_is_pfmemalloc(page); 4163 nc->pagecnt_bias = size; 4164 nc->offset = size; 4165 } 4166 4167 offset = nc->offset - fragsz; 4168 if (unlikely(offset < 0)) { 4169 page = virt_to_page(nc->va); 4170 4171 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) 4172 goto refill; 4173 4174 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) 4175 /* if size can vary use size else just use PAGE_SIZE */ 4176 size = nc->size; 4177 #endif 4178 /* OK, page count is 0, we can safely set it */ 4179 set_page_count(page, size); 4180 4181 /* reset page count bias and offset to start of new frag */ 4182 nc->pagecnt_bias = size; 4183 offset = size - fragsz; 4184 } 4185 4186 nc->pagecnt_bias--; 4187 nc->offset = offset; 4188 4189 return nc->va + offset; 4190 } 4191 EXPORT_SYMBOL(page_frag_alloc); 4192 4193 /* 4194 * Frees a page fragment allocated out of either a compound or order 0 page. 4195 */ 4196 void page_frag_free(void *addr) 4197 { 4198 struct page *page = virt_to_head_page(addr); 4199 4200 if (unlikely(put_page_testzero(page))) 4201 __free_pages_ok(page, compound_order(page)); 4202 } 4203 EXPORT_SYMBOL(page_frag_free); 4204 4205 static void *make_alloc_exact(unsigned long addr, unsigned int order, 4206 size_t size) 4207 { 4208 if (addr) { 4209 unsigned long alloc_end = addr + (PAGE_SIZE << order); 4210 unsigned long used = addr + PAGE_ALIGN(size); 4211 4212 split_page(virt_to_page((void *)addr), order); 4213 while (used < alloc_end) { 4214 free_page(used); 4215 used += PAGE_SIZE; 4216 } 4217 } 4218 return (void *)addr; 4219 } 4220 4221 /** 4222 * alloc_pages_exact - allocate an exact number physically-contiguous pages. 
4223 * @size: the number of bytes to allocate 4224 * @gfp_mask: GFP flags for the allocation 4225 * 4226 * This function is similar to alloc_pages(), except that it allocates the 4227 * minimum number of pages to satisfy the request. alloc_pages() can only 4228 * allocate memory in power-of-two pages. 4229 * 4230 * This function is also limited by MAX_ORDER. 4231 * 4232 * Memory allocated by this function must be released by free_pages_exact(). 4233 */ 4234 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) 4235 { 4236 unsigned int order = get_order(size); 4237 unsigned long addr; 4238 4239 addr = __get_free_pages(gfp_mask, order); 4240 return make_alloc_exact(addr, order, size); 4241 } 4242 EXPORT_SYMBOL(alloc_pages_exact); 4243 4244 /** 4245 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous 4246 * pages on a node. 4247 * @nid: the preferred node ID where memory should be allocated 4248 * @size: the number of bytes to allocate 4249 * @gfp_mask: GFP flags for the allocation 4250 * 4251 * Like alloc_pages_exact(), but try to allocate on node nid first before falling 4252 * back. 4253 */ 4254 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) 4255 { 4256 unsigned int order = get_order(size); 4257 struct page *p = alloc_pages_node(nid, gfp_mask, order); 4258 if (!p) 4259 return NULL; 4260 return make_alloc_exact((unsigned long)page_address(p), order, size); 4261 } 4262 4263 /** 4264 * free_pages_exact - release memory allocated via alloc_pages_exact() 4265 * @virt: the value returned by alloc_pages_exact. 4266 * @size: size of allocation, same value as passed to alloc_pages_exact(). 4267 * 4268 * Release the memory allocated by a previous call to alloc_pages_exact. 4269 */ 4270 void free_pages_exact(void *virt, size_t size) 4271 { 4272 unsigned long addr = (unsigned long)virt; 4273 unsigned long end = addr + PAGE_ALIGN(size); 4274 4275 while (addr < end) { 4276 free_page(addr); 4277 addr += PAGE_SIZE; 4278 } 4279 } 4280 EXPORT_SYMBOL(free_pages_exact); 4281 4282 /** 4283 * nr_free_zone_pages - count number of pages beyond high watermark 4284 * @offset: The zone index of the highest zone 4285 * 4286 * nr_free_zone_pages() counts the number of counts pages which are beyond the 4287 * high watermark within all zones at or below a given zone index. For each 4288 * zone, the number of pages is calculated as: 4289 * 4290 * nr_free_zone_pages = managed_pages - high_pages 4291 */ 4292 static unsigned long nr_free_zone_pages(int offset) 4293 { 4294 struct zoneref *z; 4295 struct zone *zone; 4296 4297 /* Just pick one node, since fallback list is circular */ 4298 unsigned long sum = 0; 4299 4300 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL); 4301 4302 for_each_zone_zonelist(zone, z, zonelist, offset) { 4303 unsigned long size = zone->managed_pages; 4304 unsigned long high = high_wmark_pages(zone); 4305 if (size > high) 4306 sum += size - high; 4307 } 4308 4309 return sum; 4310 } 4311 4312 /** 4313 * nr_free_buffer_pages - count number of pages beyond high watermark 4314 * 4315 * nr_free_buffer_pages() counts the number of pages which are beyond the high 4316 * watermark within ZONE_DMA and ZONE_NORMAL. 
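 *
 * Equivalently, it is nr_free_zone_pages() evaluated for the highest zone
 * usable by GFP_USER allocations. As a rough illustration, a ZONE_NORMAL
 * with 1,000,000 managed pages and a high watermark of 8,000 pages would
 * contribute 992,000 pages to the returned total.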
4317 */ 4318 unsigned long nr_free_buffer_pages(void) 4319 { 4320 return nr_free_zone_pages(gfp_zone(GFP_USER)); 4321 } 4322 EXPORT_SYMBOL_GPL(nr_free_buffer_pages); 4323 4324 /** 4325 * nr_free_pagecache_pages - count number of pages beyond high watermark 4326 * 4327 * nr_free_pagecache_pages() counts the number of pages which are beyond the 4328 * high watermark within all zones. 4329 */ 4330 unsigned long nr_free_pagecache_pages(void) 4331 { 4332 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 4333 } 4334 4335 static inline void show_node(struct zone *zone) 4336 { 4337 if (IS_ENABLED(CONFIG_NUMA)) 4338 printk("Node %d ", zone_to_nid(zone)); 4339 } 4340 4341 long si_mem_available(void) 4342 { 4343 long available; 4344 unsigned long pagecache; 4345 unsigned long wmark_low = 0; 4346 unsigned long pages[NR_LRU_LISTS]; 4347 struct zone *zone; 4348 int lru; 4349 4350 for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) 4351 pages[lru] = global_node_page_state(NR_LRU_BASE + lru); 4352 4353 for_each_zone(zone) 4354 wmark_low += zone->watermark[WMARK_LOW]; 4355 4356 /* 4357 * Estimate the amount of memory available for userspace allocations, 4358 * without causing swapping. 4359 */ 4360 available = global_page_state(NR_FREE_PAGES) - totalreserve_pages; 4361 4362 /* 4363 * Not all the page cache can be freed, otherwise the system will 4364 * start swapping. Assume at least half of the page cache, or the 4365 * low watermark worth of cache, needs to stay. 4366 */ 4367 pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; 4368 pagecache -= min(pagecache / 2, wmark_low); 4369 available += pagecache; 4370 4371 /* 4372 * Part of the reclaimable slab consists of items that are in use, 4373 * and cannot be freed. Cap this estimate at the low watermark. 
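 *
 * Putting the pieces together, the estimate is roughly (illustrative
 * pseudo-formula, not an exact guarantee):
 *
 *	available = free_pages - totalreserve_pages
 *		  + pagecache - min(pagecache / 2, wmark_low)
 *		  + slab_reclaimable - min(slab_reclaimable / 2, wmark_low)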
4374 */ 4375 available += global_page_state(NR_SLAB_RECLAIMABLE) - 4376 min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low); 4377 4378 if (available < 0) 4379 available = 0; 4380 return available; 4381 } 4382 EXPORT_SYMBOL_GPL(si_mem_available); 4383 4384 void si_meminfo(struct sysinfo *val) 4385 { 4386 val->totalram = totalram_pages; 4387 val->sharedram = global_node_page_state(NR_SHMEM); 4388 val->freeram = global_page_state(NR_FREE_PAGES); 4389 val->bufferram = nr_blockdev_pages(); 4390 val->totalhigh = totalhigh_pages; 4391 val->freehigh = nr_free_highpages(); 4392 val->mem_unit = PAGE_SIZE; 4393 } 4394 4395 EXPORT_SYMBOL(si_meminfo); 4396 4397 #ifdef CONFIG_NUMA 4398 void si_meminfo_node(struct sysinfo *val, int nid) 4399 { 4400 int zone_type; /* needs to be signed */ 4401 unsigned long managed_pages = 0; 4402 unsigned long managed_highpages = 0; 4403 unsigned long free_highpages = 0; 4404 pg_data_t *pgdat = NODE_DATA(nid); 4405 4406 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 4407 managed_pages += pgdat->node_zones[zone_type].managed_pages; 4408 val->totalram = managed_pages; 4409 val->sharedram = node_page_state(pgdat, NR_SHMEM); 4410 val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); 4411 #ifdef CONFIG_HIGHMEM 4412 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { 4413 struct zone *zone = &pgdat->node_zones[zone_type]; 4414 4415 if (is_highmem(zone)) { 4416 managed_highpages += zone->managed_pages; 4417 free_highpages += zone_page_state(zone, NR_FREE_PAGES); 4418 } 4419 } 4420 val->totalhigh = managed_highpages; 4421 val->freehigh = free_highpages; 4422 #else 4423 val->totalhigh = managed_highpages; 4424 val->freehigh = free_highpages; 4425 #endif 4426 val->mem_unit = PAGE_SIZE; 4427 } 4428 #endif 4429 4430 /* 4431 * Determine whether the node should be displayed or not, depending on whether 4432 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 4433 */ 4434 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) 4435 { 4436 if (!(flags & SHOW_MEM_FILTER_NODES)) 4437 return false; 4438 4439 /* 4440 * no node mask - aka implicit memory numa policy. Do not bother with 4441 * the synchronization - read_mems_allowed_begin - because we do not 4442 * have to be precise here. 4443 */ 4444 if (!nodemask) 4445 nodemask = &cpuset_current_mems_allowed; 4446 4447 return !node_isset(nid, *nodemask); 4448 } 4449 4450 #define K(x) ((x) << (PAGE_SHIFT-10)) 4451 4452 static void show_migration_types(unsigned char type) 4453 { 4454 static const char types[MIGRATE_TYPES] = { 4455 [MIGRATE_UNMOVABLE] = 'U', 4456 [MIGRATE_MOVABLE] = 'M', 4457 [MIGRATE_RECLAIMABLE] = 'E', 4458 [MIGRATE_HIGHATOMIC] = 'H', 4459 #ifdef CONFIG_CMA 4460 [MIGRATE_CMA] = 'C', 4461 #endif 4462 #ifdef CONFIG_MEMORY_ISOLATION 4463 [MIGRATE_ISOLATE] = 'I', 4464 #endif 4465 }; 4466 char tmp[MIGRATE_TYPES + 1]; 4467 char *p = tmp; 4468 int i; 4469 4470 for (i = 0; i < MIGRATE_TYPES; i++) { 4471 if (type & (1 << i)) 4472 *p++ = types[i]; 4473 } 4474 4475 *p = '\0'; 4476 printk(KERN_CONT "(%s) ", tmp); 4477 } 4478 4479 /* 4480 * Show free area list (used inside shift_scroll-lock stuff) 4481 * We also calculate the percentage fragmentation. We do this by counting the 4482 * memory on each free list with the exception of the first item on the list. 4483 * 4484 * Bits in @filter: 4485 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's 4486 * cpuset. 
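 *
 * A rough usage sketch (illustrative): out-of-memory style reporting would
 * invoke
 *
 *	show_free_areas(SHOW_MEM_FILTER_NODES, nodemask);
 *
 * and a NULL @nodemask is treated as the current cpuset's mems_allowed when
 * deciding which nodes to skip.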
4487 */ 4488 void show_free_areas(unsigned int filter, nodemask_t *nodemask) 4489 { 4490 unsigned long free_pcp = 0; 4491 int cpu; 4492 struct zone *zone; 4493 pg_data_t *pgdat; 4494 4495 for_each_populated_zone(zone) { 4496 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 4497 continue; 4498 4499 for_each_online_cpu(cpu) 4500 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 4501 } 4502 4503 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 4504 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 4505 " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n" 4506 " slab_reclaimable:%lu slab_unreclaimable:%lu\n" 4507 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 4508 " free:%lu free_pcp:%lu free_cma:%lu\n", 4509 global_node_page_state(NR_ACTIVE_ANON), 4510 global_node_page_state(NR_INACTIVE_ANON), 4511 global_node_page_state(NR_ISOLATED_ANON), 4512 global_node_page_state(NR_ACTIVE_FILE), 4513 global_node_page_state(NR_INACTIVE_FILE), 4514 global_node_page_state(NR_ISOLATED_FILE), 4515 global_node_page_state(NR_UNEVICTABLE), 4516 global_node_page_state(NR_FILE_DIRTY), 4517 global_node_page_state(NR_WRITEBACK), 4518 global_node_page_state(NR_UNSTABLE_NFS), 4519 global_page_state(NR_SLAB_RECLAIMABLE), 4520 global_page_state(NR_SLAB_UNRECLAIMABLE), 4521 global_node_page_state(NR_FILE_MAPPED), 4522 global_node_page_state(NR_SHMEM), 4523 global_page_state(NR_PAGETABLE), 4524 global_page_state(NR_BOUNCE), 4525 global_page_state(NR_FREE_PAGES), 4526 free_pcp, 4527 global_page_state(NR_FREE_CMA_PAGES)); 4528 4529 for_each_online_pgdat(pgdat) { 4530 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) 4531 continue; 4532 4533 printk("Node %d" 4534 " active_anon:%lukB" 4535 " inactive_anon:%lukB" 4536 " active_file:%lukB" 4537 " inactive_file:%lukB" 4538 " unevictable:%lukB" 4539 " isolated(anon):%lukB" 4540 " isolated(file):%lukB" 4541 " mapped:%lukB" 4542 " dirty:%lukB" 4543 " writeback:%lukB" 4544 " shmem:%lukB" 4545 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4546 " shmem_thp: %lukB" 4547 " shmem_pmdmapped: %lukB" 4548 " anon_thp: %lukB" 4549 #endif 4550 " writeback_tmp:%lukB" 4551 " unstable:%lukB" 4552 " all_unreclaimable? %s" 4553 "\n", 4554 pgdat->node_id, 4555 K(node_page_state(pgdat, NR_ACTIVE_ANON)), 4556 K(node_page_state(pgdat, NR_INACTIVE_ANON)), 4557 K(node_page_state(pgdat, NR_ACTIVE_FILE)), 4558 K(node_page_state(pgdat, NR_INACTIVE_FILE)), 4559 K(node_page_state(pgdat, NR_UNEVICTABLE)), 4560 K(node_page_state(pgdat, NR_ISOLATED_ANON)), 4561 K(node_page_state(pgdat, NR_ISOLATED_FILE)), 4562 K(node_page_state(pgdat, NR_FILE_MAPPED)), 4563 K(node_page_state(pgdat, NR_FILE_DIRTY)), 4564 K(node_page_state(pgdat, NR_WRITEBACK)), 4565 K(node_page_state(pgdat, NR_SHMEM)), 4566 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4567 K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR), 4568 K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) 4569 * HPAGE_PMD_NR), 4570 K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR), 4571 #endif 4572 K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), 4573 K(node_page_state(pgdat, NR_UNSTABLE_NFS)), 4574 pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
4575 "yes" : "no"); 4576 } 4577 4578 for_each_populated_zone(zone) { 4579 int i; 4580 4581 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 4582 continue; 4583 4584 free_pcp = 0; 4585 for_each_online_cpu(cpu) 4586 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; 4587 4588 show_node(zone); 4589 printk(KERN_CONT 4590 "%s" 4591 " free:%lukB" 4592 " min:%lukB" 4593 " low:%lukB" 4594 " high:%lukB" 4595 " active_anon:%lukB" 4596 " inactive_anon:%lukB" 4597 " active_file:%lukB" 4598 " inactive_file:%lukB" 4599 " unevictable:%lukB" 4600 " writepending:%lukB" 4601 " present:%lukB" 4602 " managed:%lukB" 4603 " mlocked:%lukB" 4604 " slab_reclaimable:%lukB" 4605 " slab_unreclaimable:%lukB" 4606 " kernel_stack:%lukB" 4607 " pagetables:%lukB" 4608 " bounce:%lukB" 4609 " free_pcp:%lukB" 4610 " local_pcp:%ukB" 4611 " free_cma:%lukB" 4612 "\n", 4613 zone->name, 4614 K(zone_page_state(zone, NR_FREE_PAGES)), 4615 K(min_wmark_pages(zone)), 4616 K(low_wmark_pages(zone)), 4617 K(high_wmark_pages(zone)), 4618 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), 4619 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), 4620 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), 4621 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), 4622 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), 4623 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), 4624 K(zone->present_pages), 4625 K(zone->managed_pages), 4626 K(zone_page_state(zone, NR_MLOCK)), 4627 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 4628 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 4629 zone_page_state(zone, NR_KERNEL_STACK_KB), 4630 K(zone_page_state(zone, NR_PAGETABLE)), 4631 K(zone_page_state(zone, NR_BOUNCE)), 4632 K(free_pcp), 4633 K(this_cpu_read(zone->pageset->pcp.count)), 4634 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); 4635 printk("lowmem_reserve[]:"); 4636 for (i = 0; i < MAX_NR_ZONES; i++) 4637 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); 4638 printk(KERN_CONT "\n"); 4639 } 4640 4641 for_each_populated_zone(zone) { 4642 unsigned int order; 4643 unsigned long nr[MAX_ORDER], flags, total = 0; 4644 unsigned char types[MAX_ORDER]; 4645 4646 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) 4647 continue; 4648 show_node(zone); 4649 printk(KERN_CONT "%s: ", zone->name); 4650 4651 spin_lock_irqsave(&zone->lock, flags); 4652 for (order = 0; order < MAX_ORDER; order++) { 4653 struct free_area *area = &zone->free_area[order]; 4654 int type; 4655 4656 nr[order] = area->nr_free; 4657 total += nr[order] << order; 4658 4659 types[order] = 0; 4660 for (type = 0; type < MIGRATE_TYPES; type++) { 4661 if (!list_empty(&area->free_list[type])) 4662 types[order] |= 1 << type; 4663 } 4664 } 4665 spin_unlock_irqrestore(&zone->lock, flags); 4666 for (order = 0; order < MAX_ORDER; order++) { 4667 printk(KERN_CONT "%lu*%lukB ", 4668 nr[order], K(1UL) << order); 4669 if (nr[order]) 4670 show_migration_types(types[order]); 4671 } 4672 printk(KERN_CONT "= %lukB\n", K(total)); 4673 } 4674 4675 hugetlb_show_meminfo(); 4676 4677 printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); 4678 4679 show_swap_cache_info(); 4680 } 4681 4682 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 4683 { 4684 zoneref->zone = zone; 4685 zoneref->zone_idx = zone_idx(zone); 4686 } 4687 4688 /* 4689 * Builds allocation fallback zone lists. 4690 * 4691 * Add all populated zones of a node to the zonelist. 
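 *
 * Zones are appended from highest to lowest, so for a node with populated
 * Normal and DMA zones the zonerefs added here end up roughly as:
 *
 *	_zonerefs[n]   -> ZONE_NORMAL of this node
 *	_zonerefs[n+1] -> ZONE_DMA    of this node
 *
 * (illustrative; the exact set depends on which zones are managed).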
4692 */ 4693 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 4694 int nr_zones) 4695 { 4696 struct zone *zone; 4697 enum zone_type zone_type = MAX_NR_ZONES; 4698 4699 do { 4700 zone_type--; 4701 zone = pgdat->node_zones + zone_type; 4702 if (managed_zone(zone)) { 4703 zoneref_set_zone(zone, 4704 &zonelist->_zonerefs[nr_zones++]); 4705 check_highest_zone(zone_type); 4706 } 4707 } while (zone_type); 4708 4709 return nr_zones; 4710 } 4711 4712 4713 /* 4714 * zonelist_order: 4715 * 0 = automatic detection of better ordering. 4716 * 1 = order by ([node] distance, -zonetype) 4717 * 2 = order by (-zonetype, [node] distance) 4718 * 4719 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create 4720 * the same zonelist. So only NUMA can configure this param. 4721 */ 4722 #define ZONELIST_ORDER_DEFAULT 0 4723 #define ZONELIST_ORDER_NODE 1 4724 #define ZONELIST_ORDER_ZONE 2 4725 4726 /* zonelist order in the kernel. 4727 * set_zonelist_order() will set this to NODE or ZONE. 4728 */ 4729 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT; 4730 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"}; 4731 4732 4733 #ifdef CONFIG_NUMA 4734 /* The value user specified ....changed by config */ 4735 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT; 4736 /* string for sysctl */ 4737 #define NUMA_ZONELIST_ORDER_LEN 16 4738 char numa_zonelist_order[16] = "default"; 4739 4740 /* 4741 * interface for configure zonelist ordering. 4742 * command line option "numa_zonelist_order" 4743 * = "[dD]efault - default, automatic configuration. 4744 * = "[nN]ode - order by node locality, then by zone within node 4745 * = "[zZ]one - order by zone, then by locality within zone 4746 */ 4747 4748 static int __parse_numa_zonelist_order(char *s) 4749 { 4750 if (*s == 'd' || *s == 'D') { 4751 user_zonelist_order = ZONELIST_ORDER_DEFAULT; 4752 } else if (*s == 'n' || *s == 'N') { 4753 user_zonelist_order = ZONELIST_ORDER_NODE; 4754 } else if (*s == 'z' || *s == 'Z') { 4755 user_zonelist_order = ZONELIST_ORDER_ZONE; 4756 } else { 4757 pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s); 4758 return -EINVAL; 4759 } 4760 return 0; 4761 } 4762 4763 static __init int setup_numa_zonelist_order(char *s) 4764 { 4765 int ret; 4766 4767 if (!s) 4768 return 0; 4769 4770 ret = __parse_numa_zonelist_order(s); 4771 if (ret == 0) 4772 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN); 4773 4774 return ret; 4775 } 4776 early_param("numa_zonelist_order", setup_numa_zonelist_order); 4777 4778 /* 4779 * sysctl handler for numa_zonelist_order 4780 */ 4781 int numa_zonelist_order_handler(struct ctl_table *table, int write, 4782 void __user *buffer, size_t *length, 4783 loff_t *ppos) 4784 { 4785 char saved_string[NUMA_ZONELIST_ORDER_LEN]; 4786 int ret; 4787 static DEFINE_MUTEX(zl_order_mutex); 4788 4789 mutex_lock(&zl_order_mutex); 4790 if (write) { 4791 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) { 4792 ret = -EINVAL; 4793 goto out; 4794 } 4795 strcpy(saved_string, (char *)table->data); 4796 } 4797 ret = proc_dostring(table, write, buffer, length, ppos); 4798 if (ret) 4799 goto out; 4800 if (write) { 4801 int oldval = user_zonelist_order; 4802 4803 ret = __parse_numa_zonelist_order((char *)table->data); 4804 if (ret) { 4805 /* 4806 * bogus value. 
restore saved string 4807 */ 4808 strncpy((char *)table->data, saved_string, 4809 NUMA_ZONELIST_ORDER_LEN); 4810 user_zonelist_order = oldval; 4811 } else if (oldval != user_zonelist_order) { 4812 mutex_lock(&zonelists_mutex); 4813 build_all_zonelists(NULL, NULL); 4814 mutex_unlock(&zonelists_mutex); 4815 } 4816 } 4817 out: 4818 mutex_unlock(&zl_order_mutex); 4819 return ret; 4820 } 4821 4822 4823 #define MAX_NODE_LOAD (nr_online_nodes) 4824 static int node_load[MAX_NUMNODES]; 4825 4826 /** 4827 * find_next_best_node - find the next node that should appear in a given node's fallback list 4828 * @node: node whose fallback list we're appending 4829 * @used_node_mask: nodemask_t of already used nodes 4830 * 4831 * We use a number of factors to determine which is the next node that should 4832 * appear on a given node's fallback list. The node should not have appeared 4833 * already in @node's fallback list, and it should be the next closest node 4834 * according to the distance array (which contains arbitrary distance values 4835 * from each node to each node in the system), and should also prefer nodes 4836 * with no CPUs, since presumably they'll have very little allocation pressure 4837 * on them otherwise. 4838 * It returns -1 if no node is found. 4839 */ 4840 static int find_next_best_node(int node, nodemask_t *used_node_mask) 4841 { 4842 int n, val; 4843 int min_val = INT_MAX; 4844 int best_node = NUMA_NO_NODE; 4845 const struct cpumask *tmp = cpumask_of_node(0); 4846 4847 /* Use the local node if we haven't already */ 4848 if (!node_isset(node, *used_node_mask)) { 4849 node_set(node, *used_node_mask); 4850 return node; 4851 } 4852 4853 for_each_node_state(n, N_MEMORY) { 4854 4855 /* Don't want a node to appear more than once */ 4856 if (node_isset(n, *used_node_mask)) 4857 continue; 4858 4859 /* Use the distance array to find the distance */ 4860 val = node_distance(node, n); 4861 4862 /* Penalize nodes under us ("prefer the next node") */ 4863 val += (n < node); 4864 4865 /* Give preference to headless and unused nodes */ 4866 tmp = cpumask_of_node(n); 4867 if (!cpumask_empty(tmp)) 4868 val += PENALTY_FOR_NODE_WITH_CPUS; 4869 4870 /* Slight preference for less loaded node */ 4871 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 4872 val += node_load[n]; 4873 4874 if (val < min_val) { 4875 min_val = val; 4876 best_node = n; 4877 } 4878 } 4879 4880 if (best_node >= 0) 4881 node_set(best_node, *used_node_mask); 4882 4883 return best_node; 4884 } 4885 4886 4887 /* 4888 * Build zonelists ordered by node and zones within node. 4889 * This results in maximum locality--normal zone overflows into local 4890 * DMA zone, if any--but risks exhausting DMA zone. 
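 *
 * As a rough illustration, on a two node machine whose nodes each have
 * Normal and DMA zones, node ordering gives node 0 a fallback list of:
 *
 *	Normal(0), DMA(0), Normal(1), DMA(1)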
4891 */ 4892 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 4893 { 4894 int j; 4895 struct zonelist *zonelist; 4896 4897 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK]; 4898 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 4899 ; 4900 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 4901 zonelist->_zonerefs[j].zone = NULL; 4902 zonelist->_zonerefs[j].zone_idx = 0; 4903 } 4904 4905 /* 4906 * Build gfp_thisnode zonelists 4907 */ 4908 static void build_thisnode_zonelists(pg_data_t *pgdat) 4909 { 4910 int j; 4911 struct zonelist *zonelist; 4912 4913 zonelist = &pgdat->node_zonelists[ZONELIST_NOFALLBACK]; 4914 j = build_zonelists_node(pgdat, zonelist, 0); 4915 zonelist->_zonerefs[j].zone = NULL; 4916 zonelist->_zonerefs[j].zone_idx = 0; 4917 } 4918 4919 /* 4920 * Build zonelists ordered by zone and nodes within zones. 4921 * This results in conserving DMA zone[s] until all Normal memory is 4922 * exhausted, but results in overflowing to remote node while memory 4923 * may still exist in local DMA zone. 4924 */ 4925 static int node_order[MAX_NUMNODES]; 4926 4927 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes) 4928 { 4929 int pos, j, node; 4930 int zone_type; /* needs to be signed */ 4931 struct zone *z; 4932 struct zonelist *zonelist; 4933 4934 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK]; 4935 pos = 0; 4936 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) { 4937 for (j = 0; j < nr_nodes; j++) { 4938 node = node_order[j]; 4939 z = &NODE_DATA(node)->node_zones[zone_type]; 4940 if (managed_zone(z)) { 4941 zoneref_set_zone(z, 4942 &zonelist->_zonerefs[pos++]); 4943 check_highest_zone(zone_type); 4944 } 4945 } 4946 } 4947 zonelist->_zonerefs[pos].zone = NULL; 4948 zonelist->_zonerefs[pos].zone_idx = 0; 4949 } 4950 4951 #if defined(CONFIG_64BIT) 4952 /* 4953 * Devices that require DMA32/DMA are relatively rare and do not justify a 4954 * penalty to every machine in case the specialised case applies. Default 4955 * to Node-ordering on 64-bit NUMA machines 4956 */ 4957 static int default_zonelist_order(void) 4958 { 4959 return ZONELIST_ORDER_NODE; 4960 } 4961 #else 4962 /* 4963 * On 32-bit, the Normal zone needs to be preserved for allocations accessible 4964 * by the kernel. If processes running on node 0 deplete the low memory zone 4965 * then reclaim will occur more frequency increasing stalls and potentially 4966 * be easier to OOM if a large percentage of the zone is under writeback or 4967 * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set. 4968 * Hence, default to zone ordering on 32-bit. 
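 *
 * For comparison, a two node machine with Normal and DMA zones on each node
 * would see node 0's fallback list laid out under zone ordering as (sketch):
 *
 *	Normal(0), Normal(1), DMA(0), DMA(1)
 *
 * so the DMA zones are only tapped once every Normal zone is exhausted.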
4969 */ 4970 static int default_zonelist_order(void) 4971 { 4972 return ZONELIST_ORDER_ZONE; 4973 } 4974 #endif /* CONFIG_64BIT */ 4975 4976 static void set_zonelist_order(void) 4977 { 4978 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT) 4979 current_zonelist_order = default_zonelist_order(); 4980 else 4981 current_zonelist_order = user_zonelist_order; 4982 } 4983 4984 static void build_zonelists(pg_data_t *pgdat) 4985 { 4986 int i, node, load; 4987 nodemask_t used_mask; 4988 int local_node, prev_node; 4989 struct zonelist *zonelist; 4990 unsigned int order = current_zonelist_order; 4991 4992 /* initialize zonelists */ 4993 for (i = 0; i < MAX_ZONELISTS; i++) { 4994 zonelist = pgdat->node_zonelists + i; 4995 zonelist->_zonerefs[0].zone = NULL; 4996 zonelist->_zonerefs[0].zone_idx = 0; 4997 } 4998 4999 /* NUMA-aware ordering of nodes */ 5000 local_node = pgdat->node_id; 5001 load = nr_online_nodes; 5002 prev_node = local_node; 5003 nodes_clear(used_mask); 5004 5005 memset(node_order, 0, sizeof(node_order)); 5006 i = 0; 5007 5008 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) { 5009 /* 5010 * We don't want to pressure a particular node. 5011 * So adding penalty to the first node in same 5012 * distance group to make it round-robin. 5013 */ 5014 if (node_distance(local_node, node) != 5015 node_distance(local_node, prev_node)) 5016 node_load[node] = load; 5017 5018 prev_node = node; 5019 load--; 5020 if (order == ZONELIST_ORDER_NODE) 5021 build_zonelists_in_node_order(pgdat, node); 5022 else 5023 node_order[i++] = node; /* remember order */ 5024 } 5025 5026 if (order == ZONELIST_ORDER_ZONE) { 5027 /* calculate node order -- i.e., DMA last! */ 5028 build_zonelists_in_zone_order(pgdat, i); 5029 } 5030 5031 build_thisnode_zonelists(pgdat); 5032 } 5033 5034 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5035 /* 5036 * Return node id of node used for "local" allocations. 5037 * I.e., first node id of first zone in arg node's generic zonelist. 5038 * Used for initializing percpu 'numa_mem', which is used primarily 5039 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 5040 */ 5041 int local_memory_node(int node) 5042 { 5043 struct zoneref *z; 5044 5045 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 5046 gfp_zone(GFP_KERNEL), 5047 NULL); 5048 return z->zone->node; 5049 } 5050 #endif 5051 5052 static void setup_min_unmapped_ratio(void); 5053 static void setup_min_slab_ratio(void); 5054 #else /* CONFIG_NUMA */ 5055 5056 static void set_zonelist_order(void) 5057 { 5058 current_zonelist_order = ZONELIST_ORDER_ZONE; 5059 } 5060 5061 static void build_zonelists(pg_data_t *pgdat) 5062 { 5063 int node, local_node; 5064 enum zone_type j; 5065 struct zonelist *zonelist; 5066 5067 local_node = pgdat->node_id; 5068 5069 zonelist = &pgdat->node_zonelists[ZONELIST_FALLBACK]; 5070 j = build_zonelists_node(pgdat, zonelist, 0); 5071 5072 /* 5073 * Now we build the zonelist so that it contains the zones 5074 * of all the other nodes. 
5075 * We don't want to pressure a particular node, so when 5076 * building the zones for node N, we make sure that the 5077 * zones coming right after the local ones are those from 5078 * node N+1 (modulo N) 5079 */ 5080 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 5081 if (!node_online(node)) 5082 continue; 5083 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 5084 } 5085 for (node = 0; node < local_node; node++) { 5086 if (!node_online(node)) 5087 continue; 5088 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 5089 } 5090 5091 zonelist->_zonerefs[j].zone = NULL; 5092 zonelist->_zonerefs[j].zone_idx = 0; 5093 } 5094 5095 #endif /* CONFIG_NUMA */ 5096 5097 /* 5098 * Boot pageset table. One per cpu which is going to be used for all 5099 * zones and all nodes. The parameters will be set in such a way 5100 * that an item put on a list will immediately be handed over to 5101 * the buddy list. This is safe since pageset manipulation is done 5102 * with interrupts disabled. 5103 * 5104 * The boot_pagesets must be kept even after bootup is complete for 5105 * unused processors and/or zones. They do play a role for bootstrapping 5106 * hotplugged processors. 5107 * 5108 * zoneinfo_show() and maybe other functions do 5109 * not check if the processor is online before following the pageset pointer. 5110 * Other parts of the kernel may not check if the zone is available. 5111 */ 5112 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 5113 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 5114 static void setup_zone_pageset(struct zone *zone); 5115 5116 /* 5117 * Global mutex to protect against size modification of zonelists 5118 * as well as to serialize pageset setup for the new populated zone. 5119 */ 5120 DEFINE_MUTEX(zonelists_mutex); 5121 5122 /* return values int ....just for stop_machine() */ 5123 static int __build_all_zonelists(void *data) 5124 { 5125 int nid; 5126 int cpu; 5127 pg_data_t *self = data; 5128 5129 #ifdef CONFIG_NUMA 5130 memset(node_load, 0, sizeof(node_load)); 5131 #endif 5132 5133 if (self && !node_online(self->node_id)) { 5134 build_zonelists(self); 5135 } 5136 5137 for_each_online_node(nid) { 5138 pg_data_t *pgdat = NODE_DATA(nid); 5139 5140 build_zonelists(pgdat); 5141 } 5142 5143 /* 5144 * Initialize the boot_pagesets that are going to be used 5145 * for bootstrapping processors. The real pagesets for 5146 * each zone will be allocated later when the per cpu 5147 * allocator is available. 5148 * 5149 * boot_pagesets are used also for bootstrapping offline 5150 * cpus if the system is already booted because the pagesets 5151 * are needed to initialize allocators on a specific cpu too. 5152 * F.e. the percpu allocator needs the page allocator which 5153 * needs the percpu allocator in order to allocate its pagesets 5154 * (a chicken-egg dilemma). 5155 */ 5156 for_each_possible_cpu(cpu) { 5157 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 5158 5159 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 5160 /* 5161 * We now know the "local memory node" for each node-- 5162 * i.e., the node of the first zone in the generic zonelist. 5163 * Set up numa_mem percpu variable for on-line cpus. During 5164 * boot, only the boot cpu should be on-line; we'll init the 5165 * secondary cpus' numa_mem as they come on-line. During 5166 * node/memory hotplug, we'll fixup all on-line cpus. 
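 *
 * The effect is that numa_mem_id() on a CPU attached to a memoryless node
 * resolves to its nearest memory-bearing node, so a sketch like
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, numa_mem_id());
 *
 * (illustrative only) is directed at a node that can actually satisfy it.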
5167 */ 5168 if (cpu_online(cpu)) 5169 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 5170 #endif 5171 } 5172 5173 return 0; 5174 } 5175 5176 static noinline void __init 5177 build_all_zonelists_init(void) 5178 { 5179 __build_all_zonelists(NULL); 5180 mminit_verify_zonelist(); 5181 cpuset_init_current_mems_allowed(); 5182 } 5183 5184 /* 5185 * Called with zonelists_mutex held always 5186 * unless system_state == SYSTEM_BOOTING. 5187 * 5188 * __ref due to (1) call of __meminit annotated setup_zone_pageset 5189 * [we're only called with non-NULL zone through __meminit paths] and 5190 * (2) call of __init annotated helper build_all_zonelists_init 5191 * [protected by SYSTEM_BOOTING]. 5192 */ 5193 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) 5194 { 5195 set_zonelist_order(); 5196 5197 if (system_state == SYSTEM_BOOTING) { 5198 build_all_zonelists_init(); 5199 } else { 5200 #ifdef CONFIG_MEMORY_HOTPLUG 5201 if (zone) 5202 setup_zone_pageset(zone); 5203 #endif 5204 /* we have to stop all cpus to guarantee there is no user 5205 of zonelist */ 5206 stop_machine(__build_all_zonelists, pgdat, NULL); 5207 /* cpuset refresh routine should be here */ 5208 } 5209 vm_total_pages = nr_free_pagecache_pages(); 5210 /* 5211 * Disable grouping by mobility if the number of pages in the 5212 * system is too low to allow the mechanism to work. It would be 5213 * more accurate, but expensive to check per-zone. This check is 5214 * made on memory-hotadd so a system can start with mobility 5215 * disabled and enable it later 5216 */ 5217 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 5218 page_group_by_mobility_disabled = 1; 5219 else 5220 page_group_by_mobility_disabled = 0; 5221 5222 pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n", 5223 nr_online_nodes, 5224 zonelist_order_name[current_zonelist_order], 5225 page_group_by_mobility_disabled ? "off" : "on", 5226 vm_total_pages); 5227 #ifdef CONFIG_NUMA 5228 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 5229 #endif 5230 } 5231 5232 /* 5233 * Initially all pages are reserved - free ones are freed 5234 * up by free_all_bootmem() once the early boot process is 5235 * done. Non-atomic initialization, single-pass. 5236 */ 5237 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 5238 unsigned long start_pfn, enum memmap_context context) 5239 { 5240 struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn)); 5241 unsigned long end_pfn = start_pfn + size; 5242 pg_data_t *pgdat = NODE_DATA(nid); 5243 unsigned long pfn; 5244 unsigned long nr_initialised = 0; 5245 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5246 struct memblock_region *r = NULL, *tmp; 5247 #endif 5248 5249 if (highest_memmap_pfn < end_pfn - 1) 5250 highest_memmap_pfn = end_pfn - 1; 5251 5252 /* 5253 * Honor reservation requested by the driver for this ZONE_DEVICE 5254 * memory 5255 */ 5256 if (altmap && start_pfn == altmap->base_pfn) 5257 start_pfn += altmap->reserve; 5258 5259 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 5260 /* 5261 * There can be holes in boot-time mem_map[]s handed to this 5262 * function. They do not exist on hotplugged memory. 5263 */ 5264 if (context != MEMMAP_EARLY) 5265 goto not_early; 5266 5267 if (!early_pfn_valid(pfn)) { 5268 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5269 /* 5270 * Skip to the pfn preceding the next valid one (or 5271 * end_pfn), such that we hit a valid pfn (or end_pfn) 5272 * on our next iteration of the loop. 
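 *
 * The "- 1" below pairs with the pfn++ of the enclosing for loop: if the
 * next valid pfn is, say, 0x2000, pfn is set to 0x1fff here and the loop
 * increment lands exactly on 0x2000 (illustrative values).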
5273 */ 5274 pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1; 5275 #endif 5276 continue; 5277 } 5278 if (!early_pfn_in_nid(pfn, nid)) 5279 continue; 5280 if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised)) 5281 break; 5282 5283 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5284 /* 5285 * Check given memblock attribute by firmware which can affect 5286 * kernel memory layout. If zone==ZONE_MOVABLE but memory is 5287 * mirrored, it's an overlapped memmap init. skip it. 5288 */ 5289 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 5290 if (!r || pfn >= memblock_region_memory_end_pfn(r)) { 5291 for_each_memblock(memory, tmp) 5292 if (pfn < memblock_region_memory_end_pfn(tmp)) 5293 break; 5294 r = tmp; 5295 } 5296 if (pfn >= memblock_region_memory_base_pfn(r) && 5297 memblock_is_mirror(r)) { 5298 /* already initialized as NORMAL */ 5299 pfn = memblock_region_memory_end_pfn(r); 5300 continue; 5301 } 5302 } 5303 #endif 5304 5305 not_early: 5306 /* 5307 * Mark the block movable so that blocks are reserved for 5308 * movable at startup. This will force kernel allocations 5309 * to reserve their blocks rather than leaking throughout 5310 * the address space during boot when many long-lived 5311 * kernel allocations are made. 5312 * 5313 * bitmap is created for zone's valid pfn range. but memmap 5314 * can be created for invalid pages (for alignment) 5315 * check here not to call set_pageblock_migratetype() against 5316 * pfn out of zone. 5317 */ 5318 if (!(pfn & (pageblock_nr_pages - 1))) { 5319 struct page *page = pfn_to_page(pfn); 5320 5321 __init_single_page(page, pfn, zone, nid); 5322 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 5323 } else { 5324 __init_single_pfn(pfn, zone, nid); 5325 } 5326 } 5327 } 5328 5329 static void __meminit zone_init_free_lists(struct zone *zone) 5330 { 5331 unsigned int order, t; 5332 for_each_migratetype_order(order, t) { 5333 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 5334 zone->free_area[order].nr_free = 0; 5335 } 5336 } 5337 5338 #ifndef __HAVE_ARCH_MEMMAP_INIT 5339 #define memmap_init(size, nid, zone, start_pfn) \ 5340 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 5341 #endif 5342 5343 static int zone_batchsize(struct zone *zone) 5344 { 5345 #ifdef CONFIG_MMU 5346 int batch; 5347 5348 /* 5349 * The per-cpu-pages pools are set to around 1000th of the 5350 * size of the zone. But no more than 1/2 of a meg. 5351 * 5352 * OK, so we don't know how big the cache is. So guess. 5353 */ 5354 batch = zone->managed_pages / 1024; 5355 if (batch * PAGE_SIZE > 512 * 1024) 5356 batch = (512 * 1024) / PAGE_SIZE; 5357 batch /= 4; /* We effectively *= 4 below */ 5358 if (batch < 1) 5359 batch = 1; 5360 5361 /* 5362 * Clamp the batch to a 2^n - 1 value. Having a power 5363 * of 2 value was found to be more likely to have 5364 * suboptimal cache aliasing properties in some cases. 5365 * 5366 * For example if 2 tasks are alternately allocating 5367 * batches of pages, one task can end up with a lot 5368 * of pages of one half of the possible page colors 5369 * and the other with pages of the other colors. 5370 */ 5371 batch = rounddown_pow_of_two(batch + batch/2) - 1; 5372 5373 return batch; 5374 5375 #else 5376 /* The deferral and batching of frees should be suppressed under NOMMU 5377 * conditions. 5378 * 5379 * The problem is that NOMMU needs to be able to allocate large chunks 5380 * of contiguous memory as there's no hardware page translation to 5381 * assemble apparent contiguous memory from discontiguous pages. 
5382 * 5383 * Queueing large contiguous runs of pages for batching, however, 5384 * causes the pages to actually be freed in smaller chunks. As there 5385 * can be a significant delay between the individual batches being 5386 * recycled, this leads to the once large chunks of space being 5387 * fragmented and becoming unavailable for high-order allocations. 5388 */ 5389 return 0; 5390 #endif 5391 } 5392 5393 /* 5394 * pcp->high and pcp->batch values are related and dependent on one another: 5395 * ->batch must never be higher then ->high. 5396 * The following function updates them in a safe manner without read side 5397 * locking. 5398 * 5399 * Any new users of pcp->batch and pcp->high should ensure they can cope with 5400 * those fields changing asynchronously (acording the the above rule). 5401 * 5402 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function 5403 * outside of boot time (or some other assurance that no concurrent updaters 5404 * exist). 5405 */ 5406 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 5407 unsigned long batch) 5408 { 5409 /* start with a fail safe value for batch */ 5410 pcp->batch = 1; 5411 smp_wmb(); 5412 5413 /* Update high, then batch, in order */ 5414 pcp->high = high; 5415 smp_wmb(); 5416 5417 pcp->batch = batch; 5418 } 5419 5420 /* a companion to pageset_set_high() */ 5421 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) 5422 { 5423 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); 5424 } 5425 5426 static void pageset_init(struct per_cpu_pageset *p) 5427 { 5428 struct per_cpu_pages *pcp; 5429 int migratetype; 5430 5431 memset(p, 0, sizeof(*p)); 5432 5433 pcp = &p->pcp; 5434 pcp->count = 0; 5435 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 5436 INIT_LIST_HEAD(&pcp->lists[migratetype]); 5437 } 5438 5439 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 5440 { 5441 pageset_init(p); 5442 pageset_set_batch(p, batch); 5443 } 5444 5445 /* 5446 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist 5447 * to the value high for the pageset p. 5448 */ 5449 static void pageset_set_high(struct per_cpu_pageset *p, 5450 unsigned long high) 5451 { 5452 unsigned long batch = max(1UL, high / 4); 5453 if ((high / 4) > (PAGE_SHIFT * 8)) 5454 batch = PAGE_SHIFT * 8; 5455 5456 pageset_update(&p->pcp, high, batch); 5457 } 5458 5459 static void pageset_set_high_and_batch(struct zone *zone, 5460 struct per_cpu_pageset *pcp) 5461 { 5462 if (percpu_pagelist_fraction) 5463 pageset_set_high(pcp, 5464 (zone->managed_pages / 5465 percpu_pagelist_fraction)); 5466 else 5467 pageset_set_batch(pcp, zone_batchsize(zone)); 5468 } 5469 5470 static void __meminit zone_pageset_init(struct zone *zone, int cpu) 5471 { 5472 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); 5473 5474 pageset_init(pcp); 5475 pageset_set_high_and_batch(zone, pcp); 5476 } 5477 5478 static void __meminit setup_zone_pageset(struct zone *zone) 5479 { 5480 int cpu; 5481 zone->pageset = alloc_percpu(struct per_cpu_pageset); 5482 for_each_possible_cpu(cpu) 5483 zone_pageset_init(zone, cpu); 5484 } 5485 5486 /* 5487 * Allocate per cpu pagesets and initialize them. 5488 * Before this call only boot pagesets were available. 
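 *
 * When percpu_pagelist_fraction is not set, each pageset ends up with
 * batch = zone_batchsize(zone) and high = 6 * batch. As a rough worked
 * example, a 1GiB zone with 4KiB pages has on the order of 262144 managed
 * pages, which works out to about batch = 31 and high = 186 (illustrative
 * figures only).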
5489 */ 5490 void __init setup_per_cpu_pageset(void) 5491 { 5492 struct pglist_data *pgdat; 5493 struct zone *zone; 5494 5495 for_each_populated_zone(zone) 5496 setup_zone_pageset(zone); 5497 5498 for_each_online_pgdat(pgdat) 5499 pgdat->per_cpu_nodestats = 5500 alloc_percpu(struct per_cpu_nodestat); 5501 } 5502 5503 static __meminit void zone_pcp_init(struct zone *zone) 5504 { 5505 /* 5506 * per cpu subsystem is not up at this point. The following code 5507 * relies on the ability of the linker to provide the 5508 * offset of a (static) per cpu variable into the per cpu area. 5509 */ 5510 zone->pageset = &boot_pageset; 5511 5512 if (populated_zone(zone)) 5513 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 5514 zone->name, zone->present_pages, 5515 zone_batchsize(zone)); 5516 } 5517 5518 int __meminit init_currently_empty_zone(struct zone *zone, 5519 unsigned long zone_start_pfn, 5520 unsigned long size) 5521 { 5522 struct pglist_data *pgdat = zone->zone_pgdat; 5523 5524 pgdat->nr_zones = zone_idx(zone) + 1; 5525 5526 zone->zone_start_pfn = zone_start_pfn; 5527 5528 mminit_dprintk(MMINIT_TRACE, "memmap_init", 5529 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 5530 pgdat->node_id, 5531 (unsigned long)zone_idx(zone), 5532 zone_start_pfn, (zone_start_pfn + size)); 5533 5534 zone_init_free_lists(zone); 5535 zone->initialized = 1; 5536 5537 return 0; 5538 } 5539 5540 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5541 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 5542 5543 /* 5544 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 5545 */ 5546 int __meminit __early_pfn_to_nid(unsigned long pfn, 5547 struct mminit_pfnnid_cache *state) 5548 { 5549 unsigned long start_pfn, end_pfn; 5550 int nid; 5551 5552 if (state->last_start <= pfn && pfn < state->last_end) 5553 return state->last_nid; 5554 5555 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 5556 if (nid != -1) { 5557 state->last_start = start_pfn; 5558 state->last_end = end_pfn; 5559 state->last_nid = nid; 5560 } 5561 5562 return nid; 5563 } 5564 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 5565 5566 /** 5567 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range 5568 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 5569 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid 5570 * 5571 * If an architecture guarantees that all ranges registered contain no holes 5572 * and may be freed, this this function may be used instead of calling 5573 * memblock_free_early_nid() manually. 5574 */ 5575 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) 5576 { 5577 unsigned long start_pfn, end_pfn; 5578 int i, this_nid; 5579 5580 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { 5581 start_pfn = min(start_pfn, max_low_pfn); 5582 end_pfn = min(end_pfn, max_low_pfn); 5583 5584 if (start_pfn < end_pfn) 5585 memblock_free_early_nid(PFN_PHYS(start_pfn), 5586 (end_pfn - start_pfn) << PAGE_SHIFT, 5587 this_nid); 5588 } 5589 } 5590 5591 /** 5592 * sparse_memory_present_with_active_regions - Call memory_present for each active range 5593 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 5594 * 5595 * If an architecture guarantees that all ranges registered contain no holes and may 5596 * be freed, this function may be used instead of calling memory_present() manually. 
5597 */ 5598 void __init sparse_memory_present_with_active_regions(int nid) 5599 { 5600 unsigned long start_pfn, end_pfn; 5601 int i, this_nid; 5602 5603 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 5604 memory_present(this_nid, start_pfn, end_pfn); 5605 } 5606 5607 /** 5608 * get_pfn_range_for_nid - Return the start and end page frames for a node 5609 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 5610 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 5611 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 5612 * 5613 * It returns the start and end page frame of a node based on information 5614 * provided by memblock_set_node(). If called for a node 5615 * with no available memory, a warning is printed and the start and end 5616 * PFNs will be 0. 5617 */ 5618 void __meminit get_pfn_range_for_nid(unsigned int nid, 5619 unsigned long *start_pfn, unsigned long *end_pfn) 5620 { 5621 unsigned long this_start_pfn, this_end_pfn; 5622 int i; 5623 5624 *start_pfn = -1UL; 5625 *end_pfn = 0; 5626 5627 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 5628 *start_pfn = min(*start_pfn, this_start_pfn); 5629 *end_pfn = max(*end_pfn, this_end_pfn); 5630 } 5631 5632 if (*start_pfn == -1UL) 5633 *start_pfn = 0; 5634 } 5635 5636 /* 5637 * This finds a zone that can be used for ZONE_MOVABLE pages. The 5638 * assumption is made that zones within a node are ordered in monotonic 5639 * increasing memory addresses so that the "highest" populated zone is used 5640 */ 5641 static void __init find_usable_zone_for_movable(void) 5642 { 5643 int zone_index; 5644 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 5645 if (zone_index == ZONE_MOVABLE) 5646 continue; 5647 5648 if (arch_zone_highest_possible_pfn[zone_index] > 5649 arch_zone_lowest_possible_pfn[zone_index]) 5650 break; 5651 } 5652 5653 VM_BUG_ON(zone_index == -1); 5654 movable_zone = zone_index; 5655 } 5656 5657 /* 5658 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 5659 * because it is sized independent of architecture. Unlike the other zones, 5660 * the starting point for ZONE_MOVABLE is not fixed. It may be different 5661 * in each node depending on the size of each node and how evenly kernelcore 5662 * is distributed. This helper function adjusts the zone ranges 5663 * provided by the architecture for a given node by using the end of the 5664 * highest usable zone for ZONE_MOVABLE. 
This preserves the assumption that 5665 * zones within a node are in order of monotonic increases memory addresses 5666 */ 5667 static void __meminit adjust_zone_range_for_zone_movable(int nid, 5668 unsigned long zone_type, 5669 unsigned long node_start_pfn, 5670 unsigned long node_end_pfn, 5671 unsigned long *zone_start_pfn, 5672 unsigned long *zone_end_pfn) 5673 { 5674 /* Only adjust if ZONE_MOVABLE is on this node */ 5675 if (zone_movable_pfn[nid]) { 5676 /* Size ZONE_MOVABLE */ 5677 if (zone_type == ZONE_MOVABLE) { 5678 *zone_start_pfn = zone_movable_pfn[nid]; 5679 *zone_end_pfn = min(node_end_pfn, 5680 arch_zone_highest_possible_pfn[movable_zone]); 5681 5682 /* Adjust for ZONE_MOVABLE starting within this range */ 5683 } else if (!mirrored_kernelcore && 5684 *zone_start_pfn < zone_movable_pfn[nid] && 5685 *zone_end_pfn > zone_movable_pfn[nid]) { 5686 *zone_end_pfn = zone_movable_pfn[nid]; 5687 5688 /* Check if this whole range is within ZONE_MOVABLE */ 5689 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 5690 *zone_start_pfn = *zone_end_pfn; 5691 } 5692 } 5693 5694 /* 5695 * Return the number of pages a zone spans in a node, including holes 5696 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 5697 */ 5698 static unsigned long __meminit zone_spanned_pages_in_node(int nid, 5699 unsigned long zone_type, 5700 unsigned long node_start_pfn, 5701 unsigned long node_end_pfn, 5702 unsigned long *zone_start_pfn, 5703 unsigned long *zone_end_pfn, 5704 unsigned long *ignored) 5705 { 5706 /* When hotadd a new node from cpu_up(), the node should be empty */ 5707 if (!node_start_pfn && !node_end_pfn) 5708 return 0; 5709 5710 /* Get the start and end of the zone */ 5711 *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 5712 *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 5713 adjust_zone_range_for_zone_movable(nid, zone_type, 5714 node_start_pfn, node_end_pfn, 5715 zone_start_pfn, zone_end_pfn); 5716 5717 /* Check that this node has pages within the zone's required range */ 5718 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 5719 return 0; 5720 5721 /* Move the zone boundaries inside the node if necessary */ 5722 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 5723 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 5724 5725 /* Return the spanned pages */ 5726 return *zone_end_pfn - *zone_start_pfn; 5727 } 5728 5729 /* 5730 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 5731 * then all holes in the requested range will be accounted for. 5732 */ 5733 unsigned long __meminit __absent_pages_in_range(int nid, 5734 unsigned long range_start_pfn, 5735 unsigned long range_end_pfn) 5736 { 5737 unsigned long nr_absent = range_end_pfn - range_start_pfn; 5738 unsigned long start_pfn, end_pfn; 5739 int i; 5740 5741 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 5742 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 5743 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 5744 nr_absent -= end_pfn - start_pfn; 5745 } 5746 return nr_absent; 5747 } 5748 5749 /** 5750 * absent_pages_in_range - Return number of page frames in holes within a range 5751 * @start_pfn: The start PFN to start searching for holes 5752 * @end_pfn: The end PFN to stop searching for holes 5753 * 5754 * It returns the number of pages frames in memory holes within a range. 
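 *
 * For example (illustrative), with memblock reporting memory at PFNs
 * [0x100, 0x200) and [0x300, 0x400), absent_pages_in_range(0x100, 0x400)
 * accounts the hole [0x200, 0x300) and returns 0x100 pages.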
5755 */ 5756 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 5757 unsigned long end_pfn) 5758 { 5759 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 5760 } 5761 5762 /* Return the number of page frames in holes in a zone on a node */ 5763 static unsigned long __meminit zone_absent_pages_in_node(int nid, 5764 unsigned long zone_type, 5765 unsigned long node_start_pfn, 5766 unsigned long node_end_pfn, 5767 unsigned long *ignored) 5768 { 5769 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 5770 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 5771 unsigned long zone_start_pfn, zone_end_pfn; 5772 unsigned long nr_absent; 5773 5774 /* When hotadd a new node from cpu_up(), the node should be empty */ 5775 if (!node_start_pfn && !node_end_pfn) 5776 return 0; 5777 5778 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 5779 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 5780 5781 adjust_zone_range_for_zone_movable(nid, zone_type, 5782 node_start_pfn, node_end_pfn, 5783 &zone_start_pfn, &zone_end_pfn); 5784 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 5785 5786 /* 5787 * ZONE_MOVABLE handling. 5788 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 5789 * and vice versa. 5790 */ 5791 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 5792 unsigned long start_pfn, end_pfn; 5793 struct memblock_region *r; 5794 5795 for_each_memblock(memory, r) { 5796 start_pfn = clamp(memblock_region_memory_base_pfn(r), 5797 zone_start_pfn, zone_end_pfn); 5798 end_pfn = clamp(memblock_region_memory_end_pfn(r), 5799 zone_start_pfn, zone_end_pfn); 5800 5801 if (zone_type == ZONE_MOVABLE && 5802 memblock_is_mirror(r)) 5803 nr_absent += end_pfn - start_pfn; 5804 5805 if (zone_type == ZONE_NORMAL && 5806 !memblock_is_mirror(r)) 5807 nr_absent += end_pfn - start_pfn; 5808 } 5809 } 5810 5811 return nr_absent; 5812 } 5813 5814 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5815 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 5816 unsigned long zone_type, 5817 unsigned long node_start_pfn, 5818 unsigned long node_end_pfn, 5819 unsigned long *zone_start_pfn, 5820 unsigned long *zone_end_pfn, 5821 unsigned long *zones_size) 5822 { 5823 unsigned int zone; 5824 5825 *zone_start_pfn = node_start_pfn; 5826 for (zone = 0; zone < zone_type; zone++) 5827 *zone_start_pfn += zones_size[zone]; 5828 5829 *zone_end_pfn = *zone_start_pfn + zones_size[zone_type]; 5830 5831 return zones_size[zone_type]; 5832 } 5833 5834 static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 5835 unsigned long zone_type, 5836 unsigned long node_start_pfn, 5837 unsigned long node_end_pfn, 5838 unsigned long *zholes_size) 5839 { 5840 if (!zholes_size) 5841 return 0; 5842 5843 return zholes_size[zone_type]; 5844 } 5845 5846 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5847 5848 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 5849 unsigned long node_start_pfn, 5850 unsigned long node_end_pfn, 5851 unsigned long *zones_size, 5852 unsigned long *zholes_size) 5853 { 5854 unsigned long realtotalpages = 0, totalpages = 0; 5855 enum zone_type i; 5856 5857 for (i = 0; i < MAX_NR_ZONES; i++) { 5858 struct zone *zone = pgdat->node_zones + i; 5859 unsigned long zone_start_pfn, zone_end_pfn; 5860 unsigned long size, real_size; 5861 5862 size = zone_spanned_pages_in_node(pgdat->node_id, i, 5863 node_start_pfn, 5864 node_end_pfn, 5865 &zone_start_pfn, 5866 &zone_end_pfn, 5867 
zones_size); 5868 real_size = size - zone_absent_pages_in_node(pgdat->node_id, i, 5869 node_start_pfn, node_end_pfn, 5870 zholes_size); 5871 if (size) 5872 zone->zone_start_pfn = zone_start_pfn; 5873 else 5874 zone->zone_start_pfn = 0; 5875 zone->spanned_pages = size; 5876 zone->present_pages = real_size; 5877 5878 totalpages += size; 5879 realtotalpages += real_size; 5880 } 5881 5882 pgdat->node_spanned_pages = totalpages; 5883 pgdat->node_present_pages = realtotalpages; 5884 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 5885 realtotalpages); 5886 } 5887 5888 #ifndef CONFIG_SPARSEMEM 5889 /* 5890 * Calculate the size of the zone->blockflags rounded to an unsigned long 5891 * Start by making sure zonesize is a multiple of pageblock_order by rounding 5892 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 5893 * round what is now in bits to nearest long in bits, then return it in 5894 * bytes. 5895 */ 5896 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 5897 { 5898 unsigned long usemapsize; 5899 5900 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 5901 usemapsize = roundup(zonesize, pageblock_nr_pages); 5902 usemapsize = usemapsize >> pageblock_order; 5903 usemapsize *= NR_PAGEBLOCK_BITS; 5904 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 5905 5906 return usemapsize / 8; 5907 } 5908 5909 static void __init setup_usemap(struct pglist_data *pgdat, 5910 struct zone *zone, 5911 unsigned long zone_start_pfn, 5912 unsigned long zonesize) 5913 { 5914 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); 5915 zone->pageblock_flags = NULL; 5916 if (usemapsize) 5917 zone->pageblock_flags = 5918 memblock_virt_alloc_node_nopanic(usemapsize, 5919 pgdat->node_id); 5920 } 5921 #else 5922 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, 5923 unsigned long zone_start_pfn, unsigned long zonesize) {} 5924 #endif /* CONFIG_SPARSEMEM */ 5925 5926 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 5927 5928 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 5929 void __paginginit set_pageblock_order(void) 5930 { 5931 unsigned int order; 5932 5933 /* Check that pageblock_nr_pages has not already been setup */ 5934 if (pageblock_order) 5935 return; 5936 5937 if (HPAGE_SHIFT > PAGE_SHIFT) 5938 order = HUGETLB_PAGE_ORDER; 5939 else 5940 order = MAX_ORDER - 1; 5941 5942 /* 5943 * Assume the largest contiguous order of interest is a huge page. 5944 * This value may be variable depending on boot parameters on IA64 and 5945 * powerpc. 5946 */ 5947 pageblock_order = order; 5948 } 5949 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 5950 5951 /* 5952 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 5953 * is unused as pageblock_order is set at compile-time. See 5954 * include/linux/pageblock-flags.h for the values of pageblock_order based on 5955 * the kernel config 5956 */ 5957 void __paginginit set_pageblock_order(void) 5958 { 5959 } 5960 5961 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 5962 5963 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, 5964 unsigned long present_pages) 5965 { 5966 unsigned long pages = spanned_pages; 5967 5968 /* 5969 * Provide a more accurate estimation if there are holes within 5970 * the zone and SPARSEMEM is in use. 
If there are holes within the 5971 * zone, each populated memory region may cost us one or two extra 5972 * memmap pages due to alignment because memmap pages for each 5973 * populated regions may not be naturally aligned on page boundary. 5974 * So the (present_pages >> 4) heuristic is a tradeoff for that. 5975 */ 5976 if (spanned_pages > present_pages + (present_pages >> 4) && 5977 IS_ENABLED(CONFIG_SPARSEMEM)) 5978 pages = present_pages; 5979 5980 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 5981 } 5982 5983 /* 5984 * Set up the zone data structures: 5985 * - mark all pages reserved 5986 * - mark all memory queues empty 5987 * - clear the memory bitmaps 5988 * 5989 * NOTE: pgdat should get zeroed by caller. 5990 */ 5991 static void __paginginit free_area_init_core(struct pglist_data *pgdat) 5992 { 5993 enum zone_type j; 5994 int nid = pgdat->node_id; 5995 int ret; 5996 5997 pgdat_resize_init(pgdat); 5998 #ifdef CONFIG_NUMA_BALANCING 5999 spin_lock_init(&pgdat->numabalancing_migrate_lock); 6000 pgdat->numabalancing_migrate_nr_pages = 0; 6001 pgdat->numabalancing_migrate_next_window = jiffies; 6002 #endif 6003 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 6004 spin_lock_init(&pgdat->split_queue_lock); 6005 INIT_LIST_HEAD(&pgdat->split_queue); 6006 pgdat->split_queue_len = 0; 6007 #endif 6008 init_waitqueue_head(&pgdat->kswapd_wait); 6009 init_waitqueue_head(&pgdat->pfmemalloc_wait); 6010 #ifdef CONFIG_COMPACTION 6011 init_waitqueue_head(&pgdat->kcompactd_wait); 6012 #endif 6013 pgdat_page_ext_init(pgdat); 6014 spin_lock_init(&pgdat->lru_lock); 6015 lruvec_init(node_lruvec(pgdat)); 6016 6017 for (j = 0; j < MAX_NR_ZONES; j++) { 6018 struct zone *zone = pgdat->node_zones + j; 6019 unsigned long size, realsize, freesize, memmap_pages; 6020 unsigned long zone_start_pfn = zone->zone_start_pfn; 6021 6022 size = zone->spanned_pages; 6023 realsize = freesize = zone->present_pages; 6024 6025 /* 6026 * Adjust freesize so that it accounts for how much memory 6027 * is used by this zone for memmap. This affects the watermark 6028 * and per-cpu initialisations 6029 */ 6030 memmap_pages = calc_memmap_size(size, realsize); 6031 if (!is_highmem_idx(j)) { 6032 if (freesize >= memmap_pages) { 6033 freesize -= memmap_pages; 6034 if (memmap_pages) 6035 printk(KERN_DEBUG 6036 " %s zone: %lu pages used for memmap\n", 6037 zone_names[j], memmap_pages); 6038 } else 6039 pr_warn(" %s zone: %lu pages exceeds freesize %lu\n", 6040 zone_names[j], memmap_pages, freesize); 6041 } 6042 6043 /* Account for reserved pages */ 6044 if (j == 0 && freesize > dma_reserve) { 6045 freesize -= dma_reserve; 6046 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 6047 zone_names[0], dma_reserve); 6048 } 6049 6050 if (!is_highmem_idx(j)) 6051 nr_kernel_pages += freesize; 6052 /* Charge for highmem memmap if there are enough kernel pages */ 6053 else if (nr_kernel_pages > memmap_pages * 2) 6054 nr_kernel_pages -= memmap_pages; 6055 nr_all_pages += freesize; 6056 6057 /* 6058 * Set an approximate value for lowmem here, it will be adjusted 6059 * when the bootmem allocator frees pages into the buddy system. 6060 * And all highmem pages will be managed by the buddy system. 6061 */ 6062 zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; 6063 #ifdef CONFIG_NUMA 6064 zone->node = nid; 6065 #endif 6066 zone->name = zone_names[j]; 6067 zone->zone_pgdat = pgdat; 6068 spin_lock_init(&zone->lock); 6069 zone_seqlock_init(zone); 6070 zone_pcp_init(zone); 6071 6072 if (!size) 6073 continue; 6074 6075 set_pageblock_order(); 6076 setup_usemap(pgdat, zone, zone_start_pfn, size); 6077 ret = init_currently_empty_zone(zone, zone_start_pfn, size); 6078 BUG_ON(ret); 6079 memmap_init(size, nid, j, zone_start_pfn); 6080 } 6081 } 6082 6083 static void __ref alloc_node_mem_map(struct pglist_data *pgdat) 6084 { 6085 unsigned long __maybe_unused start = 0; 6086 unsigned long __maybe_unused offset = 0; 6087 6088 /* Skip empty nodes */ 6089 if (!pgdat->node_spanned_pages) 6090 return; 6091 6092 #ifdef CONFIG_FLAT_NODE_MEM_MAP 6093 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 6094 offset = pgdat->node_start_pfn - start; 6095 /* ia64 gets its own node_mem_map, before this, without bootmem */ 6096 if (!pgdat->node_mem_map) { 6097 unsigned long size, end; 6098 struct page *map; 6099 6100 /* 6101 * The zone's endpoints aren't required to be MAX_ORDER 6102 * aligned but the node_mem_map endpoints must be in order 6103 * for the buddy allocator to function correctly. 6104 */ 6105 end = pgdat_end_pfn(pgdat); 6106 end = ALIGN(end, MAX_ORDER_NR_PAGES); 6107 size = (end - start) * sizeof(struct page); 6108 map = alloc_remap(pgdat->node_id, size); 6109 if (!map) 6110 map = memblock_virt_alloc_node_nopanic(size, 6111 pgdat->node_id); 6112 pgdat->node_mem_map = map + offset; 6113 } 6114 #ifndef CONFIG_NEED_MULTIPLE_NODES 6115 /* 6116 * With no DISCONTIG, the global mem_map is just set as node 0's 6117 */ 6118 if (pgdat == NODE_DATA(0)) { 6119 mem_map = NODE_DATA(0)->node_mem_map; 6120 #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM) 6121 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 6122 mem_map -= offset; 6123 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 6124 } 6125 #endif 6126 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 6127 } 6128 6129 void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 6130 unsigned long node_start_pfn, unsigned long *zholes_size) 6131 { 6132 pg_data_t *pgdat = NODE_DATA(nid); 6133 unsigned long start_pfn = 0; 6134 unsigned long end_pfn = 0; 6135 6136 /* pg_data_t should be reset to zero when it's allocated */ 6137 WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx); 6138 6139 reset_deferred_meminit(pgdat); 6140 pgdat->node_id = nid; 6141 pgdat->node_start_pfn = node_start_pfn; 6142 pgdat->per_cpu_nodestats = NULL; 6143 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 6144 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 6145 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 6146 (u64)start_pfn << PAGE_SHIFT, 6147 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 6148 #else 6149 start_pfn = node_start_pfn; 6150 #endif 6151 calculate_node_totalpages(pgdat, start_pfn, end_pfn, 6152 zones_size, zholes_size); 6153 6154 alloc_node_mem_map(pgdat); 6155 #ifdef CONFIG_FLAT_NODE_MEM_MAP 6156 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 6157 nid, (unsigned long)pgdat, 6158 (unsigned long)pgdat->node_mem_map); 6159 #endif 6160 6161 free_area_init_core(pgdat); 6162 } 6163 6164 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 6165 6166 #if MAX_NUMNODES > 1 6167 /* 6168 * Figure out the number of possible node ids. 
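 * nr_node_ids is derived from the highest bit set in node_possible_map, so
 * it covers every node id the system may ever use, not just the nodes that
 * are currently online.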
6169 */ 6170 void __init setup_nr_node_ids(void) 6171 { 6172 unsigned int highest; 6173 6174 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 6175 nr_node_ids = highest + 1; 6176 } 6177 #endif 6178 6179 /** 6180 * node_map_pfn_alignment - determine the maximum internode alignment 6181 * 6182 * This function should be called after node map is populated and sorted. 6183 * It calculates the maximum power of two alignment which can distinguish 6184 * all the nodes. 6185 * 6186 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 6187 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 6188 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 6189 * shifted, 1GiB is enough and this function will indicate so. 6190 * 6191 * This is used to test whether pfn -> nid mapping of the chosen memory 6192 * model has fine enough granularity to avoid incorrect mapping for the 6193 * populated node map. 6194 * 6195 * Returns the determined alignment in pfn's. 0 if there is no alignment 6196 * requirement (single node). 6197 */ 6198 unsigned long __init node_map_pfn_alignment(void) 6199 { 6200 unsigned long accl_mask = 0, last_end = 0; 6201 unsigned long start, end, mask; 6202 int last_nid = -1; 6203 int i, nid; 6204 6205 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 6206 if (!start || last_nid < 0 || last_nid == nid) { 6207 last_nid = nid; 6208 last_end = end; 6209 continue; 6210 } 6211 6212 /* 6213 * Start with a mask granular enough to pin-point to the 6214 * start pfn and tick off bits one-by-one until it becomes 6215 * too coarse to separate the current node from the last. 6216 */ 6217 mask = ~((1 << __ffs(start)) - 1); 6218 while (mask && last_end <= (start & (mask << 1))) 6219 mask <<= 1; 6220 6221 /* accumulate all internode masks */ 6222 accl_mask |= mask; 6223 } 6224 6225 /* convert mask to number of pages */ 6226 return ~accl_mask + 1; 6227 } 6228 6229 /* Find the lowest pfn for a node */ 6230 static unsigned long __init find_min_pfn_for_node(int nid) 6231 { 6232 unsigned long min_pfn = ULONG_MAX; 6233 unsigned long start_pfn; 6234 int i; 6235 6236 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) 6237 min_pfn = min(min_pfn, start_pfn); 6238 6239 if (min_pfn == ULONG_MAX) { 6240 pr_warn("Could not find start_pfn for node %d\n", nid); 6241 return 0; 6242 } 6243 6244 return min_pfn; 6245 } 6246 6247 /** 6248 * find_min_pfn_with_active_regions - Find the minimum PFN registered 6249 * 6250 * It returns the minimum PFN based on information provided via 6251 * memblock_set_node(). 6252 */ 6253 unsigned long __init find_min_pfn_with_active_regions(void) 6254 { 6255 return find_min_pfn_for_node(MAX_NUMNODES); 6256 } 6257 6258 /* 6259 * early_calculate_totalpages() 6260 * Sum pages in active regions for movable zone. 6261 * Populate N_MEMORY for calculating usable_nodes. 6262 */ 6263 static unsigned long __init early_calculate_totalpages(void) 6264 { 6265 unsigned long totalpages = 0; 6266 unsigned long start_pfn, end_pfn; 6267 int i, nid; 6268 6269 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 6270 unsigned long pages = end_pfn - start_pfn; 6271 6272 totalpages += pages; 6273 if (pages) 6274 node_set_state(nid, N_MEMORY); 6275 } 6276 return totalpages; 6277 } 6278 6279 /* 6280 * Find the PFN the Movable zone begins in each node. Kernel memory 6281 * is spread evenly between nodes as long as the nodes have enough 6282 * memory. 
When they don't, some nodes will have more kernelcore than 6283 * others 6284 */ 6285 static void __init find_zone_movable_pfns_for_nodes(void) 6286 { 6287 int i, nid; 6288 unsigned long usable_startpfn; 6289 unsigned long kernelcore_node, kernelcore_remaining; 6290 /* save the state before borrow the nodemask */ 6291 nodemask_t saved_node_state = node_states[N_MEMORY]; 6292 unsigned long totalpages = early_calculate_totalpages(); 6293 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 6294 struct memblock_region *r; 6295 6296 /* Need to find movable_zone earlier when movable_node is specified. */ 6297 find_usable_zone_for_movable(); 6298 6299 /* 6300 * If movable_node is specified, ignore kernelcore and movablecore 6301 * options. 6302 */ 6303 if (movable_node_is_enabled()) { 6304 for_each_memblock(memory, r) { 6305 if (!memblock_is_hotpluggable(r)) 6306 continue; 6307 6308 nid = r->nid; 6309 6310 usable_startpfn = PFN_DOWN(r->base); 6311 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 6312 min(usable_startpfn, zone_movable_pfn[nid]) : 6313 usable_startpfn; 6314 } 6315 6316 goto out2; 6317 } 6318 6319 /* 6320 * If kernelcore=mirror is specified, ignore movablecore option 6321 */ 6322 if (mirrored_kernelcore) { 6323 bool mem_below_4gb_not_mirrored = false; 6324 6325 for_each_memblock(memory, r) { 6326 if (memblock_is_mirror(r)) 6327 continue; 6328 6329 nid = r->nid; 6330 6331 usable_startpfn = memblock_region_memory_base_pfn(r); 6332 6333 if (usable_startpfn < 0x100000) { 6334 mem_below_4gb_not_mirrored = true; 6335 continue; 6336 } 6337 6338 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 6339 min(usable_startpfn, zone_movable_pfn[nid]) : 6340 usable_startpfn; 6341 } 6342 6343 if (mem_below_4gb_not_mirrored) 6344 pr_warn("This configuration results in unmirrored kernel memory."); 6345 6346 goto out2; 6347 } 6348 6349 /* 6350 * If movablecore=nn[KMG] was specified, calculate what size of 6351 * kernelcore that corresponds so that memory usable for 6352 * any allocation type is evenly spread. If both kernelcore 6353 * and movablecore are specified, then the value of kernelcore 6354 * will be used for required_kernelcore if it's greater than 6355 * what movablecore would have allowed. 6356 */ 6357 if (required_movablecore) { 6358 unsigned long corepages; 6359 6360 /* 6361 * Round-up so that ZONE_MOVABLE is at least as large as what 6362 * was requested by the user 6363 */ 6364 required_movablecore = 6365 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 6366 required_movablecore = min(totalpages, required_movablecore); 6367 corepages = totalpages - required_movablecore; 6368 6369 required_kernelcore = max(required_kernelcore, corepages); 6370 } 6371 6372 /* 6373 * If kernelcore was not specified or kernelcore size is larger 6374 * than totalpages, there is no ZONE_MOVABLE. 
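 * Otherwise kernelcore is spread evenly across the nodes that have memory:
 * e.g. (illustrative numbers) kernelcore=2G on a machine with two populated
 * nodes asks each node to keep roughly 1G as kernelcore, and the remainder
 * of each node becomes ZONE_MOVABLE.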
6375 */ 6376 if (!required_kernelcore || required_kernelcore >= totalpages) 6377 goto out; 6378 6379 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 6380 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 6381 6382 restart: 6383 /* Spread kernelcore memory as evenly as possible throughout nodes */ 6384 kernelcore_node = required_kernelcore / usable_nodes; 6385 for_each_node_state(nid, N_MEMORY) { 6386 unsigned long start_pfn, end_pfn; 6387 6388 /* 6389 * Recalculate kernelcore_node if the division per node 6390 * now exceeds what is necessary to satisfy the requested 6391 * amount of memory for the kernel 6392 */ 6393 if (required_kernelcore < kernelcore_node) 6394 kernelcore_node = required_kernelcore / usable_nodes; 6395 6396 /* 6397 * As the map is walked, we track how much memory is usable 6398 * by the kernel using kernelcore_remaining. When it is 6399 * 0, the rest of the node is usable by ZONE_MOVABLE 6400 */ 6401 kernelcore_remaining = kernelcore_node; 6402 6403 /* Go through each range of PFNs within this node */ 6404 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 6405 unsigned long size_pages; 6406 6407 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 6408 if (start_pfn >= end_pfn) 6409 continue; 6410 6411 /* Account for what is only usable for kernelcore */ 6412 if (start_pfn < usable_startpfn) { 6413 unsigned long kernel_pages; 6414 kernel_pages = min(end_pfn, usable_startpfn) 6415 - start_pfn; 6416 6417 kernelcore_remaining -= min(kernel_pages, 6418 kernelcore_remaining); 6419 required_kernelcore -= min(kernel_pages, 6420 required_kernelcore); 6421 6422 /* Continue if range is now fully accounted */ 6423 if (end_pfn <= usable_startpfn) { 6424 6425 /* 6426 * Push zone_movable_pfn to the end so 6427 * that if we have to rebalance 6428 * kernelcore across nodes, we will 6429 * not double account here 6430 */ 6431 zone_movable_pfn[nid] = end_pfn; 6432 continue; 6433 } 6434 start_pfn = usable_startpfn; 6435 } 6436 6437 /* 6438 * The usable PFN range for ZONE_MOVABLE is from 6439 * start_pfn->end_pfn. Calculate size_pages as the 6440 * number of pages used as kernelcore 6441 */ 6442 size_pages = end_pfn - start_pfn; 6443 if (size_pages > kernelcore_remaining) 6444 size_pages = kernelcore_remaining; 6445 zone_movable_pfn[nid] = start_pfn + size_pages; 6446 6447 /* 6448 * Some kernelcore has been met, update counts and 6449 * break if the kernelcore for this node has been 6450 * satisfied 6451 */ 6452 required_kernelcore -= min(required_kernelcore, 6453 size_pages); 6454 kernelcore_remaining -= size_pages; 6455 if (!kernelcore_remaining) 6456 break; 6457 } 6458 } 6459 6460 /* 6461 * If there is still required_kernelcore, we do another pass with one 6462 * less node in the count. This will push zone_movable_pfn[nid] further 6463 * along on the nodes that still have memory until kernelcore is 6464 * satisfied 6465 */ 6466 usable_nodes--; 6467 if (usable_nodes && required_kernelcore > usable_nodes) 6468 goto restart; 6469 6470 out2: 6471 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 6472 for (nid = 0; nid < MAX_NUMNODES; nid++) 6473 zone_movable_pfn[nid] = 6474 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 6475 6476 out: 6477 /* restore the node_state */ 6478 node_states[N_MEMORY] = saved_node_state; 6479 } 6480 6481 /* Any regular or high memory on that node ? 
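 * check_for_memory() below sets N_HIGH_MEMORY for the node once any
 * populated zone is found, and N_NORMAL_MEMORY as well when that zone is
 * at or below ZONE_NORMAL.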
*/ 6482 static void check_for_memory(pg_data_t *pgdat, int nid) 6483 { 6484 enum zone_type zone_type; 6485 6486 if (N_MEMORY == N_NORMAL_MEMORY) 6487 return; 6488 6489 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 6490 struct zone *zone = &pgdat->node_zones[zone_type]; 6491 if (populated_zone(zone)) { 6492 node_set_state(nid, N_HIGH_MEMORY); 6493 if (N_NORMAL_MEMORY != N_HIGH_MEMORY && 6494 zone_type <= ZONE_NORMAL) 6495 node_set_state(nid, N_NORMAL_MEMORY); 6496 break; 6497 } 6498 } 6499 } 6500 6501 /** 6502 * free_area_init_nodes - Initialise all pg_data_t and zone data 6503 * @max_zone_pfn: an array of max PFNs for each zone 6504 * 6505 * This will call free_area_init_node() for each active node in the system. 6506 * Using the page ranges provided by memblock_set_node(), the size of each 6507 * zone in each node and their holes is calculated. If the maximum PFN 6508 * between two adjacent zones match, it is assumed that the zone is empty. 6509 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 6510 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 6511 * starts where the previous one ended. For example, ZONE_DMA32 starts 6512 * at arch_max_dma_pfn. 6513 */ 6514 void __init free_area_init_nodes(unsigned long *max_zone_pfn) 6515 { 6516 unsigned long start_pfn, end_pfn; 6517 int i, nid; 6518 6519 /* Record where the zone boundaries are */ 6520 memset(arch_zone_lowest_possible_pfn, 0, 6521 sizeof(arch_zone_lowest_possible_pfn)); 6522 memset(arch_zone_highest_possible_pfn, 0, 6523 sizeof(arch_zone_highest_possible_pfn)); 6524 6525 start_pfn = find_min_pfn_with_active_regions(); 6526 6527 for (i = 0; i < MAX_NR_ZONES; i++) { 6528 if (i == ZONE_MOVABLE) 6529 continue; 6530 6531 end_pfn = max(max_zone_pfn[i], start_pfn); 6532 arch_zone_lowest_possible_pfn[i] = start_pfn; 6533 arch_zone_highest_possible_pfn[i] = end_pfn; 6534 6535 start_pfn = end_pfn; 6536 } 6537 6538 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 6539 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 6540 find_zone_movable_pfns_for_nodes(); 6541 6542 /* Print out the zone ranges */ 6543 pr_info("Zone ranges:\n"); 6544 for (i = 0; i < MAX_NR_ZONES; i++) { 6545 if (i == ZONE_MOVABLE) 6546 continue; 6547 pr_info(" %-8s ", zone_names[i]); 6548 if (arch_zone_lowest_possible_pfn[i] == 6549 arch_zone_highest_possible_pfn[i]) 6550 pr_cont("empty\n"); 6551 else 6552 pr_cont("[mem %#018Lx-%#018Lx]\n", 6553 (u64)arch_zone_lowest_possible_pfn[i] 6554 << PAGE_SHIFT, 6555 ((u64)arch_zone_highest_possible_pfn[i] 6556 << PAGE_SHIFT) - 1); 6557 } 6558 6559 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 6560 pr_info("Movable zone start for each node\n"); 6561 for (i = 0; i < MAX_NUMNODES; i++) { 6562 if (zone_movable_pfn[i]) 6563 pr_info(" Node %d: %#018Lx\n", i, 6564 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 6565 } 6566 6567 /* Print out the early node map */ 6568 pr_info("Early memory node ranges\n"); 6569 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) 6570 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 6571 (u64)start_pfn << PAGE_SHIFT, 6572 ((u64)end_pfn << PAGE_SHIFT) - 1); 6573 6574 /* Initialise every node */ 6575 mminit_verify_pageflags_layout(); 6576 setup_nr_node_ids(); 6577 for_each_online_node(nid) { 6578 pg_data_t *pgdat = NODE_DATA(nid); 6579 free_area_init_node(nid, NULL, 6580 find_min_pfn_for_node(nid), NULL); 6581 6582 /* Any memory on that node */ 6583 if (pgdat->node_present_pages) 6584 node_set_state(nid, 
N_MEMORY); 6585 check_for_memory(pgdat, nid); 6586 } 6587 } 6588 6589 static int __init cmdline_parse_core(char *p, unsigned long *core) 6590 { 6591 unsigned long long coremem; 6592 if (!p) 6593 return -EINVAL; 6594 6595 coremem = memparse(p, &p); 6596 *core = coremem >> PAGE_SHIFT; 6597 6598 /* Paranoid check that UL is enough for the coremem value */ 6599 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 6600 6601 return 0; 6602 } 6603 6604 /* 6605 * kernelcore=size sets the amount of memory for use for allocations that 6606 * cannot be reclaimed or migrated. 6607 */ 6608 static int __init cmdline_parse_kernelcore(char *p) 6609 { 6610 /* parse kernelcore=mirror */ 6611 if (parse_option_str(p, "mirror")) { 6612 mirrored_kernelcore = true; 6613 return 0; 6614 } 6615 6616 return cmdline_parse_core(p, &required_kernelcore); 6617 } 6618 6619 /* 6620 * movablecore=size sets the amount of memory for use for allocations that 6621 * can be reclaimed or migrated. 6622 */ 6623 static int __init cmdline_parse_movablecore(char *p) 6624 { 6625 return cmdline_parse_core(p, &required_movablecore); 6626 } 6627 6628 early_param("kernelcore", cmdline_parse_kernelcore); 6629 early_param("movablecore", cmdline_parse_movablecore); 6630 6631 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 6632 6633 void adjust_managed_page_count(struct page *page, long count) 6634 { 6635 spin_lock(&managed_page_count_lock); 6636 page_zone(page)->managed_pages += count; 6637 totalram_pages += count; 6638 #ifdef CONFIG_HIGHMEM 6639 if (PageHighMem(page)) 6640 totalhigh_pages += count; 6641 #endif 6642 spin_unlock(&managed_page_count_lock); 6643 } 6644 EXPORT_SYMBOL(adjust_managed_page_count); 6645 6646 unsigned long free_reserved_area(void *start, void *end, int poison, char *s) 6647 { 6648 void *pos; 6649 unsigned long pages = 0; 6650 6651 start = (void *)PAGE_ALIGN((unsigned long)start); 6652 end = (void *)((unsigned long)end & PAGE_MASK); 6653 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 6654 if ((unsigned int)poison <= 0xFF) 6655 memset(pos, poison, PAGE_SIZE); 6656 free_reserved_page(virt_to_page(pos)); 6657 } 6658 6659 if (pages && s) 6660 pr_info("Freeing %s memory: %ldK\n", 6661 s, pages << (PAGE_SHIFT - 10)); 6662 6663 return pages; 6664 } 6665 EXPORT_SYMBOL(free_reserved_area); 6666 6667 #ifdef CONFIG_HIGHMEM 6668 void free_highmem_page(struct page *page) 6669 { 6670 __free_reserved_page(page); 6671 totalram_pages++; 6672 page_zone(page)->managed_pages++; 6673 totalhigh_pages++; 6674 } 6675 #endif 6676 6677 6678 void __init mem_init_print_info(const char *str) 6679 { 6680 unsigned long physpages, codesize, datasize, rosize, bss_size; 6681 unsigned long init_code_size, init_data_size; 6682 6683 physpages = get_num_physpages(); 6684 codesize = _etext - _stext; 6685 datasize = _edata - _sdata; 6686 rosize = __end_rodata - __start_rodata; 6687 bss_size = __bss_stop - __bss_start; 6688 init_data_size = __init_end - __init_begin; 6689 init_code_size = _einittext - _sinittext; 6690 6691 /* 6692 * Detect special cases and adjust section sizes accordingly: 6693 * 1) .init.* may be embedded into .data sections 6694 * 2) .init.text.* may be out of [__init_begin, __init_end], 6695 * please refer to arch/tile/kernel/vmlinux.lds.S. 6696 * 3) .rodata.* may be embedded into .text or .data sections. 
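 * The adj_init_size() macro below handles this by subtracting @adj from
 * @size only when @pos lies inside [@start, @end), so overlapping sections
 * are not double-counted in the summary line.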
6697 */ 6698 #define adj_init_size(start, end, size, pos, adj) \ 6699 do { \ 6700 if (start <= pos && pos < end && size > adj) \ 6701 size -= adj; \ 6702 } while (0) 6703 6704 adj_init_size(__init_begin, __init_end, init_data_size, 6705 _sinittext, init_code_size); 6706 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 6707 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 6708 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 6709 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 6710 6711 #undef adj_init_size 6712 6713 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 6714 #ifdef CONFIG_HIGHMEM 6715 ", %luK highmem" 6716 #endif 6717 "%s%s)\n", 6718 nr_free_pages() << (PAGE_SHIFT - 10), 6719 physpages << (PAGE_SHIFT - 10), 6720 codesize >> 10, datasize >> 10, rosize >> 10, 6721 (init_data_size + init_code_size) >> 10, bss_size >> 10, 6722 (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10), 6723 totalcma_pages << (PAGE_SHIFT - 10), 6724 #ifdef CONFIG_HIGHMEM 6725 totalhigh_pages << (PAGE_SHIFT - 10), 6726 #endif 6727 str ? ", " : "", str ? str : ""); 6728 } 6729 6730 /** 6731 * set_dma_reserve - set the specified number of pages reserved in the first zone 6732 * @new_dma_reserve: The number of pages to mark reserved 6733 * 6734 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 6735 * In the DMA zone, a significant percentage may be consumed by kernel image 6736 * and other unfreeable allocations which can skew the watermarks badly. This 6737 * function may optionally be used to account for unfreeable pages in the 6738 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 6739 * smaller per-cpu batchsize. 6740 */ 6741 void __init set_dma_reserve(unsigned long new_dma_reserve) 6742 { 6743 dma_reserve = new_dma_reserve; 6744 } 6745 6746 void __init free_area_init(unsigned long *zones_size) 6747 { 6748 free_area_init_node(0, zones_size, 6749 __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); 6750 } 6751 6752 static int page_alloc_cpu_dead(unsigned int cpu) 6753 { 6754 6755 lru_add_drain_cpu(cpu); 6756 drain_pages(cpu); 6757 6758 /* 6759 * Spill the event counters of the dead processor 6760 * into the current processors event counters. 6761 * This artificially elevates the count of the current 6762 * processor. 6763 */ 6764 vm_events_fold_cpu(cpu); 6765 6766 /* 6767 * Zero the differential counters of the dead processor 6768 * so that the vm statistics are consistent. 6769 * 6770 * This is only okay since the processor is dead and cannot 6771 * race with what we are doing. 6772 */ 6773 cpu_vm_stats_fold(cpu); 6774 return 0; 6775 } 6776 6777 void __init page_alloc_init(void) 6778 { 6779 int ret; 6780 6781 ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD, 6782 "mm/page_alloc:dead", NULL, 6783 page_alloc_cpu_dead); 6784 WARN_ON(ret < 0); 6785 } 6786 6787 /* 6788 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio 6789 * or min_free_kbytes changes. 
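 * For each zone it adds the largest lowmem_reserve[] entry to the high
 * watermark, caps the sum at the zone's managed pages, and accumulates the
 * result into the per-node and global totalreserve_pages.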
6790 */ 6791 static void calculate_totalreserve_pages(void) 6792 { 6793 struct pglist_data *pgdat; 6794 unsigned long reserve_pages = 0; 6795 enum zone_type i, j; 6796 6797 for_each_online_pgdat(pgdat) { 6798 6799 pgdat->totalreserve_pages = 0; 6800 6801 for (i = 0; i < MAX_NR_ZONES; i++) { 6802 struct zone *zone = pgdat->node_zones + i; 6803 long max = 0; 6804 6805 /* Find valid and maximum lowmem_reserve in the zone */ 6806 for (j = i; j < MAX_NR_ZONES; j++) { 6807 if (zone->lowmem_reserve[j] > max) 6808 max = zone->lowmem_reserve[j]; 6809 } 6810 6811 /* we treat the high watermark as reserved pages. */ 6812 max += high_wmark_pages(zone); 6813 6814 if (max > zone->managed_pages) 6815 max = zone->managed_pages; 6816 6817 pgdat->totalreserve_pages += max; 6818 6819 reserve_pages += max; 6820 } 6821 } 6822 totalreserve_pages = reserve_pages; 6823 } 6824 6825 /* 6826 * setup_per_zone_lowmem_reserve - called whenever 6827 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone 6828 * has a correct pages reserved value, so an adequate number of 6829 * pages are left in the zone after a successful __alloc_pages(). 6830 */ 6831 static void setup_per_zone_lowmem_reserve(void) 6832 { 6833 struct pglist_data *pgdat; 6834 enum zone_type j, idx; 6835 6836 for_each_online_pgdat(pgdat) { 6837 for (j = 0; j < MAX_NR_ZONES; j++) { 6838 struct zone *zone = pgdat->node_zones + j; 6839 unsigned long managed_pages = zone->managed_pages; 6840 6841 zone->lowmem_reserve[j] = 0; 6842 6843 idx = j; 6844 while (idx) { 6845 struct zone *lower_zone; 6846 6847 idx--; 6848 6849 if (sysctl_lowmem_reserve_ratio[idx] < 1) 6850 sysctl_lowmem_reserve_ratio[idx] = 1; 6851 6852 lower_zone = pgdat->node_zones + idx; 6853 lower_zone->lowmem_reserve[j] = managed_pages / 6854 sysctl_lowmem_reserve_ratio[idx]; 6855 managed_pages += lower_zone->managed_pages; 6856 } 6857 } 6858 } 6859 6860 /* update totalreserve_pages */ 6861 calculate_totalreserve_pages(); 6862 } 6863 6864 static void __setup_per_zone_wmarks(void) 6865 { 6866 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 6867 unsigned long lowmem_pages = 0; 6868 struct zone *zone; 6869 unsigned long flags; 6870 6871 /* Calculate total number of !ZONE_HIGHMEM pages */ 6872 for_each_zone(zone) { 6873 if (!is_highmem(zone)) 6874 lowmem_pages += zone->managed_pages; 6875 } 6876 6877 for_each_zone(zone) { 6878 u64 tmp; 6879 6880 spin_lock_irqsave(&zone->lock, flags); 6881 tmp = (u64)pages_min * zone->managed_pages; 6882 do_div(tmp, lowmem_pages); 6883 if (is_highmem(zone)) { 6884 /* 6885 * __GFP_HIGH and PF_MEMALLOC allocations usually don't 6886 * need highmem pages, so cap pages_min to a small 6887 * value here. 6888 * 6889 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) 6890 * deltas control asynch page reclaim, and so should 6891 * not be capped for highmem. 6892 */ 6893 unsigned long min_pages; 6894 6895 min_pages = zone->managed_pages / 1024; 6896 min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL); 6897 zone->watermark[WMARK_MIN] = min_pages; 6898 } else { 6899 /* 6900 * If it's a lowmem zone, reserve a number of pages 6901 * proportionate to the zone's size. 6902 */ 6903 zone->watermark[WMARK_MIN] = tmp; 6904 } 6905 6906 /* 6907 * Set the kswapd watermarks distance according to the 6908 * scale factor in proportion to available memory, but 6909 * ensure a minimum size on small systems. 
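 * For example, with a watermark_scale_factor of 10 (the default) the gap
 * computed below is at least 0.1% of the zone's managed pages, giving
 * WMARK_LOW = WMARK_MIN + gap and WMARK_HIGH = WMARK_MIN + 2 * gap.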
6910 */ 6911 tmp = max_t(u64, tmp >> 2, 6912 mult_frac(zone->managed_pages, 6913 watermark_scale_factor, 10000)); 6914 6915 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; 6916 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; 6917 6918 spin_unlock_irqrestore(&zone->lock, flags); 6919 } 6920 6921 /* update totalreserve_pages */ 6922 calculate_totalreserve_pages(); 6923 } 6924 6925 /** 6926 * setup_per_zone_wmarks - called when min_free_kbytes changes 6927 * or when memory is hot-{added|removed} 6928 * 6929 * Ensures that the watermark[min,low,high] values for each zone are set 6930 * correctly with respect to min_free_kbytes. 6931 */ 6932 void setup_per_zone_wmarks(void) 6933 { 6934 mutex_lock(&zonelists_mutex); 6935 __setup_per_zone_wmarks(); 6936 mutex_unlock(&zonelists_mutex); 6937 } 6938 6939 /* 6940 * Initialise min_free_kbytes. 6941 * 6942 * For small machines we want it small (128k min). For large machines 6943 * we want it large (64MB max). But it is not linear, because network 6944 * bandwidth does not increase linearly with machine size. We use 6945 * 6946 * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy: 6947 * min_free_kbytes = sqrt(lowmem_kbytes * 16) 6948 * 6949 * which yields 6950 * 6951 * 16MB: 512k 6952 * 32MB: 724k 6953 * 64MB: 1024k 6954 * 128MB: 1448k 6955 * 256MB: 2048k 6956 * 512MB: 2896k 6957 * 1024MB: 4096k 6958 * 2048MB: 5792k 6959 * 4096MB: 8192k 6960 * 8192MB: 11584k 6961 * 16384MB: 16384k 6962 */ 6963 int __meminit init_per_zone_wmark_min(void) 6964 { 6965 unsigned long lowmem_kbytes; 6966 int new_min_free_kbytes; 6967 6968 lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10); 6969 new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16); 6970 6971 if (new_min_free_kbytes > user_min_free_kbytes) { 6972 min_free_kbytes = new_min_free_kbytes; 6973 if (min_free_kbytes < 128) 6974 min_free_kbytes = 128; 6975 if (min_free_kbytes > 65536) 6976 min_free_kbytes = 65536; 6977 } else { 6978 pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n", 6979 new_min_free_kbytes, user_min_free_kbytes); 6980 } 6981 setup_per_zone_wmarks(); 6982 refresh_zone_stat_thresholds(); 6983 setup_per_zone_lowmem_reserve(); 6984 6985 #ifdef CONFIG_NUMA 6986 setup_min_unmapped_ratio(); 6987 setup_min_slab_ratio(); 6988 #endif 6989 6990 return 0; 6991 } 6992 core_initcall(init_per_zone_wmark_min) 6993 6994 /* 6995 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 6996 * that we can call two helper functions whenever min_free_kbytes 6997 * changes. 
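 * A successful write also records the new value in user_min_free_kbytes,
 * so init_per_zone_wmark_min() will only override it later with a larger
 * computed value.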
6998 */ 6999 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, 7000 void __user *buffer, size_t *length, loff_t *ppos) 7001 { 7002 int rc; 7003 7004 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 7005 if (rc) 7006 return rc; 7007 7008 if (write) { 7009 user_min_free_kbytes = min_free_kbytes; 7010 setup_per_zone_wmarks(); 7011 } 7012 return 0; 7013 } 7014 7015 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, 7016 void __user *buffer, size_t *length, loff_t *ppos) 7017 { 7018 int rc; 7019 7020 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 7021 if (rc) 7022 return rc; 7023 7024 if (write) 7025 setup_per_zone_wmarks(); 7026 7027 return 0; 7028 } 7029 7030 #ifdef CONFIG_NUMA 7031 static void setup_min_unmapped_ratio(void) 7032 { 7033 pg_data_t *pgdat; 7034 struct zone *zone; 7035 7036 for_each_online_pgdat(pgdat) 7037 pgdat->min_unmapped_pages = 0; 7038 7039 for_each_zone(zone) 7040 zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages * 7041 sysctl_min_unmapped_ratio) / 100; 7042 } 7043 7044 7045 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, 7046 void __user *buffer, size_t *length, loff_t *ppos) 7047 { 7048 int rc; 7049 7050 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 7051 if (rc) 7052 return rc; 7053 7054 setup_min_unmapped_ratio(); 7055 7056 return 0; 7057 } 7058 7059 static void setup_min_slab_ratio(void) 7060 { 7061 pg_data_t *pgdat; 7062 struct zone *zone; 7063 7064 for_each_online_pgdat(pgdat) 7065 pgdat->min_slab_pages = 0; 7066 7067 for_each_zone(zone) 7068 zone->zone_pgdat->min_slab_pages += (zone->managed_pages * 7069 sysctl_min_slab_ratio) / 100; 7070 } 7071 7072 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, 7073 void __user *buffer, size_t *length, loff_t *ppos) 7074 { 7075 int rc; 7076 7077 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 7078 if (rc) 7079 return rc; 7080 7081 setup_min_slab_ratio(); 7082 7083 return 0; 7084 } 7085 #endif 7086 7087 /* 7088 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around 7089 * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve() 7090 * whenever sysctl_lowmem_reserve_ratio changes. 7091 * 7092 * The reserve ratio obviously has absolutely no relation with the 7093 * minimum watermarks. The lowmem reserve ratio can only make sense 7094 * if in function of the boot time zone sizes. 7095 */ 7096 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, 7097 void __user *buffer, size_t *length, loff_t *ppos) 7098 { 7099 proc_dointvec_minmax(table, write, buffer, length, ppos); 7100 setup_per_zone_lowmem_reserve(); 7101 return 0; 7102 } 7103 7104 /* 7105 * percpu_pagelist_fraction - changes the pcp->high for each zone on each 7106 * cpu. It is the fraction of total pages in each zone that a hot per cpu 7107 * pagelist can have before it gets flushed back to buddy allocator. 
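 * e.g. a fraction of 8 lets each CPU's hot list grow to roughly 1/8 of the
 * zone's managed pages before pages are drained back to the buddy lists;
 * pcp->batch is then derived from that high value in
 * pageset_set_high_and_batch().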
7108 */ 7109 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, 7110 void __user *buffer, size_t *length, loff_t *ppos) 7111 { 7112 struct zone *zone; 7113 int old_percpu_pagelist_fraction; 7114 int ret; 7115 7116 mutex_lock(&pcp_batch_high_lock); 7117 old_percpu_pagelist_fraction = percpu_pagelist_fraction; 7118 7119 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 7120 if (!write || ret < 0) 7121 goto out; 7122 7123 /* Sanity checking to avoid pcp imbalance */ 7124 if (percpu_pagelist_fraction && 7125 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) { 7126 percpu_pagelist_fraction = old_percpu_pagelist_fraction; 7127 ret = -EINVAL; 7128 goto out; 7129 } 7130 7131 /* No change? */ 7132 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction) 7133 goto out; 7134 7135 for_each_populated_zone(zone) { 7136 unsigned int cpu; 7137 7138 for_each_possible_cpu(cpu) 7139 pageset_set_high_and_batch(zone, 7140 per_cpu_ptr(zone->pageset, cpu)); 7141 } 7142 out: 7143 mutex_unlock(&pcp_batch_high_lock); 7144 return ret; 7145 } 7146 7147 #ifdef CONFIG_NUMA 7148 int hashdist = HASHDIST_DEFAULT; 7149 7150 static int __init set_hashdist(char *str) 7151 { 7152 if (!str) 7153 return 0; 7154 hashdist = simple_strtoul(str, &str, 0); 7155 return 1; 7156 } 7157 __setup("hashdist=", set_hashdist); 7158 #endif 7159 7160 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES 7161 /* 7162 * Returns the number of pages that arch has reserved but 7163 * is not known to alloc_large_system_hash(). 7164 */ 7165 static unsigned long __init arch_reserved_kernel_pages(void) 7166 { 7167 return 0; 7168 } 7169 #endif 7170 7171 /* 7172 * allocate a large system hash table from bootmem 7173 * - it is assumed that the hash table must contain an exact power-of-2 7174 * quantity of entries 7175 * - limit is the number of hash buckets, not the total allocation size 7176 */ 7177 void *__init alloc_large_system_hash(const char *tablename, 7178 unsigned long bucketsize, 7179 unsigned long numentries, 7180 int scale, 7181 int flags, 7182 unsigned int *_hash_shift, 7183 unsigned int *_hash_mask, 7184 unsigned long low_limit, 7185 unsigned long high_limit) 7186 { 7187 unsigned long long max = high_limit; 7188 unsigned long log2qty, size; 7189 void *table = NULL; 7190 7191 /* allow the kernel cmdline to have a say */ 7192 if (!numentries) { 7193 /* round applicable memory size up to nearest megabyte */ 7194 numentries = nr_kernel_pages; 7195 numentries -= arch_reserved_kernel_pages(); 7196 7197 /* It isn't necessary when PAGE_SIZE >= 1MB */ 7198 if (PAGE_SHIFT < 20) 7199 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 7200 7201 /* limit to 1 bucket per 2^scale bytes of low memory */ 7202 if (scale > PAGE_SHIFT) 7203 numentries >>= (scale - PAGE_SHIFT); 7204 else 7205 numentries <<= (PAGE_SHIFT - scale); 7206 7207 /* Make sure we've got at least a 0-order allocation.. 
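 * i.e. unless HASH_SMALL is set, numentries is rounded up so the table
 * fills at least one page; with HASH_SMALL the floor is 1 << *_hash_shift
 * instead.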
*/ 7208 if (unlikely(flags & HASH_SMALL)) { 7209 /* Makes no sense without HASH_EARLY */ 7210 WARN_ON(!(flags & HASH_EARLY)); 7211 if (!(numentries >> *_hash_shift)) { 7212 numentries = 1UL << *_hash_shift; 7213 BUG_ON(!numentries); 7214 } 7215 } else if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 7216 numentries = PAGE_SIZE / bucketsize; 7217 } 7218 numentries = roundup_pow_of_two(numentries); 7219 7220 /* limit allocation size to 1/16 total memory by default */ 7221 if (max == 0) { 7222 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 7223 do_div(max, bucketsize); 7224 } 7225 max = min(max, 0x80000000ULL); 7226 7227 if (numentries < low_limit) 7228 numentries = low_limit; 7229 if (numentries > max) 7230 numentries = max; 7231 7232 log2qty = ilog2(numentries); 7233 7234 do { 7235 size = bucketsize << log2qty; 7236 if (flags & HASH_EARLY) 7237 table = memblock_virt_alloc_nopanic(size, 0); 7238 else if (hashdist) 7239 table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); 7240 else { 7241 /* 7242 * If bucketsize is not a power-of-two, we may free 7243 * some pages at the end of the hash table, which 7244 * alloc_pages_exact() automatically does 7245 */ 7246 if (get_order(size) < MAX_ORDER) { 7247 table = alloc_pages_exact(size, GFP_ATOMIC); 7248 kmemleak_alloc(table, size, 1, GFP_ATOMIC); 7249 } 7250 } 7251 } while (!table && size > PAGE_SIZE && --log2qty); 7252 7253 if (!table) 7254 panic("Failed to allocate %s hash table\n", tablename); 7255 7256 pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n", 7257 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size); 7258 7259 if (_hash_shift) 7260 *_hash_shift = log2qty; 7261 if (_hash_mask) 7262 *_hash_mask = (1 << log2qty) - 1; 7263 7264 return table; 7265 } 7266 7267 /* 7268 * This function checks whether the pageblock includes unmovable pages or not. 7269 * If @count is not zero, it is okay to include fewer than @count unmovable pages 7270 * 7271 * PageLRU check without isolation or lru_lock could race so that 7272 * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable 7273 * check without lock_page also may miss some movable non-lru pages at 7274 * race condition. So you can't expect this function to be exact. 7275 */ 7276 bool has_unmovable_pages(struct zone *zone, struct page *page, int count, 7277 bool skip_hwpoisoned_pages) 7278 { 7279 unsigned long pfn, iter, found; 7280 int mt; 7281 7282 /* 7283 * To avoid noisy data, lru_add_drain_all() should be called first. 7284 * A ZONE_MOVABLE zone never contains unmovable pages. 7285 */ 7286 if (zone_idx(zone) == ZONE_MOVABLE) 7287 return false; 7288 mt = get_pageblock_migratetype(page); 7289 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt)) 7290 return false; 7291 7292 pfn = page_to_pfn(page); 7293 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { 7294 unsigned long check = pfn + iter; 7295 7296 if (!pfn_valid_within(check)) 7297 continue; 7298 7299 page = pfn_to_page(check); 7300 7301 /* 7302 * Hugepages are not in LRU lists, but they're movable. 7303 * We need not scan over tail pages because we don't 7304 * handle each tail page individually in migration. 7305 */ 7306 if (PageHuge(page)) { 7307 iter = round_up(iter + 1, 1<<compound_order(page)) - 1; 7308 continue; 7309 } 7310 7311 /* 7312 * We can't use page_count without pinning the page 7313 * because another CPU can free the compound page. 7314 * This check already skips compound tails of THP 7315 * because their page->_refcount is zero at all times.
7316 */ 7317 if (!page_ref_count(page)) { 7318 if (PageBuddy(page)) 7319 iter += (1 << page_order(page)) - 1; 7320 continue; 7321 } 7322 7323 /* 7324 * The HWPoisoned page may be not in buddy system, and 7325 * page_count() is not 0. 7326 */ 7327 if (skip_hwpoisoned_pages && PageHWPoison(page)) 7328 continue; 7329 7330 if (__PageMovable(page)) 7331 continue; 7332 7333 if (!PageLRU(page)) 7334 found++; 7335 /* 7336 * If there are RECLAIMABLE pages, we need to check 7337 * it. But now, memory offline itself doesn't call 7338 * shrink_node_slabs() and it still to be fixed. 7339 */ 7340 /* 7341 * If the page is not RAM, page_count()should be 0. 7342 * we don't need more check. This is an _used_ not-movable page. 7343 * 7344 * The problematic thing here is PG_reserved pages. PG_reserved 7345 * is set to both of a memory hole page and a _used_ kernel 7346 * page at boot. 7347 */ 7348 if (found > count) 7349 return true; 7350 } 7351 return false; 7352 } 7353 7354 bool is_pageblock_removable_nolock(struct page *page) 7355 { 7356 struct zone *zone; 7357 unsigned long pfn; 7358 7359 /* 7360 * We have to be careful here because we are iterating over memory 7361 * sections which are not zone aware so we might end up outside of 7362 * the zone but still within the section. 7363 * We have to take care about the node as well. If the node is offline 7364 * its NODE_DATA will be NULL - see page_zone. 7365 */ 7366 if (!node_online(page_to_nid(page))) 7367 return false; 7368 7369 zone = page_zone(page); 7370 pfn = page_to_pfn(page); 7371 if (!zone_spans_pfn(zone, pfn)) 7372 return false; 7373 7374 return !has_unmovable_pages(zone, page, 0, true); 7375 } 7376 7377 #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) 7378 7379 static unsigned long pfn_max_align_down(unsigned long pfn) 7380 { 7381 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES, 7382 pageblock_nr_pages) - 1); 7383 } 7384 7385 static unsigned long pfn_max_align_up(unsigned long pfn) 7386 { 7387 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES, 7388 pageblock_nr_pages)); 7389 } 7390 7391 /* [start, end) must belong to a single zone. */ 7392 static int __alloc_contig_migrate_range(struct compact_control *cc, 7393 unsigned long start, unsigned long end) 7394 { 7395 /* This function is based on compact_zone() from compaction.c. */ 7396 unsigned long nr_reclaimed; 7397 unsigned long pfn = start; 7398 unsigned int tries = 0; 7399 int ret = 0; 7400 7401 migrate_prep(); 7402 7403 while (pfn < end || !list_empty(&cc->migratepages)) { 7404 if (fatal_signal_pending(current)) { 7405 ret = -EINTR; 7406 break; 7407 } 7408 7409 if (list_empty(&cc->migratepages)) { 7410 cc->nr_migratepages = 0; 7411 pfn = isolate_migratepages_range(cc, pfn, end); 7412 if (!pfn) { 7413 ret = -EINTR; 7414 break; 7415 } 7416 tries = 0; 7417 } else if (++tries == 5) { 7418 ret = ret < 0 ? 
ret : -EBUSY; 7419 break; 7420 } 7421 7422 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, 7423 &cc->migratepages); 7424 cc->nr_migratepages -= nr_reclaimed; 7425 7426 ret = migrate_pages(&cc->migratepages, alloc_migrate_target, 7427 NULL, 0, cc->mode, MR_CMA); 7428 } 7429 if (ret < 0) { 7430 putback_movable_pages(&cc->migratepages); 7431 return ret; 7432 } 7433 return 0; 7434 } 7435 7436 /** 7437 * alloc_contig_range() -- tries to allocate given range of pages 7438 * @start: start PFN to allocate 7439 * @end: one-past-the-last PFN to allocate 7440 * @migratetype: migratetype of the underlaying pageblocks (either 7441 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks 7442 * in range must have the same migratetype and it must 7443 * be either of the two. 7444 * @gfp_mask: GFP mask to use during compaction 7445 * 7446 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES 7447 * aligned, however it's the caller's responsibility to guarantee that 7448 * we are the only thread that changes migrate type of pageblocks the 7449 * pages fall in. 7450 * 7451 * The PFN range must belong to a single zone. 7452 * 7453 * Returns zero on success or negative error code. On success all 7454 * pages which PFN is in [start, end) are allocated for the caller and 7455 * need to be freed with free_contig_range(). 7456 */ 7457 int alloc_contig_range(unsigned long start, unsigned long end, 7458 unsigned migratetype, gfp_t gfp_mask) 7459 { 7460 unsigned long outer_start, outer_end; 7461 unsigned int order; 7462 int ret = 0; 7463 7464 struct compact_control cc = { 7465 .nr_migratepages = 0, 7466 .order = -1, 7467 .zone = page_zone(pfn_to_page(start)), 7468 .mode = MIGRATE_SYNC, 7469 .ignore_skip_hint = true, 7470 .gfp_mask = current_gfp_context(gfp_mask), 7471 }; 7472 INIT_LIST_HEAD(&cc.migratepages); 7473 7474 /* 7475 * What we do here is we mark all pageblocks in range as 7476 * MIGRATE_ISOLATE. Because pageblock and max order pages may 7477 * have different sizes, and due to the way page allocator 7478 * work, we align the range to biggest of the two pages so 7479 * that page allocator won't try to merge buddies from 7480 * different pageblocks and change MIGRATE_ISOLATE to some 7481 * other migration type. 7482 * 7483 * Once the pageblocks are marked as MIGRATE_ISOLATE, we 7484 * migrate the pages from an unaligned range (ie. pages that 7485 * we are interested in). This will put all the pages in 7486 * range back to page allocator as MIGRATE_ISOLATE. 7487 * 7488 * When this is done, we take the pages in range from page 7489 * allocator removing them from the buddy system. This way 7490 * page allocator will never consider using them. 7491 * 7492 * This lets us mark the pageblocks back as 7493 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the 7494 * aligned range but not in the unaligned, original range are 7495 * put back to page allocator so that buddy can use them. 7496 */ 7497 7498 ret = start_isolate_page_range(pfn_max_align_down(start), 7499 pfn_max_align_up(end), migratetype, 7500 false); 7501 if (ret) 7502 return ret; 7503 7504 /* 7505 * In case of -EBUSY, we'd like to know which page causes problem. 7506 * So, just fall through. We will check it in test_pages_isolated(). 7507 */ 7508 ret = __alloc_contig_migrate_range(&cc, start, end); 7509 if (ret && ret != -EBUSY) 7510 goto done; 7511 7512 /* 7513 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES 7514 * aligned blocks that are marked as MIGRATE_ISOLATE. 
What's 7515 * more, all pages in [start, end) are free in page allocator. 7516 * What we are going to do is to allocate all pages from 7517 * [start, end) (that is remove them from page allocator). 7518 * 7519 * The only problem is that pages at the beginning and at the 7520 * end of interesting range may be not aligned with pages that 7521 * page allocator holds, ie. they can be part of higher order 7522 * pages. Because of this, we reserve the bigger range and 7523 * once this is done free the pages we are not interested in. 7524 * 7525 * We don't have to hold zone->lock here because the pages are 7526 * isolated thus they won't get removed from buddy. 7527 */ 7528 7529 lru_add_drain_all(); 7530 drain_all_pages(cc.zone); 7531 7532 order = 0; 7533 outer_start = start; 7534 while (!PageBuddy(pfn_to_page(outer_start))) { 7535 if (++order >= MAX_ORDER) { 7536 outer_start = start; 7537 break; 7538 } 7539 outer_start &= ~0UL << order; 7540 } 7541 7542 if (outer_start != start) { 7543 order = page_order(pfn_to_page(outer_start)); 7544 7545 /* 7546 * outer_start page could be small order buddy page and 7547 * it doesn't include start page. Adjust outer_start 7548 * in this case to report failed page properly 7549 * on tracepoint in test_pages_isolated() 7550 */ 7551 if (outer_start + (1UL << order) <= start) 7552 outer_start = start; 7553 } 7554 7555 /* Make sure the range is really isolated. */ 7556 if (test_pages_isolated(outer_start, end, false)) { 7557 pr_info("%s: [%lx, %lx) PFNs busy\n", 7558 __func__, outer_start, end); 7559 ret = -EBUSY; 7560 goto done; 7561 } 7562 7563 /* Grab isolated pages from freelists. */ 7564 outer_end = isolate_freepages_range(&cc, outer_start, end); 7565 if (!outer_end) { 7566 ret = -EBUSY; 7567 goto done; 7568 } 7569 7570 /* Free head and tail (if any) */ 7571 if (start != outer_start) 7572 free_contig_range(outer_start, start - outer_start); 7573 if (end != outer_end) 7574 free_contig_range(end, outer_end - end); 7575 7576 done: 7577 undo_isolate_page_range(pfn_max_align_down(start), 7578 pfn_max_align_up(end), migratetype); 7579 return ret; 7580 } 7581 7582 void free_contig_range(unsigned long pfn, unsigned nr_pages) 7583 { 7584 unsigned int count = 0; 7585 7586 for (; nr_pages--; pfn++) { 7587 struct page *page = pfn_to_page(pfn); 7588 7589 count += page_count(page) != 1; 7590 __free_page(page); 7591 } 7592 WARN(count != 0, "%d pages are still in use!\n", count); 7593 } 7594 #endif 7595 7596 #ifdef CONFIG_MEMORY_HOTPLUG 7597 /* 7598 * The zone indicated has a new number of managed_pages; batch sizes and percpu 7599 * page high values need to be recalulated. 
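 * zone_pcp_update() below does this under pcp_batch_high_lock for every
 * possible CPU's pageset; it is used on the memory hotplug path.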
7600 */ 7601 void __meminit zone_pcp_update(struct zone *zone) 7602 { 7603 unsigned cpu; 7604 mutex_lock(&pcp_batch_high_lock); 7605 for_each_possible_cpu(cpu) 7606 pageset_set_high_and_batch(zone, 7607 per_cpu_ptr(zone->pageset, cpu)); 7608 mutex_unlock(&pcp_batch_high_lock); 7609 } 7610 #endif 7611 7612 void zone_pcp_reset(struct zone *zone) 7613 { 7614 unsigned long flags; 7615 int cpu; 7616 struct per_cpu_pageset *pset; 7617 7618 /* avoid races with drain_pages() */ 7619 local_irq_save(flags); 7620 if (zone->pageset != &boot_pageset) { 7621 for_each_online_cpu(cpu) { 7622 pset = per_cpu_ptr(zone->pageset, cpu); 7623 drain_zonestat(zone, pset); 7624 } 7625 free_percpu(zone->pageset); 7626 zone->pageset = &boot_pageset; 7627 } 7628 local_irq_restore(flags); 7629 } 7630 7631 #ifdef CONFIG_MEMORY_HOTREMOVE 7632 /* 7633 * All pages in the range must be in a single zone and isolated 7634 * before calling this. 7635 */ 7636 void 7637 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 7638 { 7639 struct page *page; 7640 struct zone *zone; 7641 unsigned int order, i; 7642 unsigned long pfn; 7643 unsigned long flags; 7644 /* find the first valid pfn */ 7645 for (pfn = start_pfn; pfn < end_pfn; pfn++) 7646 if (pfn_valid(pfn)) 7647 break; 7648 if (pfn == end_pfn) 7649 return; 7650 zone = page_zone(pfn_to_page(pfn)); 7651 spin_lock_irqsave(&zone->lock, flags); 7652 pfn = start_pfn; 7653 while (pfn < end_pfn) { 7654 if (!pfn_valid(pfn)) { 7655 pfn++; 7656 continue; 7657 } 7658 page = pfn_to_page(pfn); 7659 /* 7660 * The HWPoisoned page may be not in buddy system, and 7661 * page_count() is not 0. 7662 */ 7663 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 7664 pfn++; 7665 SetPageReserved(page); 7666 continue; 7667 } 7668 7669 BUG_ON(page_count(page)); 7670 BUG_ON(!PageBuddy(page)); 7671 order = page_order(page); 7672 #ifdef CONFIG_DEBUG_VM 7673 pr_info("remove from free list %lx %d %lx\n", 7674 pfn, 1 << order, end_pfn); 7675 #endif 7676 list_del(&page->lru); 7677 rmv_page_order(page); 7678 zone->free_area[order].nr_free--; 7679 for (i = 0; i < (1 << order); i++) 7680 SetPageReserved((page+i)); 7681 pfn += (1 << order); 7682 } 7683 spin_unlock_irqrestore(&zone->lock, flags); 7684 } 7685 #endif 7686 7687 bool is_free_buddy_page(struct page *page) 7688 { 7689 struct zone *zone = page_zone(page); 7690 unsigned long pfn = page_to_pfn(page); 7691 unsigned long flags; 7692 unsigned int order; 7693 7694 spin_lock_irqsave(&zone->lock, flags); 7695 for (order = 0; order < MAX_ORDER; order++) { 7696 struct page *page_head = page - (pfn & ((1 << order) - 1)); 7697 7698 if (PageBuddy(page_head) && page_order(page_head) >= order) 7699 break; 7700 } 7701 spin_unlock_irqrestore(&zone->lock, flags); 7702 7703 return order < MAX_ORDER; 7704 } 7705