/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

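/*
 * Illustrative sketch for the accessors named above (an assumption, not
 * part of the original file): on a memoryless node, numa_mem_id() names
 * the nearest node that does have memory, so callers can direct their
 * allocations there.
 */
#if 0	/* example only */
static struct page *alloc_on_local_memory_node(void)
{
	int nid = numa_mem_id();	/* nearest node with memory */

	return alloc_pages_node(nid, GFP_KERNEL, 0);
}
#endif
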
/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory. This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to
 * temporarily change gfp_allowed_mask in order to avoid using I/O during
 * memory allocations while devices are suspended. To avoid races with the
 * suspend/hibernate code, they should always be called with pm_mutex held
 * (gfp_allowed_mask also should only be modified with pm_mutex held, unless
 * the suspend/hibernate code is guaranteed not to run in parallel with that
 * modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	256,
#endif
#ifdef CONFIG_ZONE_DMA32
	256,
#endif
#ifdef CONFIG_HIGHMEM
	32,
#endif
	32,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

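/*
 * Worked example for sysctl_lowmem_reserve_ratio above (numbers taken
 * from the comment preceding the array): on a 1G machine with a 16M DMA
 * zone and a 784M normal zone, a ratio of 256 means a NORMAL allocation
 * leaves 784M/256 = ~3M of ZONE_DMA untouched, and a ratio of 32 means
 * a HIGHMEM allocation leaves 224M/32 = 7M of ZONE_NORMAL in reserve.
 * Smaller ratios therefore mean larger reserves.
 */
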
static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set. All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function. Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}

/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order)) {
		bad_page(page, "wrong compound order", 0);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p))) {
			bad_page(page, "PageTail not set", 0);
			bad++;
		} else if (unlikely(p->first_page != page)) {
			bad_page(page, "first_page not consistent", 0);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

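/*
 * Worked example of the compound-page layout described above: after
 * prep_compound_page(page, 2), the four pages of an order-2 compound
 * page look like this:
 *
 *	page[0]	head page, PG_head set, compound_order() == 2
 *	page[1]	tail, first_page == page; its lru.next holds the dtor
 *		(free_compound_page) and its lru.prev holds the order
 *	page[2]	tail, first_page == page
 *	page[3]	tail, first_page == page
 */
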
static inline void prep_zero_page(struct page *page, unsigned int order,
							gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly;
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		_debug_pagealloc_enabled = true;

	return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops = { NULL, };
static inline void set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

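/*
 * Illustrative note for the pair above (an assumption about this
 * kernel's <linux/mm.h>, not code from this file): PageBuddy() is
 * encoded by storing the magic PAGE_BUDDY_MAPCOUNT_VALUE (-128 in
 * kernels of this vintage) in page->_mapcount, so the helpers are
 * roughly equivalent to:
 *
 *	set_page_order:	set_page_private(page, order);
 *			atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 *	rmv_page_order:	atomic_set(&page->_mapcount, -1);
 *			set_page_private(page, 0);
 */
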
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
 * field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

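/*
 * Worked example of the buddy arithmetic used below (assuming the usual
 * __find_buddy_index() definition, page_idx ^ (1 << order)): for
 * page_idx = 12 and order = 2, the buddy sits at 12 ^ (1 << 2) = 8, and
 * if both order-2 blocks are free they merge into the order-3 block at
 * combined_idx = 8 & 12 = 8. One more XOR at order 3 gives buddy index
 * 0, and so on up the orders until a buddy is found busy.
 */
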
static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	int max_order = MAX_ORDER;

	VM_BUG_ON(!zone_is_initialized(zone));

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);
	if (is_migrate_isolate(migratetype)) {
		/*
		 * We restrict max order of merging to prevent merge
		 * between freepages on isolate pageblock and normal
		 * pageblock. Without this, pageblock isolation
		 * could cause incorrect freepage accounting.
		 */
		max_order = min(MAX_ORDER, pageblock_order + 1);
	} else {
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	}

	page_idx = pfn & ((1 << max_order) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard(zone, buddy, order, migratetype);
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}

static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered. This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			if (unlikely(has_isolate_pageblock(zone)))
				mt = get_pageblock_migratetype(page);

			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;
	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */

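/*
 * Worked example for expand() below: serving an order-0 request from an
 * order-3 free block splits the block in halves repeatedly, returning
 * the upper half to the free lists at each step:
 *
 *	order-3 block [0..7]
 *	-> free order-2 remainder [4..7]
 *	-> free order-1 remainder [2..3]
 *	-> free order-0 remainder [1]
 *	-> page [0] is handed to the caller
 */
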
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
			debug_guardpage_enabled() &&
			high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard pages (or page), so they can be
			 * merged back into the allocator when the buddy
			 * is freed. The corresponding page table entries
			 * are not touched; the pages stay not-present in
			 * the virtual address space.
			 */
			set_page_guard(zone, &page[size], high, migratetype);
			continue;
		}
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page->mem_cgroup))
		bad_reason = "page still charged to cgroup";
#endif
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	set_page_owner(page, order, gfp_flags);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		set_freepage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
#endif
};

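/*
 * Reading the fallbacks[] table above, e.g. for a MIGRATE_UNMOVABLE
 * request whose own free lists are empty: __rmqueue_fallback() below
 * tries MIGRATE_RECLAIMABLE, then MIGRATE_MOVABLE, and stops at
 * MIGRATE_RESERVE (which __rmqueue() handles separately). Note that the
 * fallback search runs from the largest order downwards, the opposite
 * of __rmqueue_smallest(), so stolen blocks are as large as possible
 * and can be converted wholesale.
 */
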
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility
	 */
	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		set_freepage_migratetype(page, migratetype);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

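/*
 * Worked example for move_freepages_block() above: with 4K pages and
 * pageblock_order = 9 (a common configuration, so pageblock_nr_pages =
 * 512), a page at pfn 1000 is rounded down to start_pfn = 1000 & ~511 =
 * 512, and the block [512, 1023] is moved as a unit, provided the zone
 * spans both ends.
 */
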
/*
 * If breaking a large block of pages, move all free pages to the preferred
 * allocation list. If falling back for a reclaimable kernel allocation, be
 * more aggressive about taking ownership of free pages.
 *
 * On the other hand, never change migration type of MIGRATE_CMA pageblocks
 * nor move CMA pages to different free lists. We don't want unmovable pages
 * to be allocated from MIGRATE_CMA areas.
 *
 * Returns the new migratetype of the pageblock (or the same old migratetype
 * if it was unchanged).
 */
static int try_to_steal_freepages(struct zone *zone, struct page *page,
				  int start_type, int fallback_type)
{
	int current_order = page_order(page);

	/*
	 * When borrowing from MIGRATE_CMA, we need to release the excess
	 * buddy pages to CMA itself. We also ensure the freepage_migratetype
	 * is set to CMA so it is returned to the correct freelist in case
	 * the page ends up not being allocated from the pcp lists.
	 */
	if (is_migrate_cma(fallback_type))
		return fallback_type;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		return start_type;
	}

	if (current_order >= pageblock_order / 2 ||
	    start_type == MIGRATE_RECLAIMABLE ||
	    page_group_by_mobility_disabled) {
		int pages;

		pages = move_freepages_block(zone, page, start_type);

		/* Claim the whole block if over half of it is free */
		if (pages >= (1 << (pageblock_order-1)) ||
				page_group_by_mobility_disabled) {

			set_pageblock_migratetype(page, start_type);
			return start_type;
		}

	}

	return fallback_type;
}

/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
{
	struct free_area *area;
	unsigned int current_order;
	struct page *page;
	int migratetype, new_type, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1;
				current_order >= order && current_order <= MAX_ORDER-1;
				--current_order) {
		for (i = 0;; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				break;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			new_type = try_to_steal_freepages(zone, page,
							  start_migratetype,
							  migratetype);

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			expand(zone, page, order, current_order, area,
			       new_type);
			/*
			 * The freepage_migratetype may differ from pageblock's
			 * migratetype depending on the decisions in
			 * try_to_steal_freepages. This is OK as long as it does
			 * not differ for MIGRATE_CMA type.
			 */
			set_freepage_migratetype(page, new_type);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype, new_type);

			return page;
		}
	}

	return NULL;
}

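/*
 * Worked example of the try_to_steal_freepages() policy above, again
 * assuming pageblock_order = 9: a fallback page of order >= 9 converts
 * every pageblock it spans outright; an order 4..8 page (or any
 * fallback for a RECLAIMABLE request) moves the free pages of its
 * 512-page block over, and the block's migratetype is rewritten only if
 * at least 256 pages (half the block) were actually moved.
 */
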
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, bool cold)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number in some conditions. This is useful for IO devices
		 * that can merge IO requests if the physical pages are
		 * ordered properly.
		 */
		if (likely(!cold))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		list = &page->lru;
		if (is_migrate_cma(get_freepage_migratetype(page)))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain, batch;

	local_irq_save(flags);
	batch = ACCESS_ONCE(pcp->batch);
	to_drain = min(pcp->count, batch);
	if (to_drain > 0) {
		free_pcppages_bulk(zone, to_drain, pcp);
		pcp->count -= to_drain;
	}
	local_irq_restore(flags);
}
#endif

/*
 * Drain pcplists of the indicated processor and zone.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
	unsigned long flags;
	struct per_cpu_pageset *pset;
	struct per_cpu_pages *pcp;

	local_irq_save(flags);
	pset = per_cpu_ptr(zone->pageset, cpu);

	pcp = &pset->pcp;
	if (pcp->count) {
		free_pcppages_bulk(zone, pcp->count, pcp);
		pcp->count = 0;
	}
	local_irq_restore(flags);
}

/*
 * Drain pcplists of all zones on the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		drain_pages_zone(cpu, zone);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 *
 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
 * the single zone's pages.
 */
void drain_local_pages(struct zone *zone)
{
	int cpu = smp_processor_id();

	if (zone)
		drain_pages_zone(cpu, zone);
	else
		drain_pages(cpu);
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * When zone parameter is non-NULL, spill just the single zone's pages.
 *
 * Note that this code is protected against sending an IPI to an offline
 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
 * nothing keeps CPUs from showing up after we populated the cpumask and
 * before the call to on_each_cpu_mask().
 */
void drain_all_pages(struct zone *zone)
{
	int cpu;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpu to drain that CPU pcps and on_each_cpu_mask
	 * disables preemption as part of its processing
	 */
	for_each_online_cpu(cpu) {
		struct per_cpu_pageset *pcp;
		struct zone *z;
		bool has_pcps = false;

		if (zone) {
			pcp = per_cpu_ptr(zone->pageset, cpu);
			if (pcp->pcp.count)
				has_pcps = true;
		} else {
			for_each_populated_zone(z) {
				pcp = per_cpu_ptr(z->pageset, cpu);
				if (pcp->pcp.count) {
					has_pcps = true;
					break;
				}
			}
		}

		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}
	on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
								zone, 1);
}

#ifdef CONFIG_HIBERNATION

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	unsigned int order, t;
	struct list_head *curr;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

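/*
 * Usage note for the drain helpers above: drain_local_pages(NULL)
 * spills every zone's pcp lists of the calling CPU, while
 * drain_all_pages(zone) IPIs only those CPUs that actually hold pcp
 * pages, for just that zone; drain_all_pages(NULL) does the same for
 * all zones.
 */
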
/*
 * Free a 0-order page
 * cold == true ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, bool cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	unsigned long pfn = page_to_pfn(page);
	int migratetype;

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	set_freepage_migratetype(page, migratetype);
	local_irq_save(flags);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, pfn, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	if (!cold)
		list_add(&page->lru, &pcp->lists[migratetype]);
	else
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		unsigned long batch = ACCESS_ONCE(pcp->batch);
		free_pcppages_bulk(zone, batch, pcp);
		pcp->count -= batch;
	}

out:
	local_irq_restore(flags);
}

/*
 * Free a list of 0-order pages
 */
void free_hot_cold_page_list(struct list_head *list, bool cold)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		trace_mm_page_free_batched(page, cold);
		free_hot_cold_page(page, cold);
	}
}

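/*
 * Worked example for free_hot_cold_page() above: with, say, pcp->high =
 * 186 and pcp->batch = 31 (values of this shape are computed per zone
 * elsewhere in this file), the 186th page freed to a pcp list triggers
 * free_pcppages_bulk() for 31 pages, dropping pcp->count back to 155
 * while taking zone->lock only once per batch.
 */
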
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	set_page_owner(page, 0, 0);
	for (i = 1; i < (1 << order); i++) {
		set_page_refcounted(page + i);
		set_page_owner(page + i, 0, 0);
	}
}
EXPORT_SYMBOL_GPL(split_page);

int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/* Obey watermarks as if the page was being allocated */
		watermark = low_wmark_pages(zone) + (1 << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);

	/* Set the pageblock if the isolated page is at least a pageblock */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	set_page_owner(page, order, 0);
	return 1UL << order;
}

/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	int nr_pages;

	order = page_order(page);

	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);
	return nr_pages;
}

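/*
 * Illustrative usage sketch for split_page() above (example only, not
 * part of the original file):
 */
#if 0
static void split_page_example(void)
{
	/* one order-2 allocation: four physically contiguous pages */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return;

	split_page(page, 2);

	/* each sub-page now has its own refcount and is freed alone */
	__free_page(page + 3);
	__free_page(page + 2);
	__free_page(page + 1);
	__free_page(page);
}
#endif
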
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
 * we cheat by calling it from here, in the order > 0 path. Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, int migratetype)
{
	unsigned long flags;
	struct page *page;
	bool cold = ((gfp_flags & __GFP_COLD) != 0);

again:
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
		__mod_zone_freepage_state(zone, -(1 << order),
					  get_freepage_migratetype(page));
	}

	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone, gfp_flags);
	local_irq_restore(flags);

	VM_BUG_ON_PAGE(bad_range(zone, page), page);
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return false;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
				&fail_page_alloc.ignore_gfp_wait))
		goto fail;
	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				&fail_page_alloc.ignore_gfp_highmem))
		goto fail;
	if (!debugfs_create_u32("min-order", mode, dir,
				&fail_page_alloc.min_order))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return false;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

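/*
 * Usage note for the fault-injection knobs above (boot-parameter format
 * as documented under Documentation/fault-injection/, stated here as an
 * assumption): booting with
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * pre-configures the fault attributes, and the debugfs files created by
 * fail_page_alloc_debugfs() (ignore-gfp-wait, ignore-gfp-highmem,
 * min-order) tune which allocations are eligible to fail.
 */
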
/*
 * Return true if free pages are above 'mark'. This takes into account the order
 * of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, unsigned int order,
			unsigned long mark, int classzone_idx, int alloc_flags,
			long free_pages)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	int o;
	long free_cma = 0;

	free_pages -= (1 << order) - 1;
	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;
#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
		return false;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return false;
	}
	return true;
}

bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}

bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			unsigned long mark, int classzone_idx, int alloc_flags)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
								free_pages);
}

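/*
 * Worked example for __zone_watermark_ok() above: take order = 2,
 * mark = 128, no ALLOC_HIGH/ALLOC_HARDER and no lowmem reserve. The
 * check first discounts (1 << 2) - 1 = 3 pages, then requires the
 * remaining free pages to exceed 128; at o = 0 the order-0 free pages
 * are subtracted and min halves to 64; at o = 1 the order-1 pages are
 * subtracted and min halves to 32. Only if every step stays above its
 * shrunken min is the order-2 allocation allowed.
 */
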
1896 &cpuset_current_mems_allowed :
1897 &node_states[N_MEMORY];
1898 return allowednodes;
1899 }
1900
1901 /*
1902 * Given 'z' scanning a zonelist, run a couple of quick checks to see
1903 * if it is worth looking at further for free memory:
1904 * 1) Check that the zone isn't thought to be full (doesn't have its
1905 * bit set in the zonelist_cache fullzones BITMAP).
1906 * 2) Check that the zone's node (obtained from the zonelist_cache
1907 * z_to_n[] mapping) is allowed in the passed-in allowednodes mask.
1908 * Return true (non-zero) if the zone is worth looking at further, or
1909 * else return false (zero) if it is not.
1910 *
1911 * This check -ignores- the distinction between various watermarks,
1912 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
1913 * found to be full for any variation of these watermarks, it will
1914 * be considered full for up to one second by all requests, unless
1915 * we are so low on memory on all allowed nodes that we are forced
1916 * into the second scan of the zonelist.
1917 *
1918 * In the second scan we ignore this zonelist cache and exactly
1919 * apply the watermarks to all zones, even if it is slower to do so.
1920 * We are low on memory in the second scan, and should leave no stone
1921 * unturned looking for a free page.
1922 */
1923 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
1924 nodemask_t *allowednodes)
1925 {
1926 struct zonelist_cache *zlc; /* cached zonelist speedup info */
1927 int i; /* index of *z in zonelist zones */
1928 int n; /* node that zone *z is on */
1929
1930 zlc = zonelist->zlcache_ptr;
1931 if (!zlc)
1932 return 1;
1933
1934 i = z - zonelist->_zonerefs;
1935 n = zlc->z_to_n[i];
1936
1937 /* This zone is worth trying if it is allowed but not full */
1938 return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
1939 }
1940
1941 /*
1942 * Given 'z' scanning a zonelist, set the corresponding bit in
1943 * zlc->fullzones, so that subsequent attempts to allocate a page
1944 * from that zone don't waste time re-examining it.
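 *
 * For illustration: the bit index used is the zone's position in the
 * zonelist (z - zonelist->_zonerefs), the same index consulted by
 * zlc_zone_worth_trying() above, so a zone marked full here is skipped
 * by later scans until zlc_setup() zaps the fullzones bitmap roughly
 * one second later.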
1945 */ 1946 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 1947 { 1948 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1949 int i; /* index of *z in zonelist zones */ 1950 1951 zlc = zonelist->zlcache_ptr; 1952 if (!zlc) 1953 return; 1954 1955 i = z - zonelist->_zonerefs; 1956 1957 set_bit(i, zlc->fullzones); 1958 } 1959 1960 /* 1961 * clear all zones full, called after direct reclaim makes progress so that 1962 * a zone that was recently full is not skipped over for up to a second 1963 */ 1964 static void zlc_clear_zones_full(struct zonelist *zonelist) 1965 { 1966 struct zonelist_cache *zlc; /* cached zonelist speedup info */ 1967 1968 zlc = zonelist->zlcache_ptr; 1969 if (!zlc) 1970 return; 1971 1972 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 1973 } 1974 1975 static bool zone_local(struct zone *local_zone, struct zone *zone) 1976 { 1977 return local_zone->node == zone->node; 1978 } 1979 1980 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 1981 { 1982 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) < 1983 RECLAIM_DISTANCE; 1984 } 1985 1986 #else /* CONFIG_NUMA */ 1987 1988 static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags) 1989 { 1990 return NULL; 1991 } 1992 1993 static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z, 1994 nodemask_t *allowednodes) 1995 { 1996 return 1; 1997 } 1998 1999 static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z) 2000 { 2001 } 2002 2003 static void zlc_clear_zones_full(struct zonelist *zonelist) 2004 { 2005 } 2006 2007 static bool zone_local(struct zone *local_zone, struct zone *zone) 2008 { 2009 return true; 2010 } 2011 2012 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) 2013 { 2014 return true; 2015 } 2016 2017 #endif /* CONFIG_NUMA */ 2018 2019 static void reset_alloc_batches(struct zone *preferred_zone) 2020 { 2021 struct zone *zone = preferred_zone->zone_pgdat->node_zones; 2022 2023 do { 2024 mod_zone_page_state(zone, NR_ALLOC_BATCH, 2025 high_wmark_pages(zone) - low_wmark_pages(zone) - 2026 atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); 2027 clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); 2028 } while (zone++ != preferred_zone); 2029 } 2030 2031 /* 2032 * get_page_from_freelist goes through the zonelist trying to allocate 2033 * a page. 2034 */ 2035 static struct page * 2036 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, 2037 struct zonelist *zonelist, int high_zoneidx, int alloc_flags, 2038 struct zone *preferred_zone, int classzone_idx, int migratetype) 2039 { 2040 struct zoneref *z; 2041 struct page *page = NULL; 2042 struct zone *zone; 2043 nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */ 2044 int zlc_active = 0; /* set if using zonelist_cache */ 2045 int did_zlc_setup = 0; /* just call zlc_setup() one time */ 2046 bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) && 2047 (gfp_mask & __GFP_WRITE); 2048 int nr_fair_skipped = 0; 2049 bool zonelist_rescan; 2050 2051 zonelist_scan: 2052 zonelist_rescan = false; 2053 2054 /* 2055 * Scan zonelist, looking for a zone with enough free. 2056 * See also __cpuset_node_allowed() comment in kernel/cpuset.c. 
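 *
 * Rough control flow (summary, not from the source): the first pass
 * honours ALLOC_FAIR and stays on the local node; if it fails,
 * ALLOC_FAIR is dropped, the fairness batches are reset when any zone
 * was skipped as depleted, and the whole zonelist (remote zones
 * included) is rescanned before the caller falls into the slowpath.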
2057 */
2058 for_each_zone_zonelist_nodemask(zone, z, zonelist,
2059 high_zoneidx, nodemask) {
2060 unsigned long mark;
2061
2062 if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
2063 !zlc_zone_worth_trying(zonelist, z, allowednodes))
2064 continue;
2065 if (cpusets_enabled() &&
2066 (alloc_flags & ALLOC_CPUSET) &&
2067 !cpuset_zone_allowed(zone, gfp_mask))
2068 continue;
2069 /*
2070 * Distribute pages in proportion to the individual
2071 * zone size to ensure fair page aging. The zone a
2072 * page was allocated in should have no effect on how
2073 * long the page stays in memory before being reclaimed.
2074 */
2075 if (alloc_flags & ALLOC_FAIR) {
2076 if (!zone_local(preferred_zone, zone))
2077 break;
2078 if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
2079 nr_fair_skipped++;
2080 continue;
2081 }
2082 }
2083 /*
2084 * When allocating a page cache page for writing, we
2085 * want to get it from a zone that is within its dirty
2086 * limit, such that no single zone holds more than its
2087 * proportional share of globally allowed dirty pages.
2088 * The dirty limits take into account the zone's
2089 * lowmem reserves and high watermark so that kswapd
2090 * should be able to balance it without having to
2091 * write pages from its LRU list.
2092 *
2093 * This may look like it could increase pressure on
2094 * lower zones by failing allocations in higher zones
2095 * before they are full. But the pages that do spill
2096 * over are limited as the lower zones are protected
2097 * by this very same mechanism. It should not become
2098 * a practical burden to them.
2099 *
2100 * XXX: For now, allow allocations to potentially
2101 * exceed the per-zone dirty limit in the slowpath
2102 * (ALLOC_WMARK_LOW unset) before going into reclaim,
2103 * which is important when on a NUMA setup the allowed
2104 * zones are together not big enough to reach the
2105 * global limit. The proper fix for these situations
2106 * will require awareness of zones in the
2107 * dirty-throttling and the flusher threads.
2108 */
2109 if (consider_zone_dirty && !zone_dirty_ok(zone))
2110 continue;
2111
2112 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
2113 if (!zone_watermark_ok(zone, order, mark,
2114 classzone_idx, alloc_flags)) {
2115 int ret;
2116
2117 /* Checked here to keep the fast path fast */
2118 BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
2119 if (alloc_flags & ALLOC_NO_WATERMARKS)
2120 goto try_this_zone;
2121
2122 if (IS_ENABLED(CONFIG_NUMA) &&
2123 !did_zlc_setup && nr_online_nodes > 1) {
2124 /*
2125 * We do zlc_setup only once, when there are
2126 * multiple nodes, before considering the first
2127 * zone allowed by the cpuset.
2128 */
2129 allowednodes = zlc_setup(zonelist, alloc_flags);
2130 zlc_active = 1;
2131 did_zlc_setup = 1;
2132 }
2133
2134 if (zone_reclaim_mode == 0 ||
2135 !zone_allows_reclaim(preferred_zone, zone))
2136 goto this_zone_full;
2137
2138 /*
2139 * As we may have just activated ZLC, check if the first
2140 * eligible zone has failed zone_reclaim recently.
2141 */ 2142 if (IS_ENABLED(CONFIG_NUMA) && zlc_active && 2143 !zlc_zone_worth_trying(zonelist, z, allowednodes)) 2144 continue; 2145 2146 ret = zone_reclaim(zone, gfp_mask, order); 2147 switch (ret) { 2148 case ZONE_RECLAIM_NOSCAN: 2149 /* did not scan */ 2150 continue; 2151 case ZONE_RECLAIM_FULL: 2152 /* scanned but unreclaimable */ 2153 continue; 2154 default: 2155 /* did we reclaim enough */ 2156 if (zone_watermark_ok(zone, order, mark, 2157 classzone_idx, alloc_flags)) 2158 goto try_this_zone; 2159 2160 /* 2161 * Failed to reclaim enough to meet watermark. 2162 * Only mark the zone full if checking the min 2163 * watermark or if we failed to reclaim just 2164 * 1<<order pages or else the page allocator 2165 * fastpath will prematurely mark zones full 2166 * when the watermark is between the low and 2167 * min watermarks. 2168 */ 2169 if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) || 2170 ret == ZONE_RECLAIM_SOME) 2171 goto this_zone_full; 2172 2173 continue; 2174 } 2175 } 2176 2177 try_this_zone: 2178 page = buffered_rmqueue(preferred_zone, zone, order, 2179 gfp_mask, migratetype); 2180 if (page) 2181 break; 2182 this_zone_full: 2183 if (IS_ENABLED(CONFIG_NUMA) && zlc_active) 2184 zlc_mark_zone_full(zonelist, z); 2185 } 2186 2187 if (page) { 2188 /* 2189 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was 2190 * necessary to allocate the page. The expectation is 2191 * that the caller is taking steps that will free more 2192 * memory. The caller should avoid the page being used 2193 * for !PFMEMALLOC purposes. 2194 */ 2195 page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); 2196 return page; 2197 } 2198 2199 /* 2200 * The first pass makes sure allocations are spread fairly within the 2201 * local node. However, the local node might have free pages left 2202 * after the fairness batches are exhausted, and remote zones haven't 2203 * even been considered yet. Try once more without fairness, and 2204 * include remote zones now, before entering the slowpath and waking 2205 * kswapd: prefer spilling to a remote zone over swapping locally. 2206 */ 2207 if (alloc_flags & ALLOC_FAIR) { 2208 alloc_flags &= ~ALLOC_FAIR; 2209 if (nr_fair_skipped) { 2210 zonelist_rescan = true; 2211 reset_alloc_batches(preferred_zone); 2212 } 2213 if (nr_online_nodes > 1) 2214 zonelist_rescan = true; 2215 } 2216 2217 if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) { 2218 /* Disable zlc cache for second zonelist scan */ 2219 zlc_active = 0; 2220 zonelist_rescan = true; 2221 } 2222 2223 if (zonelist_rescan) 2224 goto zonelist_scan; 2225 2226 return NULL; 2227 } 2228 2229 /* 2230 * Large machines with many possible nodes should not always dump per-node 2231 * meminfo in irq context. 2232 */ 2233 static inline bool should_suppress_show_mem(void) 2234 { 2235 bool ret = false; 2236 2237 #if NODES_SHIFT > 8 2238 ret = in_interrupt(); 2239 #endif 2240 return ret; 2241 } 2242 2243 static DEFINE_RATELIMIT_STATE(nopage_rs, 2244 DEFAULT_RATELIMIT_INTERVAL, 2245 DEFAULT_RATELIMIT_BURST); 2246 2247 void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...) 2248 { 2249 unsigned int filter = SHOW_MEM_FILTER_NODES; 2250 2251 if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) || 2252 debug_guardpage_minorder() > 0) 2253 return; 2254 2255 /* 2256 * This documents exceptions given to allocations in certain 2257 * contexts that are allowed to allocate outside current's set 2258 * of allowed nodes. 
2259 */ 2260 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2261 if (test_thread_flag(TIF_MEMDIE) || 2262 (current->flags & (PF_MEMALLOC | PF_EXITING))) 2263 filter &= ~SHOW_MEM_FILTER_NODES; 2264 if (in_interrupt() || !(gfp_mask & __GFP_WAIT)) 2265 filter &= ~SHOW_MEM_FILTER_NODES; 2266 2267 if (fmt) { 2268 struct va_format vaf; 2269 va_list args; 2270 2271 va_start(args, fmt); 2272 2273 vaf.fmt = fmt; 2274 vaf.va = &args; 2275 2276 pr_warn("%pV", &vaf); 2277 2278 va_end(args); 2279 } 2280 2281 pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n", 2282 current->comm, order, gfp_mask); 2283 2284 dump_stack(); 2285 if (!should_suppress_show_mem()) 2286 show_mem(filter); 2287 } 2288 2289 static inline int 2290 should_alloc_retry(gfp_t gfp_mask, unsigned int order, 2291 unsigned long did_some_progress, 2292 unsigned long pages_reclaimed) 2293 { 2294 /* Do not loop if specifically requested */ 2295 if (gfp_mask & __GFP_NORETRY) 2296 return 0; 2297 2298 /* Always retry if specifically requested */ 2299 if (gfp_mask & __GFP_NOFAIL) 2300 return 1; 2301 2302 /* 2303 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim 2304 * making forward progress without invoking OOM. Suspend also disables 2305 * storage devices so kswapd will not help. Bail if we are suspending. 2306 */ 2307 if (!did_some_progress && pm_suspended_storage()) 2308 return 0; 2309 2310 /* 2311 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER 2312 * means __GFP_NOFAIL, but that may not be true in other 2313 * implementations. 2314 */ 2315 if (order <= PAGE_ALLOC_COSTLY_ORDER) 2316 return 1; 2317 2318 /* 2319 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is 2320 * specified, then we retry until we no longer reclaim any pages 2321 * (above), or we've reclaimed an order of pages at least as 2322 * large as the allocation's order. In both cases, if the 2323 * allocation still fails, we stop retrying. 2324 */ 2325 if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order)) 2326 return 1; 2327 2328 return 0; 2329 } 2330 2331 static inline struct page * 2332 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, 2333 struct zonelist *zonelist, enum zone_type high_zoneidx, 2334 nodemask_t *nodemask, struct zone *preferred_zone, 2335 int classzone_idx, int migratetype) 2336 { 2337 struct page *page; 2338 2339 /* Acquire the per-zone oom lock for each zone */ 2340 if (!oom_zonelist_trylock(zonelist, gfp_mask)) { 2341 schedule_timeout_uninterruptible(1); 2342 return NULL; 2343 } 2344 2345 /* 2346 * PM-freezer should be notified that there might be an OOM killer on 2347 * its way to kill and wake somebody up. This is too early and we might 2348 * end up not killing anything but false positives are acceptable. 2349 * See freeze_processes. 2350 */ 2351 note_oom_kill(); 2352 2353 /* 2354 * Go through the zonelist yet one more time, keep very high watermark 2355 * here, this is only to catch a parallel oom killing, we must fail if 2356 * we're still under heavy pressure. 
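 *
 * (Concretely: the ALLOC_WMARK_HIGH retry below is expected to fail
 * unless a parallel OOM kill has already freed a substantial amount of
 * memory; only in that case do we skip calling out_of_memory().)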
2357 */ 2358 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, 2359 order, zonelist, high_zoneidx, 2360 ALLOC_WMARK_HIGH|ALLOC_CPUSET, 2361 preferred_zone, classzone_idx, migratetype); 2362 if (page) 2363 goto out; 2364 2365 if (!(gfp_mask & __GFP_NOFAIL)) { 2366 /* The OOM killer will not help higher order allocs */ 2367 if (order > PAGE_ALLOC_COSTLY_ORDER) 2368 goto out; 2369 /* The OOM killer does not needlessly kill tasks for lowmem */ 2370 if (high_zoneidx < ZONE_NORMAL) 2371 goto out; 2372 /* 2373 * GFP_THISNODE contains __GFP_NORETRY and we never hit this. 2374 * Sanity check for bare calls of __GFP_THISNODE, not real OOM. 2375 * The caller should handle page allocation failure by itself if 2376 * it specifies __GFP_THISNODE. 2377 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER. 2378 */ 2379 if (gfp_mask & __GFP_THISNODE) 2380 goto out; 2381 } 2382 /* Exhausted what can be done so it's blamo time */ 2383 out_of_memory(zonelist, gfp_mask, order, nodemask, false); 2384 2385 out: 2386 oom_zonelist_unlock(zonelist, gfp_mask); 2387 return page; 2388 } 2389 2390 #ifdef CONFIG_COMPACTION 2391 /* Try memory compaction for high-order allocations before reclaim */ 2392 static struct page * 2393 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2394 struct zonelist *zonelist, enum zone_type high_zoneidx, 2395 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2396 int classzone_idx, int migratetype, enum migrate_mode mode, 2397 int *contended_compaction, bool *deferred_compaction) 2398 { 2399 unsigned long compact_result; 2400 struct page *page; 2401 2402 if (!order) 2403 return NULL; 2404 2405 current->flags |= PF_MEMALLOC; 2406 compact_result = try_to_compact_pages(zonelist, order, gfp_mask, 2407 nodemask, mode, 2408 contended_compaction, 2409 alloc_flags, classzone_idx); 2410 current->flags &= ~PF_MEMALLOC; 2411 2412 switch (compact_result) { 2413 case COMPACT_DEFERRED: 2414 *deferred_compaction = true; 2415 /* fall-through */ 2416 case COMPACT_SKIPPED: 2417 return NULL; 2418 default: 2419 break; 2420 } 2421 2422 /* 2423 * At least in one zone compaction wasn't deferred or skipped, so let's 2424 * count a compaction stall 2425 */ 2426 count_vm_event(COMPACTSTALL); 2427 2428 page = get_page_from_freelist(gfp_mask, nodemask, 2429 order, zonelist, high_zoneidx, 2430 alloc_flags & ~ALLOC_NO_WATERMARKS, 2431 preferred_zone, classzone_idx, migratetype); 2432 2433 if (page) { 2434 struct zone *zone = page_zone(page); 2435 2436 zone->compact_blockskip_flush = false; 2437 compaction_defer_reset(zone, order, true); 2438 count_vm_event(COMPACTSUCCESS); 2439 return page; 2440 } 2441 2442 /* 2443 * It's bad if compaction run occurs and fails. The most likely reason 2444 * is that pages exist, but not enough to satisfy watermarks. 
2445 */ 2446 count_vm_event(COMPACTFAIL); 2447 2448 cond_resched(); 2449 2450 return NULL; 2451 } 2452 #else 2453 static inline struct page * 2454 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2455 struct zonelist *zonelist, enum zone_type high_zoneidx, 2456 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2457 int classzone_idx, int migratetype, enum migrate_mode mode, 2458 int *contended_compaction, bool *deferred_compaction) 2459 { 2460 return NULL; 2461 } 2462 #endif /* CONFIG_COMPACTION */ 2463 2464 /* Perform direct synchronous page reclaim */ 2465 static int 2466 __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, 2467 nodemask_t *nodemask) 2468 { 2469 struct reclaim_state reclaim_state; 2470 int progress; 2471 2472 cond_resched(); 2473 2474 /* We now go into synchronous reclaim */ 2475 cpuset_memory_pressure_bump(); 2476 current->flags |= PF_MEMALLOC; 2477 lockdep_set_current_reclaim_state(gfp_mask); 2478 reclaim_state.reclaimed_slab = 0; 2479 current->reclaim_state = &reclaim_state; 2480 2481 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); 2482 2483 current->reclaim_state = NULL; 2484 lockdep_clear_current_reclaim_state(); 2485 current->flags &= ~PF_MEMALLOC; 2486 2487 cond_resched(); 2488 2489 return progress; 2490 } 2491 2492 /* The really slow allocator path where we enter direct reclaim */ 2493 static inline struct page * 2494 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 2495 struct zonelist *zonelist, enum zone_type high_zoneidx, 2496 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2497 int classzone_idx, int migratetype, unsigned long *did_some_progress) 2498 { 2499 struct page *page = NULL; 2500 bool drained = false; 2501 2502 *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist, 2503 nodemask); 2504 if (unlikely(!(*did_some_progress))) 2505 return NULL; 2506 2507 /* After successful reclaim, reconsider all zones for allocation */ 2508 if (IS_ENABLED(CONFIG_NUMA)) 2509 zlc_clear_zones_full(zonelist); 2510 2511 retry: 2512 page = get_page_from_freelist(gfp_mask, nodemask, order, 2513 zonelist, high_zoneidx, 2514 alloc_flags & ~ALLOC_NO_WATERMARKS, 2515 preferred_zone, classzone_idx, 2516 migratetype); 2517 2518 /* 2519 * If an allocation failed after direct reclaim, it could be because 2520 * pages are pinned on the per-cpu lists. 
Drain them and try again 2521 */ 2522 if (!page && !drained) { 2523 drain_all_pages(NULL); 2524 drained = true; 2525 goto retry; 2526 } 2527 2528 return page; 2529 } 2530 2531 /* 2532 * This is called in the allocator slow-path if the allocation request is of 2533 * sufficient urgency to ignore watermarks and take other desperate measures 2534 */ 2535 static inline struct page * 2536 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order, 2537 struct zonelist *zonelist, enum zone_type high_zoneidx, 2538 nodemask_t *nodemask, struct zone *preferred_zone, 2539 int classzone_idx, int migratetype) 2540 { 2541 struct page *page; 2542 2543 do { 2544 page = get_page_from_freelist(gfp_mask, nodemask, order, 2545 zonelist, high_zoneidx, ALLOC_NO_WATERMARKS, 2546 preferred_zone, classzone_idx, migratetype); 2547 2548 if (!page && gfp_mask & __GFP_NOFAIL) 2549 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 2550 } while (!page && (gfp_mask & __GFP_NOFAIL)); 2551 2552 return page; 2553 } 2554 2555 static void wake_all_kswapds(unsigned int order, 2556 struct zonelist *zonelist, 2557 enum zone_type high_zoneidx, 2558 struct zone *preferred_zone, 2559 nodemask_t *nodemask) 2560 { 2561 struct zoneref *z; 2562 struct zone *zone; 2563 2564 for_each_zone_zonelist_nodemask(zone, z, zonelist, 2565 high_zoneidx, nodemask) 2566 wakeup_kswapd(zone, order, zone_idx(preferred_zone)); 2567 } 2568 2569 static inline int 2570 gfp_to_alloc_flags(gfp_t gfp_mask) 2571 { 2572 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET; 2573 const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD)); 2574 2575 /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */ 2576 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH); 2577 2578 /* 2579 * The caller may dip into page reserves a bit more if the caller 2580 * cannot run direct reclaim, or if the caller has realtime scheduling 2581 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will 2582 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH). 2583 */ 2584 alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH); 2585 2586 if (atomic) { 2587 /* 2588 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even 2589 * if it can't schedule. 2590 */ 2591 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2592 alloc_flags |= ALLOC_HARDER; 2593 /* 2594 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 2595 * comment for __cpuset_node_allowed(). 
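 *
 * Worked example (illustrative): GFP_ATOMIC is __GFP_HIGH without
 * __GFP_WAIT, so 'atomic' above is true and such a request ends up
 * with ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER and, just below,
 * ALLOC_CPUSET cleared; a plain GFP_KERNEL request keeps only
 * ALLOC_WMARK_MIN | ALLOC_CPUSET.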
2596 */ 2597 alloc_flags &= ~ALLOC_CPUSET; 2598 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2599 alloc_flags |= ALLOC_HARDER; 2600 2601 if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) { 2602 if (gfp_mask & __GFP_MEMALLOC) 2603 alloc_flags |= ALLOC_NO_WATERMARKS; 2604 else if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) 2605 alloc_flags |= ALLOC_NO_WATERMARKS; 2606 else if (!in_interrupt() && 2607 ((current->flags & PF_MEMALLOC) || 2608 unlikely(test_thread_flag(TIF_MEMDIE)))) 2609 alloc_flags |= ALLOC_NO_WATERMARKS; 2610 } 2611 #ifdef CONFIG_CMA 2612 if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) 2613 alloc_flags |= ALLOC_CMA; 2614 #endif 2615 return alloc_flags; 2616 } 2617 2618 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) 2619 { 2620 return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS); 2621 } 2622 2623 static inline struct page * 2624 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, 2625 struct zonelist *zonelist, enum zone_type high_zoneidx, 2626 nodemask_t *nodemask, struct zone *preferred_zone, 2627 int classzone_idx, int migratetype) 2628 { 2629 const gfp_t wait = gfp_mask & __GFP_WAIT; 2630 struct page *page = NULL; 2631 int alloc_flags; 2632 unsigned long pages_reclaimed = 0; 2633 unsigned long did_some_progress; 2634 enum migrate_mode migration_mode = MIGRATE_ASYNC; 2635 bool deferred_compaction = false; 2636 int contended_compaction = COMPACT_CONTENDED_NONE; 2637 2638 /* 2639 * In the slowpath, we sanity check order to avoid ever trying to 2640 * reclaim >= MAX_ORDER areas which will never succeed. Callers may 2641 * be using allocators in order of preference for an area that is 2642 * too large. 2643 */ 2644 if (order >= MAX_ORDER) { 2645 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN)); 2646 return NULL; 2647 } 2648 2649 /* 2650 * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and 2651 * __GFP_NOWARN set) should not cause reclaim since the subsystem 2652 * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim 2653 * using a larger set of nodes after it has established that the 2654 * allowed per node queues are empty and that nodes are 2655 * over allocated. 2656 */ 2657 if (IS_ENABLED(CONFIG_NUMA) && 2658 (gfp_mask & GFP_THISNODE) == GFP_THISNODE) 2659 goto nopage; 2660 2661 restart: 2662 if (!(gfp_mask & __GFP_NO_KSWAPD)) 2663 wake_all_kswapds(order, zonelist, high_zoneidx, 2664 preferred_zone, nodemask); 2665 2666 /* 2667 * OK, we're below the kswapd watermark and have kicked background 2668 * reclaim. Now things get more complex, so set up alloc_flags according 2669 * to how we want to proceed. 2670 */ 2671 alloc_flags = gfp_to_alloc_flags(gfp_mask); 2672 2673 /* 2674 * Find the true preferred zone if the allocation is unconstrained by 2675 * cpusets. 2676 */ 2677 if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) { 2678 struct zoneref *preferred_zoneref; 2679 preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx, 2680 NULL, &preferred_zone); 2681 classzone_idx = zonelist_zone_idx(preferred_zoneref); 2682 } 2683 2684 rebalance: 2685 /* This is the last chance, in general, before the goto nopage. 
*/
2686 page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
2687 high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
2688 preferred_zone, classzone_idx, migratetype);
2689 if (page)
2690 goto got_pg;
2691
2692 /* Allocate without watermarks if the context allows */
2693 if (alloc_flags & ALLOC_NO_WATERMARKS) {
2694 /*
2695 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2696 * the allocation is high priority and these types of
2697 * allocations are system- rather than user-oriented.
2698 */
2699 zonelist = node_zonelist(numa_node_id(), gfp_mask);
2700
2701 page = __alloc_pages_high_priority(gfp_mask, order,
2702 zonelist, high_zoneidx, nodemask,
2703 preferred_zone, classzone_idx, migratetype);
2704 if (page) {
2705 goto got_pg;
2706 }
2707 }
2708
2709 /* Atomic allocations - we can't balance anything */
2710 if (!wait) {
2711 /*
2712 * All existing users of the deprecated __GFP_NOFAIL are
2713 * blockable, so warn of any new users that actually allow this
2714 * type of allocation to fail.
2715 */
2716 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
2717 goto nopage;
2718 }
2719
2720 /* Avoid recursion of direct reclaim */
2721 if (current->flags & PF_MEMALLOC)
2722 goto nopage;
2723
2724 /* Avoid allocations with no watermarks from looping endlessly */
2725 if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
2726 goto nopage;
2727
2728 /*
2729 * Try direct compaction. The first pass is asynchronous. Subsequent
2730 * attempts after direct reclaim are synchronous.
2731 */
2732 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
2733 high_zoneidx, nodemask, alloc_flags,
2734 preferred_zone,
2735 classzone_idx, migratetype,
2736 migration_mode, &contended_compaction,
2737 &deferred_compaction);
2738 if (page)
2739 goto got_pg;
2740
2741 /* Checks for THP-specific high-order allocations */
2742 if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
2743 /*
2744 * If compaction is deferred for high-order allocations, it is
2745 * because sync compaction recently failed. If this is the case
2746 * and the caller requested a THP allocation, we do not want
2747 * to heavily disrupt the system, so we fail the allocation
2748 * instead of entering direct reclaim.
2749 */
2750 if (deferred_compaction)
2751 goto nopage;
2752
2753 /*
2754 * In all zones where compaction was attempted (and not
2755 * deferred or skipped), lock contention has been detected.
2756 * For THP allocation we do not want to disrupt the others,
2757 * so we fall back to base pages instead.
2758 */
2759 if (contended_compaction == COMPACT_CONTENDED_LOCK)
2760 goto nopage;
2761
2762 /*
2763 * If compaction was aborted due to need_resched(), we do not
2764 * want to further increase allocation latency, unless it is
2765 * khugepaged trying to collapse.
2766 */
2767 if (contended_compaction == COMPACT_CONTENDED_SCHED
2768 && !(current->flags & PF_KTHREAD))
2769 goto nopage;
2770 }
2771
2772 /*
2773 * It can become very expensive to allocate transparent hugepages at
2774 * fault, so use asynchronous memory compaction for THP unless it is
2775 * khugepaged trying to collapse.
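 *
 * Concretely (illustrative): a page-fault THP allocation has
 * (gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE and no PF_KTHREAD, so
 * the test below leaves migration_mode at MIGRATE_ASYNC; khugepaged
 * (a kthread) and all non-THP requests are upgraded to
 * MIGRATE_SYNC_LIGHT for the later compaction attempt.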
2776 */ 2777 if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE || 2778 (current->flags & PF_KTHREAD)) 2779 migration_mode = MIGRATE_SYNC_LIGHT; 2780 2781 /* Try direct reclaim and then allocating */ 2782 page = __alloc_pages_direct_reclaim(gfp_mask, order, 2783 zonelist, high_zoneidx, 2784 nodemask, 2785 alloc_flags, preferred_zone, 2786 classzone_idx, migratetype, 2787 &did_some_progress); 2788 if (page) 2789 goto got_pg; 2790 2791 /* 2792 * If we failed to make any progress reclaiming, then we are 2793 * running out of options and have to consider going OOM 2794 */ 2795 if (!did_some_progress) { 2796 if (oom_gfp_allowed(gfp_mask)) { 2797 if (oom_killer_disabled) 2798 goto nopage; 2799 /* Coredumps can quickly deplete all memory reserves */ 2800 if ((current->flags & PF_DUMPCORE) && 2801 !(gfp_mask & __GFP_NOFAIL)) 2802 goto nopage; 2803 page = __alloc_pages_may_oom(gfp_mask, order, 2804 zonelist, high_zoneidx, 2805 nodemask, preferred_zone, 2806 classzone_idx, migratetype); 2807 if (page) 2808 goto got_pg; 2809 2810 if (!(gfp_mask & __GFP_NOFAIL)) { 2811 /* 2812 * The oom killer is not called for high-order 2813 * allocations that may fail, so if no progress 2814 * is being made, there are no other options and 2815 * retrying is unlikely to help. 2816 */ 2817 if (order > PAGE_ALLOC_COSTLY_ORDER) 2818 goto nopage; 2819 /* 2820 * The oom killer is not called for lowmem 2821 * allocations to prevent needlessly killing 2822 * innocent tasks. 2823 */ 2824 if (high_zoneidx < ZONE_NORMAL) 2825 goto nopage; 2826 } 2827 2828 goto restart; 2829 } 2830 } 2831 2832 /* Check if we should retry the allocation */ 2833 pages_reclaimed += did_some_progress; 2834 if (should_alloc_retry(gfp_mask, order, did_some_progress, 2835 pages_reclaimed)) { 2836 /* Wait for some write requests to complete then retry */ 2837 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50); 2838 goto rebalance; 2839 } else { 2840 /* 2841 * High-order allocations do not necessarily loop after 2842 * direct reclaim and reclaim/compaction depends on compaction 2843 * being called after reclaim so call directly if necessary 2844 */ 2845 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist, 2846 high_zoneidx, nodemask, alloc_flags, 2847 preferred_zone, 2848 classzone_idx, migratetype, 2849 migration_mode, &contended_compaction, 2850 &deferred_compaction); 2851 if (page) 2852 goto got_pg; 2853 } 2854 2855 nopage: 2856 warn_alloc_failed(gfp_mask, order, NULL); 2857 return page; 2858 got_pg: 2859 if (kmemcheck_enabled) 2860 kmemcheck_pagealloc_alloc(page, order, gfp_mask); 2861 2862 return page; 2863 } 2864 2865 /* 2866 * This is the 'heart' of the zoned buddy allocator. 2867 */ 2868 struct page * 2869 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, 2870 struct zonelist *zonelist, nodemask_t *nodemask) 2871 { 2872 enum zone_type high_zoneidx = gfp_zone(gfp_mask); 2873 struct zone *preferred_zone; 2874 struct zoneref *preferred_zoneref; 2875 struct page *page = NULL; 2876 int migratetype = gfpflags_to_migratetype(gfp_mask); 2877 unsigned int cpuset_mems_cookie; 2878 int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; 2879 int classzone_idx; 2880 2881 gfp_mask &= gfp_allowed_mask; 2882 2883 lockdep_trace_alloc(gfp_mask); 2884 2885 might_sleep_if(gfp_mask & __GFP_WAIT); 2886 2887 if (should_fail_alloc_page(gfp_mask, order)) 2888 return NULL; 2889 2890 /* 2891 * Check the zones suitable for the gfp_mask contain at least one 2892 * valid zone. 
It's possible to have an empty zonelist as a result 2893 * of GFP_THISNODE and a memoryless node 2894 */ 2895 if (unlikely(!zonelist->_zonerefs->zone)) 2896 return NULL; 2897 2898 if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE) 2899 alloc_flags |= ALLOC_CMA; 2900 2901 retry_cpuset: 2902 cpuset_mems_cookie = read_mems_allowed_begin(); 2903 2904 /* The preferred zone is used for statistics later */ 2905 preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx, 2906 nodemask ? : &cpuset_current_mems_allowed, 2907 &preferred_zone); 2908 if (!preferred_zone) 2909 goto out; 2910 classzone_idx = zonelist_zone_idx(preferred_zoneref); 2911 2912 /* First allocation attempt */ 2913 page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order, 2914 zonelist, high_zoneidx, alloc_flags, 2915 preferred_zone, classzone_idx, migratetype); 2916 if (unlikely(!page)) { 2917 /* 2918 * Runtime PM, block IO and its error handling path 2919 * can deadlock because I/O on the device might not 2920 * complete. 2921 */ 2922 gfp_mask = memalloc_noio_flags(gfp_mask); 2923 page = __alloc_pages_slowpath(gfp_mask, order, 2924 zonelist, high_zoneidx, nodemask, 2925 preferred_zone, classzone_idx, migratetype); 2926 } 2927 2928 trace_mm_page_alloc(page, order, gfp_mask, migratetype); 2929 2930 out: 2931 /* 2932 * When updating a task's mems_allowed, it is possible to race with 2933 * parallel threads in such a way that an allocation can fail while 2934 * the mask is being updated. If a page allocation is about to fail, 2935 * check if the cpuset changed during allocation and if so, retry. 2936 */ 2937 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) 2938 goto retry_cpuset; 2939 2940 return page; 2941 } 2942 EXPORT_SYMBOL(__alloc_pages_nodemask); 2943 2944 /* 2945 * Common helper functions. 2946 */ 2947 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) 2948 { 2949 struct page *page; 2950 2951 /* 2952 * __get_free_pages() returns a 32-bit address, which cannot represent 2953 * a highmem page 2954 */ 2955 VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0); 2956 2957 page = alloc_pages(gfp_mask, order); 2958 if (!page) 2959 return 0; 2960 return (unsigned long) page_address(page); 2961 } 2962 EXPORT_SYMBOL(__get_free_pages); 2963 2964 unsigned long get_zeroed_page(gfp_t gfp_mask) 2965 { 2966 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); 2967 } 2968 EXPORT_SYMBOL(get_zeroed_page); 2969 2970 void __free_pages(struct page *page, unsigned int order) 2971 { 2972 if (put_page_testzero(page)) { 2973 if (order == 0) 2974 free_hot_cold_page(page, false); 2975 else 2976 __free_pages_ok(page, order); 2977 } 2978 } 2979 2980 EXPORT_SYMBOL(__free_pages); 2981 2982 void free_pages(unsigned long addr, unsigned int order) 2983 { 2984 if (addr != 0) { 2985 VM_BUG_ON(!virt_addr_valid((void *)addr)); 2986 __free_pages(virt_to_page((void *)addr), order); 2987 } 2988 } 2989 2990 EXPORT_SYMBOL(free_pages); 2991 2992 /* 2993 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter 2994 * of the current memory cgroup. 2995 * 2996 * It should be used when the caller would like to use kmalloc, but since the 2997 * allocation is large, it has to fall back to the page allocator. 
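 *
 * Usage sketch (illustrative only, not from the source):
 *
 *	struct page *page = alloc_kmem_pages(GFP_KERNEL, 2);
 *	if (page)
 *		free_kmem_pages((unsigned long)page_address(page), 2);
 *
 * Pages obtained here must be released with __free_kmem_pages() or
 * free_kmem_pages() so that the memcg charge is dropped as well.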
2998 */
2999 struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
3000 {
3001 struct page *page;
3002 struct mem_cgroup *memcg = NULL;
3003
3004 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3005 return NULL;
3006 page = alloc_pages(gfp_mask, order);
3007 memcg_kmem_commit_charge(page, memcg, order);
3008 return page;
3009 }
3010
3011 struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
3012 {
3013 struct page *page;
3014 struct mem_cgroup *memcg = NULL;
3015
3016 if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
3017 return NULL;
3018 page = alloc_pages_node(nid, gfp_mask, order);
3019 memcg_kmem_commit_charge(page, memcg, order);
3020 return page;
3021 }
3022
3023 /*
3024 * __free_kmem_pages and free_kmem_pages will free pages allocated with
3025 * alloc_kmem_pages.
3026 */
3027 void __free_kmem_pages(struct page *page, unsigned int order)
3028 {
3029 memcg_kmem_uncharge_pages(page, order);
3030 __free_pages(page, order);
3031 }
3032
3033 void free_kmem_pages(unsigned long addr, unsigned int order)
3034 {
3035 if (addr != 0) {
3036 VM_BUG_ON(!virt_addr_valid((void *)addr));
3037 __free_kmem_pages(virt_to_page((void *)addr), order);
3038 }
3039 }
3040
3041 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
3042 {
3043 if (addr) {
3044 unsigned long alloc_end = addr + (PAGE_SIZE << order);
3045 unsigned long used = addr + PAGE_ALIGN(size);
3046
3047 split_page(virt_to_page((void *)addr), order);
3048 while (used < alloc_end) {
3049 free_page(used);
3050 used += PAGE_SIZE;
3051 }
3052 }
3053 return (void *)addr;
3054 }
3055
3056 /**
3057 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
3058 * @size: the number of bytes to allocate
3059 * @gfp_mask: GFP flags for the allocation
3060 *
3061 * This function is similar to alloc_pages(), except that it allocates the
3062 * minimum number of pages to satisfy the request. alloc_pages() can only
3063 * allocate memory in power-of-two pages.
3064 *
3065 * This function is also limited by MAX_ORDER.
3066 *
3067 * Memory allocated by this function must be released by free_pages_exact().
3068 */
3069 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
3070 {
3071 unsigned int order = get_order(size);
3072 unsigned long addr;
3073
3074 addr = __get_free_pages(gfp_mask, order);
3075 return make_alloc_exact(addr, order, size);
3076 }
3077 EXPORT_SYMBOL(alloc_pages_exact);
3078
3079 /**
3080 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
3081 * pages on a node.
3082 * @nid: the preferred node ID where memory should be allocated
3083 * @size: the number of bytes to allocate
3084 * @gfp_mask: GFP flags for the allocation
3085 *
3086 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
3087 * back.
3088 * Note this is not alloc_pages_exact_node(), which allocates on a specific node
3089 * but is not exact.
3090 */
3091 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
3092 {
3093 unsigned order = get_order(size);
3094 struct page *p = alloc_pages_node(nid, gfp_mask, order);
3095 if (!p)
3096 return NULL;
3097 return make_alloc_exact((unsigned long)page_address(p), order, size);
3098 }
3099
3100 /**
3101 * free_pages_exact - release memory allocated via alloc_pages_exact()
3102 * @virt: the value returned by alloc_pages_exact.
3103 * @size: size of allocation, same value as passed to alloc_pages_exact().
3104 *
3105 * Release the memory allocated by a previous call to alloc_pages_exact.
3106 */
3107 void free_pages_exact(void *virt, size_t size)
3108 {
3109 unsigned long addr = (unsigned long)virt;
3110 unsigned long end = addr + PAGE_ALIGN(size);
3111
3112 while (addr < end) {
3113 free_page(addr);
3114 addr += PAGE_SIZE;
3115 }
3116 }
3117 EXPORT_SYMBOL(free_pages_exact);
3118
3119 /**
3120 * nr_free_zone_pages - count number of pages beyond high watermark
3121 * @offset: The zone index of the highest zone
3122 *
3123 * nr_free_zone_pages() counts the number of pages which are beyond the
3124 * high watermark within all zones at or below a given zone index. For each
3125 * zone, the number of pages is calculated as:
3126 * managed_pages - high_pages
3127 */
3128 static unsigned long nr_free_zone_pages(int offset)
3129 {
3130 struct zoneref *z;
3131 struct zone *zone;
3132
3133 /* Just pick one node, since fallback list is circular */
3134 unsigned long sum = 0;
3135
3136 struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
3137
3138 for_each_zone_zonelist(zone, z, zonelist, offset) {
3139 unsigned long size = zone->managed_pages;
3140 unsigned long high = high_wmark_pages(zone);
3141 if (size > high)
3142 sum += size - high;
3143 }
3144
3145 return sum;
3146 }
3147
3148 /**
3149 * nr_free_buffer_pages - count number of pages beyond high watermark
3150 *
3151 * nr_free_buffer_pages() counts the number of pages which are beyond the high
3152 * watermark within ZONE_DMA and ZONE_NORMAL.
3153 */
3154 unsigned long nr_free_buffer_pages(void)
3155 {
3156 return nr_free_zone_pages(gfp_zone(GFP_USER));
3157 }
3158 EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
3159
3160 /**
3161 * nr_free_pagecache_pages - count number of pages beyond high watermark
3162 *
3163 * nr_free_pagecache_pages() counts the number of pages which are beyond the
3164 * high watermark within all zones.
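 *
 * Example (illustrative, made-up numbers): with two populated zones of
 * managed_pages 262144 and 32768 and high watermarks 4096 and 512,
 * nr_free_zone_pages() reports (262144 - 4096) + (32768 - 512) =
 * 290304 pages.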
3165 */ 3166 unsigned long nr_free_pagecache_pages(void) 3167 { 3168 return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)); 3169 } 3170 3171 static inline void show_node(struct zone *zone) 3172 { 3173 if (IS_ENABLED(CONFIG_NUMA)) 3174 printk("Node %d ", zone_to_nid(zone)); 3175 } 3176 3177 void si_meminfo(struct sysinfo *val) 3178 { 3179 val->totalram = totalram_pages; 3180 val->sharedram = global_page_state(NR_SHMEM); 3181 val->freeram = global_page_state(NR_FREE_PAGES); 3182 val->bufferram = nr_blockdev_pages(); 3183 val->totalhigh = totalhigh_pages; 3184 val->freehigh = nr_free_highpages(); 3185 val->mem_unit = PAGE_SIZE; 3186 } 3187 3188 EXPORT_SYMBOL(si_meminfo); 3189 3190 #ifdef CONFIG_NUMA 3191 void si_meminfo_node(struct sysinfo *val, int nid) 3192 { 3193 int zone_type; /* needs to be signed */ 3194 unsigned long managed_pages = 0; 3195 pg_data_t *pgdat = NODE_DATA(nid); 3196 3197 for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) 3198 managed_pages += pgdat->node_zones[zone_type].managed_pages; 3199 val->totalram = managed_pages; 3200 val->sharedram = node_page_state(nid, NR_SHMEM); 3201 val->freeram = node_page_state(nid, NR_FREE_PAGES); 3202 #ifdef CONFIG_HIGHMEM 3203 val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages; 3204 val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM], 3205 NR_FREE_PAGES); 3206 #else 3207 val->totalhigh = 0; 3208 val->freehigh = 0; 3209 #endif 3210 val->mem_unit = PAGE_SIZE; 3211 } 3212 #endif 3213 3214 /* 3215 * Determine whether the node should be displayed or not, depending on whether 3216 * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). 3217 */ 3218 bool skip_free_areas_node(unsigned int flags, int nid) 3219 { 3220 bool ret = false; 3221 unsigned int cpuset_mems_cookie; 3222 3223 if (!(flags & SHOW_MEM_FILTER_NODES)) 3224 goto out; 3225 3226 do { 3227 cpuset_mems_cookie = read_mems_allowed_begin(); 3228 ret = !node_isset(nid, cpuset_current_mems_allowed); 3229 } while (read_mems_allowed_retry(cpuset_mems_cookie)); 3230 out: 3231 return ret; 3232 } 3233 3234 #define K(x) ((x) << (PAGE_SHIFT-10)) 3235 3236 static void show_migration_types(unsigned char type) 3237 { 3238 static const char types[MIGRATE_TYPES] = { 3239 [MIGRATE_UNMOVABLE] = 'U', 3240 [MIGRATE_RECLAIMABLE] = 'E', 3241 [MIGRATE_MOVABLE] = 'M', 3242 [MIGRATE_RESERVE] = 'R', 3243 #ifdef CONFIG_CMA 3244 [MIGRATE_CMA] = 'C', 3245 #endif 3246 #ifdef CONFIG_MEMORY_ISOLATION 3247 [MIGRATE_ISOLATE] = 'I', 3248 #endif 3249 }; 3250 char tmp[MIGRATE_TYPES + 1]; 3251 char *p = tmp; 3252 int i; 3253 3254 for (i = 0; i < MIGRATE_TYPES; i++) { 3255 if (type & (1 << i)) 3256 *p++ = types[i]; 3257 } 3258 3259 *p = '\0'; 3260 printk("(%s) ", tmp); 3261 } 3262 3263 /* 3264 * Show free area list (used inside shift_scroll-lock stuff) 3265 * We also calculate the percentage fragmentation. We do this by counting the 3266 * memory on each free list with the exception of the first item on the list. 3267 * Suppresses nodes that are not allowed by current's cpuset if 3268 * SHOW_MEM_FILTER_NODES is passed. 
3269 */ 3270 void show_free_areas(unsigned int filter) 3271 { 3272 int cpu; 3273 struct zone *zone; 3274 3275 for_each_populated_zone(zone) { 3276 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3277 continue; 3278 show_node(zone); 3279 printk("%s per-cpu:\n", zone->name); 3280 3281 for_each_online_cpu(cpu) { 3282 struct per_cpu_pageset *pageset; 3283 3284 pageset = per_cpu_ptr(zone->pageset, cpu); 3285 3286 printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n", 3287 cpu, pageset->pcp.high, 3288 pageset->pcp.batch, pageset->pcp.count); 3289 } 3290 } 3291 3292 printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" 3293 " active_file:%lu inactive_file:%lu isolated_file:%lu\n" 3294 " unevictable:%lu" 3295 " dirty:%lu writeback:%lu unstable:%lu\n" 3296 " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n" 3297 " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n" 3298 " free_cma:%lu\n", 3299 global_page_state(NR_ACTIVE_ANON), 3300 global_page_state(NR_INACTIVE_ANON), 3301 global_page_state(NR_ISOLATED_ANON), 3302 global_page_state(NR_ACTIVE_FILE), 3303 global_page_state(NR_INACTIVE_FILE), 3304 global_page_state(NR_ISOLATED_FILE), 3305 global_page_state(NR_UNEVICTABLE), 3306 global_page_state(NR_FILE_DIRTY), 3307 global_page_state(NR_WRITEBACK), 3308 global_page_state(NR_UNSTABLE_NFS), 3309 global_page_state(NR_FREE_PAGES), 3310 global_page_state(NR_SLAB_RECLAIMABLE), 3311 global_page_state(NR_SLAB_UNRECLAIMABLE), 3312 global_page_state(NR_FILE_MAPPED), 3313 global_page_state(NR_SHMEM), 3314 global_page_state(NR_PAGETABLE), 3315 global_page_state(NR_BOUNCE), 3316 global_page_state(NR_FREE_CMA_PAGES)); 3317 3318 for_each_populated_zone(zone) { 3319 int i; 3320 3321 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3322 continue; 3323 show_node(zone); 3324 printk("%s" 3325 " free:%lukB" 3326 " min:%lukB" 3327 " low:%lukB" 3328 " high:%lukB" 3329 " active_anon:%lukB" 3330 " inactive_anon:%lukB" 3331 " active_file:%lukB" 3332 " inactive_file:%lukB" 3333 " unevictable:%lukB" 3334 " isolated(anon):%lukB" 3335 " isolated(file):%lukB" 3336 " present:%lukB" 3337 " managed:%lukB" 3338 " mlocked:%lukB" 3339 " dirty:%lukB" 3340 " writeback:%lukB" 3341 " mapped:%lukB" 3342 " shmem:%lukB" 3343 " slab_reclaimable:%lukB" 3344 " slab_unreclaimable:%lukB" 3345 " kernel_stack:%lukB" 3346 " pagetables:%lukB" 3347 " unstable:%lukB" 3348 " bounce:%lukB" 3349 " free_cma:%lukB" 3350 " writeback_tmp:%lukB" 3351 " pages_scanned:%lu" 3352 " all_unreclaimable? 
%s" 3353 "\n", 3354 zone->name, 3355 K(zone_page_state(zone, NR_FREE_PAGES)), 3356 K(min_wmark_pages(zone)), 3357 K(low_wmark_pages(zone)), 3358 K(high_wmark_pages(zone)), 3359 K(zone_page_state(zone, NR_ACTIVE_ANON)), 3360 K(zone_page_state(zone, NR_INACTIVE_ANON)), 3361 K(zone_page_state(zone, NR_ACTIVE_FILE)), 3362 K(zone_page_state(zone, NR_INACTIVE_FILE)), 3363 K(zone_page_state(zone, NR_UNEVICTABLE)), 3364 K(zone_page_state(zone, NR_ISOLATED_ANON)), 3365 K(zone_page_state(zone, NR_ISOLATED_FILE)), 3366 K(zone->present_pages), 3367 K(zone->managed_pages), 3368 K(zone_page_state(zone, NR_MLOCK)), 3369 K(zone_page_state(zone, NR_FILE_DIRTY)), 3370 K(zone_page_state(zone, NR_WRITEBACK)), 3371 K(zone_page_state(zone, NR_FILE_MAPPED)), 3372 K(zone_page_state(zone, NR_SHMEM)), 3373 K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)), 3374 K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)), 3375 zone_page_state(zone, NR_KERNEL_STACK) * 3376 THREAD_SIZE / 1024, 3377 K(zone_page_state(zone, NR_PAGETABLE)), 3378 K(zone_page_state(zone, NR_UNSTABLE_NFS)), 3379 K(zone_page_state(zone, NR_BOUNCE)), 3380 K(zone_page_state(zone, NR_FREE_CMA_PAGES)), 3381 K(zone_page_state(zone, NR_WRITEBACK_TEMP)), 3382 K(zone_page_state(zone, NR_PAGES_SCANNED)), 3383 (!zone_reclaimable(zone) ? "yes" : "no") 3384 ); 3385 printk("lowmem_reserve[]:"); 3386 for (i = 0; i < MAX_NR_ZONES; i++) 3387 printk(" %ld", zone->lowmem_reserve[i]); 3388 printk("\n"); 3389 } 3390 3391 for_each_populated_zone(zone) { 3392 unsigned long nr[MAX_ORDER], flags, order, total = 0; 3393 unsigned char types[MAX_ORDER]; 3394 3395 if (skip_free_areas_node(filter, zone_to_nid(zone))) 3396 continue; 3397 show_node(zone); 3398 printk("%s: ", zone->name); 3399 3400 spin_lock_irqsave(&zone->lock, flags); 3401 for (order = 0; order < MAX_ORDER; order++) { 3402 struct free_area *area = &zone->free_area[order]; 3403 int type; 3404 3405 nr[order] = area->nr_free; 3406 total += nr[order] << order; 3407 3408 types[order] = 0; 3409 for (type = 0; type < MIGRATE_TYPES; type++) { 3410 if (!list_empty(&area->free_list[type])) 3411 types[order] |= 1 << type; 3412 } 3413 } 3414 spin_unlock_irqrestore(&zone->lock, flags); 3415 for (order = 0; order < MAX_ORDER; order++) { 3416 printk("%lu*%lukB ", nr[order], K(1UL) << order); 3417 if (nr[order]) 3418 show_migration_types(types[order]); 3419 } 3420 printk("= %lukB\n", K(total)); 3421 } 3422 3423 hugetlb_show_meminfo(); 3424 3425 printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES)); 3426 3427 show_swap_cache_info(); 3428 } 3429 3430 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) 3431 { 3432 zoneref->zone = zone; 3433 zoneref->zone_idx = zone_idx(zone); 3434 } 3435 3436 /* 3437 * Builds allocation fallback zone lists. 3438 * 3439 * Add all populated zones of a node to the zonelist. 3440 */ 3441 static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist, 3442 int nr_zones) 3443 { 3444 struct zone *zone; 3445 enum zone_type zone_type = MAX_NR_ZONES; 3446 3447 do { 3448 zone_type--; 3449 zone = pgdat->node_zones + zone_type; 3450 if (populated_zone(zone)) { 3451 zoneref_set_zone(zone, 3452 &zonelist->_zonerefs[nr_zones++]); 3453 check_highest_zone(zone_type); 3454 } 3455 } while (zone_type); 3456 3457 return nr_zones; 3458 } 3459 3460 3461 /* 3462 * zonelist_order: 3463 * 0 = automatic detection of better ordering. 
3464 * 1 = order by ([node] distance, -zonetype)
3465 * 2 = order by (-zonetype, [node] distance)
3466 *
3467 * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
3468 * the same zonelist. So only NUMA can configure this param.
3469 */
3470 #define ZONELIST_ORDER_DEFAULT 0
3471 #define ZONELIST_ORDER_NODE 1
3472 #define ZONELIST_ORDER_ZONE 2
3473
3474 /* zonelist order in the kernel.
3475 * set_zonelist_order() will set this to NODE or ZONE.
3476 */
3477 static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
3478 static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
3479
3480
3481 #ifdef CONFIG_NUMA
3482 /* The ordering the user specified; set via boot param or sysctl */
3483 static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3484 /* string for sysctl */
3485 #define NUMA_ZONELIST_ORDER_LEN 16
3486 char numa_zonelist_order[16] = "default";
3487
3488 /*
3489 * Interface to configure zonelist ordering.
3490 * command line option "numa_zonelist_order"
3491 * = "[dD]efault" - default, automatic configuration.
3492 * = "[nN]ode" - order by node locality, then by zone within node
3493 * = "[zZ]one" - order by zone, then by locality within zone
3494 */
3495
3496 static int __parse_numa_zonelist_order(char *s)
3497 {
3498 if (*s == 'd' || *s == 'D') {
3499 user_zonelist_order = ZONELIST_ORDER_DEFAULT;
3500 } else if (*s == 'n' || *s == 'N') {
3501 user_zonelist_order = ZONELIST_ORDER_NODE;
3502 } else if (*s == 'z' || *s == 'Z') {
3503 user_zonelist_order = ZONELIST_ORDER_ZONE;
3504 } else {
3505 printk(KERN_WARNING
3506 "Ignoring invalid numa_zonelist_order value: "
3507 "%s\n", s);
3508 return -EINVAL;
3509 }
3510 return 0;
3511 }
3512
3513 static __init int setup_numa_zonelist_order(char *s)
3514 {
3515 int ret;
3516
3517 if (!s)
3518 return 0;
3519
3520 ret = __parse_numa_zonelist_order(s);
3521 if (ret == 0)
3522 strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
3523
3524 return ret;
3525 }
3526 early_param("numa_zonelist_order", setup_numa_zonelist_order);
3527
3528 /*
3529 * sysctl handler for numa_zonelist_order
3530 */
3531 int numa_zonelist_order_handler(struct ctl_table *table, int write,
3532 void __user *buffer, size_t *length,
3533 loff_t *ppos)
3534 {
3535 char saved_string[NUMA_ZONELIST_ORDER_LEN];
3536 int ret;
3537 static DEFINE_MUTEX(zl_order_mutex);
3538
3539 mutex_lock(&zl_order_mutex);
3540 if (write) {
3541 if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
3542 ret = -EINVAL;
3543 goto out;
3544 }
3545 strcpy(saved_string, (char *)table->data);
3546 }
3547 ret = proc_dostring(table, write, buffer, length, ppos);
3548 if (ret)
3549 goto out;
3550 if (write) {
3551 int oldval = user_zonelist_order;
3552
3553 ret = __parse_numa_zonelist_order((char *)table->data);
3554 if (ret) {
3555 /*
3556 * bogus value.
restore saved string 3557 */ 3558 strncpy((char *)table->data, saved_string, 3559 NUMA_ZONELIST_ORDER_LEN); 3560 user_zonelist_order = oldval; 3561 } else if (oldval != user_zonelist_order) { 3562 mutex_lock(&zonelists_mutex); 3563 build_all_zonelists(NULL, NULL); 3564 mutex_unlock(&zonelists_mutex); 3565 } 3566 } 3567 out: 3568 mutex_unlock(&zl_order_mutex); 3569 return ret; 3570 } 3571 3572 3573 #define MAX_NODE_LOAD (nr_online_nodes) 3574 static int node_load[MAX_NUMNODES]; 3575 3576 /** 3577 * find_next_best_node - find the next node that should appear in a given node's fallback list 3578 * @node: node whose fallback list we're appending 3579 * @used_node_mask: nodemask_t of already used nodes 3580 * 3581 * We use a number of factors to determine which is the next node that should 3582 * appear on a given node's fallback list. The node should not have appeared 3583 * already in @node's fallback list, and it should be the next closest node 3584 * according to the distance array (which contains arbitrary distance values 3585 * from each node to each node in the system), and should also prefer nodes 3586 * with no CPUs, since presumably they'll have very little allocation pressure 3587 * on them otherwise. 3588 * It returns -1 if no node is found. 3589 */ 3590 static int find_next_best_node(int node, nodemask_t *used_node_mask) 3591 { 3592 int n, val; 3593 int min_val = INT_MAX; 3594 int best_node = NUMA_NO_NODE; 3595 const struct cpumask *tmp = cpumask_of_node(0); 3596 3597 /* Use the local node if we haven't already */ 3598 if (!node_isset(node, *used_node_mask)) { 3599 node_set(node, *used_node_mask); 3600 return node; 3601 } 3602 3603 for_each_node_state(n, N_MEMORY) { 3604 3605 /* Don't want a node to appear more than once */ 3606 if (node_isset(n, *used_node_mask)) 3607 continue; 3608 3609 /* Use the distance array to find the distance */ 3610 val = node_distance(node, n); 3611 3612 /* Penalize nodes under us ("prefer the next node") */ 3613 val += (n < node); 3614 3615 /* Give preference to headless and unused nodes */ 3616 tmp = cpumask_of_node(n); 3617 if (!cpumask_empty(tmp)) 3618 val += PENALTY_FOR_NODE_WITH_CPUS; 3619 3620 /* Slight preference for less loaded node */ 3621 val *= (MAX_NODE_LOAD*MAX_NUMNODES); 3622 val += node_load[n]; 3623 3624 if (val < min_val) { 3625 min_val = val; 3626 best_node = n; 3627 } 3628 } 3629 3630 if (best_node >= 0) 3631 node_set(best_node, *used_node_mask); 3632 3633 return best_node; 3634 } 3635 3636 3637 /* 3638 * Build zonelists ordered by node and zones within node. 3639 * This results in maximum locality--normal zone overflows into local 3640 * DMA zone, if any--but risks exhausting DMA zone. 3641 */ 3642 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node) 3643 { 3644 int j; 3645 struct zonelist *zonelist; 3646 3647 zonelist = &pgdat->node_zonelists[0]; 3648 for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++) 3649 ; 3650 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3651 zonelist->_zonerefs[j].zone = NULL; 3652 zonelist->_zonerefs[j].zone_idx = 0; 3653 } 3654 3655 /* 3656 * Build gfp_thisnode zonelists 3657 */ 3658 static void build_thisnode_zonelists(pg_data_t *pgdat) 3659 { 3660 int j; 3661 struct zonelist *zonelist; 3662 3663 zonelist = &pgdat->node_zonelists[1]; 3664 j = build_zonelists_node(pgdat, zonelist, 0); 3665 zonelist->_zonerefs[j].zone = NULL; 3666 zonelist->_zonerefs[j].zone_idx = 0; 3667 } 3668 3669 /* 3670 * Build zonelists ordered by zone and nodes within zones. 
3671 * This results in conserving DMA zone[s] until all Normal memory is
3672 * exhausted, but results in overflowing to a remote node while memory
3673 * may still exist in the local DMA zone.
3674 */
3675 static int node_order[MAX_NUMNODES];
3676
3677 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
3678 {
3679 int pos, j, node;
3680 int zone_type; /* needs to be signed */
3681 struct zone *z;
3682 struct zonelist *zonelist;
3683
3684 zonelist = &pgdat->node_zonelists[0];
3685 pos = 0;
3686 for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
3687 for (j = 0; j < nr_nodes; j++) {
3688 node = node_order[j];
3689 z = &NODE_DATA(node)->node_zones[zone_type];
3690 if (populated_zone(z)) {
3691 zoneref_set_zone(z,
3692 &zonelist->_zonerefs[pos++]);
3693 check_highest_zone(zone_type);
3694 }
3695 }
3696 }
3697 zonelist->_zonerefs[pos].zone = NULL;
3698 zonelist->_zonerefs[pos].zone_idx = 0;
3699 }
3700
3701 #if defined(CONFIG_64BIT)
3702 /*
3703 * Devices that require DMA32/DMA are relatively rare and do not justify a
3704 * penalty to every machine in case the specialised case applies. Default
3705 * to Node-ordering on 64-bit NUMA machines.
3706 */
3707 static int default_zonelist_order(void)
3708 {
3709 return ZONELIST_ORDER_NODE;
3710 }
3711 #else
3712 /*
3713 * On 32-bit, the Normal zone needs to be preserved for allocations accessible
3714 * by the kernel. If processes running on node 0 deplete the low memory zone
3715 * then reclaim will occur more frequently, increasing stalls, and the system
3716 * will potentially be easier to OOM if a large percentage of the zone is under
3717 * writeback or dirty. The problem is significantly worse if CONFIG_HIGHPTE is
3718 * not set. Hence, default to zone ordering on 32-bit.
3719 */
3720 static int default_zonelist_order(void)
3721 {
3722 return ZONELIST_ORDER_ZONE;
3723 }
3724 #endif /* CONFIG_64BIT */
3725
3726 static void set_zonelist_order(void)
3727 {
3728 if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
3729 current_zonelist_order = default_zonelist_order();
3730 else
3731 current_zonelist_order = user_zonelist_order;
3732 }
3733
3734 static void build_zonelists(pg_data_t *pgdat)
3735 {
3736 int j, node, load;
3737 enum zone_type i;
3738 nodemask_t used_mask;
3739 int local_node, prev_node;
3740 struct zonelist *zonelist;
3741 int order = current_zonelist_order;
3742
3743 /* initialize zonelists */
3744 for (i = 0; i < MAX_ZONELISTS; i++) {
3745 zonelist = pgdat->node_zonelists + i;
3746 zonelist->_zonerefs[0].zone = NULL;
3747 zonelist->_zonerefs[0].zone_idx = 0;
3748 }
3749
3750 /* NUMA-aware ordering of nodes */
3751 local_node = pgdat->node_id;
3752 load = nr_online_nodes;
3753 prev_node = local_node;
3754 nodes_clear(used_mask);
3755
3756 memset(node_order, 0, sizeof(node_order));
3757 j = 0;
3758
3759 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
3760 /*
3761 * We don't want to pressure a particular node, so we add
3762 * a penalty to the first node in the same distance group
3763 * to make it round-robin.
3764 */
3765 if (node_distance(local_node, node) !=
3766 node_distance(local_node, prev_node))
3767 node_load[node] = load;
3768
3769 prev_node = node;
3770 load--;
3771 if (order == ZONELIST_ORDER_NODE)
3772 build_zonelists_in_node_order(pgdat, node);
3773 else
3774 node_order[j++] = node; /* remember order */
3775 }
3776
3777 if (order == ZONELIST_ORDER_ZONE) {
3778 /* calculate node order -- i.e., DMA last!
*/ 3779 build_zonelists_in_zone_order(pgdat, j); 3780 } 3781 3782 build_thisnode_zonelists(pgdat); 3783 } 3784 3785 /* Construct the zonelist performance cache - see further mmzone.h */ 3786 static void build_zonelist_cache(pg_data_t *pgdat) 3787 { 3788 struct zonelist *zonelist; 3789 struct zonelist_cache *zlc; 3790 struct zoneref *z; 3791 3792 zonelist = &pgdat->node_zonelists[0]; 3793 zonelist->zlcache_ptr = zlc = &zonelist->zlcache; 3794 bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST); 3795 for (z = zonelist->_zonerefs; z->zone; z++) 3796 zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z); 3797 } 3798 3799 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 3800 /* 3801 * Return node id of node used for "local" allocations. 3802 * I.e., first node id of first zone in arg node's generic zonelist. 3803 * Used for initializing percpu 'numa_mem', which is used primarily 3804 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist. 3805 */ 3806 int local_memory_node(int node) 3807 { 3808 struct zone *zone; 3809 3810 (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL), 3811 gfp_zone(GFP_KERNEL), 3812 NULL, 3813 &zone); 3814 return zone->node; 3815 } 3816 #endif 3817 3818 #else /* CONFIG_NUMA */ 3819 3820 static void set_zonelist_order(void) 3821 { 3822 current_zonelist_order = ZONELIST_ORDER_ZONE; 3823 } 3824 3825 static void build_zonelists(pg_data_t *pgdat) 3826 { 3827 int node, local_node; 3828 enum zone_type j; 3829 struct zonelist *zonelist; 3830 3831 local_node = pgdat->node_id; 3832 3833 zonelist = &pgdat->node_zonelists[0]; 3834 j = build_zonelists_node(pgdat, zonelist, 0); 3835 3836 /* 3837 * Now we build the zonelist so that it contains the zones 3838 * of all the other nodes. 3839 * We don't want to pressure a particular node, so when 3840 * building the zones for node N, we make sure that the 3841 * zones coming right after the local ones are those from 3842 * node N+1 (modulo N) 3843 */ 3844 for (node = local_node + 1; node < MAX_NUMNODES; node++) { 3845 if (!node_online(node)) 3846 continue; 3847 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3848 } 3849 for (node = 0; node < local_node; node++) { 3850 if (!node_online(node)) 3851 continue; 3852 j = build_zonelists_node(NODE_DATA(node), zonelist, j); 3853 } 3854 3855 zonelist->_zonerefs[j].zone = NULL; 3856 zonelist->_zonerefs[j].zone_idx = 0; 3857 } 3858 3859 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */ 3860 static void build_zonelist_cache(pg_data_t *pgdat) 3861 { 3862 pgdat->node_zonelists[0].zlcache_ptr = NULL; 3863 } 3864 3865 #endif /* CONFIG_NUMA */ 3866 3867 /* 3868 * Boot pageset table. One per cpu which is going to be used for all 3869 * zones and all nodes. The parameters will be set in such a way 3870 * that an item put on a list will immediately be handed over to 3871 * the buddy list. This is safe since pageset manipulation is done 3872 * with interrupts disabled. 3873 * 3874 * The boot_pagesets must be kept even after bootup is complete for 3875 * unused processors and/or zones. They do play a role for bootstrapping 3876 * hotplugged processors. 3877 * 3878 * zoneinfo_show() and maybe other functions do 3879 * not check if the processor is online before following the pageset pointer. 3880 * Other parts of the kernel may not check if the zone is available. 
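 *
 * (Editor's note, derived from the code below: setup_pageset(p, 0)
 * leaves ->high == 0 and ->batch == 1 via pageset_set_batch(), so a
 * page freed to a boot pageset is drained to the buddy lists almost
 * immediately.)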
3881 */ 3882 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch); 3883 static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset); 3884 static void setup_zone_pageset(struct zone *zone); 3885 3886 /* 3887 * Global mutex to protect against size modification of zonelists 3888 * as well as to serialize pageset setup for the new populated zone. 3889 */ 3890 DEFINE_MUTEX(zonelists_mutex); 3891 3892 /* The return value is int just for the benefit of stop_machine() */ 3893 static int __build_all_zonelists(void *data) 3894 { 3895 int nid; 3896 int cpu; 3897 pg_data_t *self = data; 3898 3899 #ifdef CONFIG_NUMA 3900 memset(node_load, 0, sizeof(node_load)); 3901 #endif 3902 3903 if (self && !node_online(self->node_id)) { 3904 build_zonelists(self); 3905 build_zonelist_cache(self); 3906 } 3907 3908 for_each_online_node(nid) { 3909 pg_data_t *pgdat = NODE_DATA(nid); 3910 3911 build_zonelists(pgdat); 3912 build_zonelist_cache(pgdat); 3913 } 3914 3915 /* 3916 * Initialize the boot_pagesets that are going to be used 3917 * for bootstrapping processors. The real pagesets for 3918 * each zone will be allocated later when the per cpu 3919 * allocator is available. 3920 * 3921 * boot_pagesets are used also for bootstrapping offline 3922 * cpus if the system is already booted because the pagesets 3923 * are needed to initialize allocators on a specific cpu too. 3924 * E.g. the percpu allocator needs the page allocator which 3925 * needs the percpu allocator in order to allocate its pagesets 3926 * (a chicken-egg dilemma). 3927 */ 3928 for_each_possible_cpu(cpu) { 3929 setup_pageset(&per_cpu(boot_pageset, cpu), 0); 3930 3931 #ifdef CONFIG_HAVE_MEMORYLESS_NODES 3932 /* 3933 * We now know the "local memory node" for each node-- 3934 * i.e., the node of the first zone in the generic zonelist. 3935 * Set up numa_mem percpu variable for on-line cpus. During 3936 * boot, only the boot cpu should be on-line; we'll init the 3937 * secondary cpus' numa_mem as they come on-line. During 3938 * node/memory hotplug, we'll fixup all on-line cpus. 3939 */ 3940 if (cpu_online(cpu)) 3941 set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu))); 3942 #endif 3943 } 3944 3945 return 0; 3946 } 3947 3948 /* 3949 * Called with zonelists_mutex held always 3950 * unless system_state == SYSTEM_BOOTING. 3951 */ 3952 void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) 3953 { 3954 set_zonelist_order(); 3955 3956 if (system_state == SYSTEM_BOOTING) { 3957 __build_all_zonelists(NULL); 3958 mminit_verify_zonelist(); 3959 cpuset_init_current_mems_allowed(); 3960 } else { 3961 #ifdef CONFIG_MEMORY_HOTPLUG 3962 if (zone) 3963 setup_zone_pageset(zone); 3964 #endif 3965 /* we have to stop all cpus to guarantee there is no user 3966 of zonelist */ 3967 stop_machine(__build_all_zonelists, pgdat, NULL); 3968 /* cpuset refresh routine should be here */ 3969 } 3970 vm_total_pages = nr_free_pagecache_pages(); 3971 /* 3972 * Disable grouping by mobility if the number of pages in the 3973 * system is too low to allow the mechanism to work. It would be 3974 * more accurate, but expensive to check per-zone. This check is 3975 * made on memory-hotadd so a system can start with mobility 3976 * disabled and enable it later. 3977 */ 3978 if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES)) 3979 page_group_by_mobility_disabled = 1; 3980 else 3981 page_group_by_mobility_disabled = 0; 3982 3983 pr_info("Built %i zonelists in %s order, mobility grouping %s. 
" 3984 "Total pages: %ld\n", 3985 nr_online_nodes, 3986 zonelist_order_name[current_zonelist_order], 3987 page_group_by_mobility_disabled ? "off" : "on", 3988 vm_total_pages); 3989 #ifdef CONFIG_NUMA 3990 pr_info("Policy zone: %s\n", zone_names[policy_zone]); 3991 #endif 3992 } 3993 3994 /* 3995 * Helper functions to size the waitqueue hash table. 3996 * Essentially these want to choose hash table sizes sufficiently 3997 * large so that collisions trying to wait on pages are rare. 3998 * But in fact, the number of active page waitqueues on typical 3999 * systems is ridiculously low, less than 200. So this is even 4000 * conservative, even though it seems large. 4001 * 4002 * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to 4003 * waitqueues, i.e. the size of the waitq table given the number of pages. 4004 */ 4005 #define PAGES_PER_WAITQUEUE 256 4006 4007 #ifndef CONFIG_MEMORY_HOTPLUG 4008 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 4009 { 4010 unsigned long size = 1; 4011 4012 pages /= PAGES_PER_WAITQUEUE; 4013 4014 while (size < pages) 4015 size <<= 1; 4016 4017 /* 4018 * Once we have dozens or even hundreds of threads sleeping 4019 * on IO we've got bigger problems than wait queue collision. 4020 * Limit the size of the wait table to a reasonable size. 4021 */ 4022 size = min(size, 4096UL); 4023 4024 return max(size, 4UL); 4025 } 4026 #else 4027 /* 4028 * A zone's size might be changed by hot-add, so it is not possible to determine 4029 * a suitable size for its wait_table. So we use the maximum size now. 4030 * 4031 * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie: 4032 * 4033 * i386 (preemption config) : 4096 x 16 = 64Kbyte. 4034 * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte. 4035 * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte. 4036 * 4037 * The maximum entries are prepared when a zone's memory is (512K + 256) pages 4038 * or more by the traditional way. (See above). It equals: 4039 * 4040 * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte. 4041 * ia64(16K page size) : = ( 8G + 4M)byte. 4042 * powerpc (64K page size) : = (32G +16M)byte. 4043 */ 4044 static inline unsigned long wait_table_hash_nr_entries(unsigned long pages) 4045 { 4046 return 4096UL; 4047 } 4048 #endif 4049 4050 /* 4051 * This is an integer logarithm so that shifts can be used later 4052 * to extract the more random high bits from the multiplicative 4053 * hash function before the remainder is taken. 4054 */ 4055 static inline unsigned long wait_table_bits(unsigned long size) 4056 { 4057 return ffz(~size); 4058 } 4059 4060 /* 4061 * Check if a pageblock contains reserved pages 4062 */ 4063 static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn) 4064 { 4065 unsigned long pfn; 4066 4067 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4068 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn))) 4069 return 1; 4070 } 4071 return 0; 4072 } 4073 4074 /* 4075 * Mark a number of pageblocks as MIGRATE_RESERVE. The number 4076 * of blocks reserved is based on min_wmark_pages(zone). The memory within 4077 * the reserve will tend to store contiguous free pages. 
Setting min_free_kbytes 4078 * higher will lead to a bigger reserve which will get freed as contiguous 4079 * blocks as reclaim kicks in. 4080 */ 4081 static void setup_zone_migrate_reserve(struct zone *zone) 4082 { 4083 unsigned long start_pfn, pfn, end_pfn, block_end_pfn; 4084 struct page *page; 4085 unsigned long block_migratetype; 4086 int reserve; 4087 int old_reserve; 4088 4089 /* 4090 * Get the start pfn, end pfn and the number of blocks to reserve. 4091 * We have to be careful to be aligned to pageblock_nr_pages to 4092 * make sure that we always check pfn_valid for the first page in 4093 * the block. 4094 */ 4095 start_pfn = zone->zone_start_pfn; 4096 end_pfn = zone_end_pfn(zone); 4097 start_pfn = roundup(start_pfn, pageblock_nr_pages); 4098 reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >> 4099 pageblock_order; 4100 4101 /* 4102 * Reserve blocks are generally in place to help high-order atomic 4103 * allocations that are short-lived. A min_free_kbytes value that 4104 * would result in more than 2 reserve blocks for atomic allocations 4105 * is assumed to be in place to help anti-fragmentation for the 4106 * future allocation of hugepages at runtime. 4107 */ 4108 reserve = min(2, reserve); 4109 old_reserve = zone->nr_migrate_reserve_block; 4110 4111 /* On memory hot-add, we almost always need to do nothing */ 4112 if (reserve == old_reserve) 4113 return; 4114 zone->nr_migrate_reserve_block = reserve; 4115 4116 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 4117 if (!pfn_valid(pfn)) 4118 continue; 4119 page = pfn_to_page(pfn); 4120 4121 /* Watch out for overlapping nodes */ 4122 if (page_to_nid(page) != zone_to_nid(zone)) 4123 continue; 4124 4125 block_migratetype = get_pageblock_migratetype(page); 4126 4127 /* Only test what is necessary when the reserves are not met */ 4128 if (reserve > 0) { 4129 /* 4130 * Blocks with reserved pages will never be freed, so 4131 * skip them. 4132 */ 4133 block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn); 4134 if (pageblock_is_reserved(pfn, block_end_pfn)) 4135 continue; 4136 4137 /* If this block is reserved, account for it */ 4138 if (block_migratetype == MIGRATE_RESERVE) { 4139 reserve--; 4140 continue; 4141 } 4142 4143 /* Suitable for reserving if this block is movable */ 4144 if (block_migratetype == MIGRATE_MOVABLE) { 4145 set_pageblock_migratetype(page, 4146 MIGRATE_RESERVE); 4147 move_freepages_block(zone, page, 4148 MIGRATE_RESERVE); 4149 reserve--; 4150 continue; 4151 } 4152 } else if (!old_reserve) { 4153 /* 4154 * At boot time we don't need to scan the whole zone 4155 * for turning off MIGRATE_RESERVE. 4156 */ 4157 break; 4158 } 4159 4160 /* 4161 * If the reserve is met and this is a previously reserved block, 4162 * take it back 4163 */ 4164 if (block_migratetype == MIGRATE_RESERVE) { 4165 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4166 move_freepages_block(zone, page, MIGRATE_MOVABLE); 4167 } 4168 } 4169 } 4170 4171 /* 4172 * Initially all pages are reserved - free ones are freed 4173 * up by free_all_bootmem() once the early boot process is 4174 * done. Non-atomic initialization, single-pass. 
4175 */ 4176 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, 4177 unsigned long start_pfn, enum memmap_context context) 4178 { 4179 struct page *page; 4180 unsigned long end_pfn = start_pfn + size; 4181 unsigned long pfn; 4182 struct zone *z; 4183 4184 if (highest_memmap_pfn < end_pfn - 1) 4185 highest_memmap_pfn = end_pfn - 1; 4186 4187 z = &NODE_DATA(nid)->node_zones[zone]; 4188 for (pfn = start_pfn; pfn < end_pfn; pfn++) { 4189 /* 4190 * There can be holes in boot-time mem_map[]s 4191 * handed to this function. They do not 4192 * exist on hotplugged memory. 4193 */ 4194 if (context == MEMMAP_EARLY) { 4195 if (!early_pfn_valid(pfn)) 4196 continue; 4197 if (!early_pfn_in_nid(pfn, nid)) 4198 continue; 4199 } 4200 page = pfn_to_page(pfn); 4201 set_page_links(page, zone, nid, pfn); 4202 mminit_verify_page_links(page, zone, nid, pfn); 4203 init_page_count(page); 4204 page_mapcount_reset(page); 4205 page_cpupid_reset_last(page); 4206 SetPageReserved(page); 4207 /* 4208 * Mark the block movable so that blocks are reserved for 4209 * movable allocations at startup. This will force kernel allocations 4210 * to reserve their blocks rather than leaking throughout 4211 * the address space during boot when many long-lived 4212 * kernel allocations are made. Later some blocks near 4213 * the start are marked MIGRATE_RESERVE by 4214 * setup_zone_migrate_reserve(). 4215 * 4216 * The bitmap is created for the zone's valid pfn range, but the memmap 4217 * can be created for invalid pages (for alignment); 4218 * check here so that we do not call set_pageblock_migratetype() against 4219 * a pfn outside the zone. 4220 */ 4221 if ((z->zone_start_pfn <= pfn) 4222 && (pfn < zone_end_pfn(z)) 4223 && !(pfn & (pageblock_nr_pages - 1))) 4224 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 4225 4226 INIT_LIST_HEAD(&page->lru); 4227 #ifdef WANT_PAGE_VIRTUAL 4228 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 4229 if (!is_highmem_idx(zone)) 4230 set_page_address(page, __va(pfn << PAGE_SHIFT)); 4231 #endif 4232 } 4233 } 4234 4235 static void __meminit zone_init_free_lists(struct zone *zone) 4236 { 4237 unsigned int order, t; 4238 for_each_migratetype_order(order, t) { 4239 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 4240 zone->free_area[order].nr_free = 0; 4241 } 4242 } 4243 4244 #ifndef __HAVE_ARCH_MEMMAP_INIT 4245 #define memmap_init(size, nid, zone, start_pfn) \ 4246 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 4247 #endif 4248 4249 static int zone_batchsize(struct zone *zone) 4250 { 4251 #ifdef CONFIG_MMU 4252 int batch; 4253 4254 /* 4255 * The per-cpu-pages pools are set to around 1/1000th of the 4256 * size of the zone. But no more than 1/2 of a meg. 4257 * 4258 * OK, so we don't know how big the cache is. So guess. 4259 */ 4260 batch = zone->managed_pages / 1024; 4261 if (batch * PAGE_SIZE > 512 * 1024) 4262 batch = (512 * 1024) / PAGE_SIZE; 4263 batch /= 4; /* We effectively *= 4 below */ 4264 if (batch < 1) 4265 batch = 1; 4266 4267 /* 4268 * Clamp the batch to a 2^n - 1 value. Having a power 4269 * of 2 value was found to be more likely to have 4270 * suboptimal cache aliasing properties in some cases. 4271 * 4272 * For example if 2 tasks are alternately allocating 4273 * batches of pages, one task can end up with a lot 4274 * of pages of one half of the possible page colors 4275 * and the other with pages of the other colors. 
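 *
 * Editor's worked example (illustrative, assuming 4KiB pages): a zone
 * with managed_pages == 262144 (1GiB) starts with batch = 262144 /
 * 1024 = 256; 256 pages exceed 512KiB, so batch becomes (512 * 1024)
 * / PAGE_SIZE = 128, then 128 / 4 = 32, and finally
 * rounddown_pow_of_two(32 + 16) - 1 = 31.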
4276 */ 4277 batch = rounddown_pow_of_two(batch + batch/2) - 1; 4278 4279 return batch; 4280 4281 #else 4282 /* The deferral and batching of frees should be suppressed under NOMMU 4283 * conditions. 4284 * 4285 * The problem is that NOMMU needs to be able to allocate large chunks 4286 * of contiguous memory as there's no hardware page translation to 4287 * assemble apparent contiguous memory from discontiguous pages. 4288 * 4289 * Queueing large contiguous runs of pages for batching, however, 4290 * causes the pages to actually be freed in smaller chunks. As there 4291 * can be a significant delay between the individual batches being 4292 * recycled, this leads to the once large chunks of space being 4293 * fragmented and becoming unavailable for high-order allocations. 4294 */ 4295 return 0; 4296 #endif 4297 } 4298 4299 /* 4300 * pcp->high and pcp->batch values are related and dependent on one another: 4301 * ->batch must never be higher than ->high. 4302 * The following function updates them in a safe manner without read side 4303 * locking. 4304 * 4305 * Any new users of pcp->batch and pcp->high should ensure they can cope with 4306 * those fields changing asynchronously (according to the above rule). 4307 * 4308 * pcp_batch_high_lock must be held when calling this function 4309 * outside of boot time (or some other assurance that no concurrent updaters 4310 * exist). 4311 */ 4312 static void pageset_update(struct per_cpu_pages *pcp, unsigned long high, 4313 unsigned long batch) 4314 { 4315 /* start with a fail-safe value for batch */ 4316 pcp->batch = 1; 4317 smp_wmb(); 4318 4319 /* Update high, then batch, in order */ 4320 pcp->high = high; 4321 smp_wmb(); 4322 4323 pcp->batch = batch; 4324 } 4325 4326 /* a companion to pageset_set_high() */ 4327 static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch) 4328 { 4329 pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch)); 4330 } 4331 4332 static void pageset_init(struct per_cpu_pageset *p) 4333 { 4334 struct per_cpu_pages *pcp; 4335 int migratetype; 4336 4337 memset(p, 0, sizeof(*p)); 4338 4339 pcp = &p->pcp; 4340 pcp->count = 0; 4341 for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++) 4342 INIT_LIST_HEAD(&pcp->lists[migratetype]); 4343 } 4344 4345 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) 4346 { 4347 pageset_init(p); 4348 pageset_set_batch(p, batch); 4349 } 4350 4351 /* 4352 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist 4353 * to the value high for the pageset p. 
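 *
 * Illustrative numbers (editor's sketch, assuming 4KiB pages, i.e.
 * PAGE_SHIFT == 12): percpu_pagelist_fraction == 8 on a zone managing
 * 262144 pages gives high == 32768; since high / 4 == 8192 exceeds
 * PAGE_SHIFT * 8 == 96, the batch below is capped at 96.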
4354 */ 4355 static void pageset_set_high(struct per_cpu_pageset *p, 4356 unsigned long high) 4357 { 4358 unsigned long batch = max(1UL, high / 4); 4359 if ((high / 4) > (PAGE_SHIFT * 8)) 4360 batch = PAGE_SHIFT * 8; 4361 4362 pageset_update(&p->pcp, high, batch); 4363 } 4364 4365 static void pageset_set_high_and_batch(struct zone *zone, 4366 struct per_cpu_pageset *pcp) 4367 { 4368 if (percpu_pagelist_fraction) 4369 pageset_set_high(pcp, 4370 (zone->managed_pages / 4371 percpu_pagelist_fraction)); 4372 else 4373 pageset_set_batch(pcp, zone_batchsize(zone)); 4374 } 4375 4376 static void __meminit zone_pageset_init(struct zone *zone, int cpu) 4377 { 4378 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); 4379 4380 pageset_init(pcp); 4381 pageset_set_high_and_batch(zone, pcp); 4382 } 4383 4384 static void __meminit setup_zone_pageset(struct zone *zone) 4385 { 4386 int cpu; 4387 zone->pageset = alloc_percpu(struct per_cpu_pageset); 4388 for_each_possible_cpu(cpu) 4389 zone_pageset_init(zone, cpu); 4390 } 4391 4392 /* 4393 * Allocate per cpu pagesets and initialize them. 4394 * Before this call only boot pagesets were available. 4395 */ 4396 void __init setup_per_cpu_pageset(void) 4397 { 4398 struct zone *zone; 4399 4400 for_each_populated_zone(zone) 4401 setup_zone_pageset(zone); 4402 } 4403 4404 static noinline __init_refok 4405 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages) 4406 { 4407 int i; 4408 size_t alloc_size; 4409 4410 /* 4411 * The per-page waitqueue mechanism uses hashed waitqueues 4412 * per zone. 4413 */ 4414 zone->wait_table_hash_nr_entries = 4415 wait_table_hash_nr_entries(zone_size_pages); 4416 zone->wait_table_bits = 4417 wait_table_bits(zone->wait_table_hash_nr_entries); 4418 alloc_size = zone->wait_table_hash_nr_entries 4419 * sizeof(wait_queue_head_t); 4420 4421 if (!slab_is_available()) { 4422 zone->wait_table = (wait_queue_head_t *) 4423 memblock_virt_alloc_node_nopanic( 4424 alloc_size, zone->zone_pgdat->node_id); 4425 } else { 4426 /* 4427 * This case means that a zone whose size was 0 gets new memory 4428 * via memory hot-add. 4429 * But it may be the case that a new node was hot-added. In 4430 * this case vmalloc() will not be able to use this new node's 4431 * memory - this wait_table must be initialized to use this new 4432 * node itself as well. 4433 * To use this new node's memory, further consideration will be 4434 * necessary. 4435 */ 4436 zone->wait_table = vmalloc(alloc_size); 4437 } 4438 if (!zone->wait_table) 4439 return -ENOMEM; 4440 4441 for (i = 0; i < zone->wait_table_hash_nr_entries; ++i) 4442 init_waitqueue_head(zone->wait_table + i); 4443 4444 return 0; 4445 } 4446 4447 static __meminit void zone_pcp_init(struct zone *zone) 4448 { 4449 /* 4450 * per cpu subsystem is not up at this point. The following code 4451 * relies on the ability of the linker to provide the 4452 * offset of a (static) per cpu variable into the per cpu area. 
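 *
 * (Editor's note: this is why zone->pageset can already point at the
 * static per-CPU boot_pageset below, before alloc_percpu() is usable.)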
*/ 4454 zone->pageset = &boot_pageset; 4455 4456 if (populated_zone(zone)) 4457 printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n", 4458 zone->name, zone->present_pages, 4459 zone_batchsize(zone)); 4460 } 4461 4462 int __meminit init_currently_empty_zone(struct zone *zone, 4463 unsigned long zone_start_pfn, 4464 unsigned long size, 4465 enum memmap_context context) 4466 { 4467 struct pglist_data *pgdat = zone->zone_pgdat; 4468 int ret; 4469 ret = zone_wait_table_init(zone, size); 4470 if (ret) 4471 return ret; 4472 pgdat->nr_zones = zone_idx(zone) + 1; 4473 4474 zone->zone_start_pfn = zone_start_pfn; 4475 4476 mminit_dprintk(MMINIT_TRACE, "memmap_init", 4477 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 4478 pgdat->node_id, 4479 (unsigned long)zone_idx(zone), 4480 zone_start_pfn, (zone_start_pfn + size)); 4481 4482 zone_init_free_lists(zone); 4483 4484 return 0; 4485 } 4486 4487 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 4488 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID 4489 /* 4490 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 4491 */ 4492 int __meminit __early_pfn_to_nid(unsigned long pfn) 4493 { 4494 unsigned long start_pfn, end_pfn; 4495 int nid; 4496 /* 4497 * NOTE: The following SMP-unsafe globals are only used early in boot 4498 * when the kernel is running single-threaded. 4499 */ 4500 static unsigned long __meminitdata last_start_pfn, last_end_pfn; 4501 static int __meminitdata last_nid; 4502 4503 if (last_start_pfn <= pfn && pfn < last_end_pfn) 4504 return last_nid; 4505 4506 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 4507 if (nid != -1) { 4508 last_start_pfn = start_pfn; 4509 last_end_pfn = end_pfn; 4510 last_nid = nid; 4511 } 4512 4513 return nid; 4514 } 4515 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ 4516 4517 int __meminit early_pfn_to_nid(unsigned long pfn) 4518 { 4519 int nid; 4520 4521 nid = __early_pfn_to_nid(pfn); 4522 if (nid >= 0) 4523 return nid; 4524 /* just returns 0 */ 4525 return 0; 4526 } 4527 4528 #ifdef CONFIG_NODES_SPAN_OTHER_NODES 4529 bool __meminit early_pfn_in_nid(unsigned long pfn, int node) 4530 { 4531 int nid; 4532 4533 nid = __early_pfn_to_nid(pfn); 4534 if (nid >= 0 && nid != node) 4535 return false; 4536 return true; 4537 } 4538 #endif 4539 4540 /** 4541 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range 4542 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. 4543 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid 4544 * 4545 * If an architecture guarantees that all ranges registered contain no holes 4546 * and may be freed, this function may be used instead of calling 4547 * memblock_free_early_nid() manually. 4548 */ 4549 void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn) 4550 { 4551 unsigned long start_pfn, end_pfn; 4552 int i, this_nid; 4553 4554 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) { 4555 start_pfn = min(start_pfn, max_low_pfn); 4556 end_pfn = min(end_pfn, max_low_pfn); 4557 4558 if (start_pfn < end_pfn) 4559 memblock_free_early_nid(PFN_PHYS(start_pfn), 4560 (end_pfn - start_pfn) << PAGE_SHIFT, 4561 this_nid); 4562 } 4563 } 4564 4565 /** 4566 * sparse_memory_present_with_active_regions - Call memory_present for each active range 4567 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used. 
4568 * 4569 * If an architecture guarantees that all ranges registered contain no holes and may 4570 * be freed, this function may be used instead of calling memory_present() manually. 4571 */ 4572 void __init sparse_memory_present_with_active_regions(int nid) 4573 { 4574 unsigned long start_pfn, end_pfn; 4575 int i, this_nid; 4576 4577 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) 4578 memory_present(this_nid, start_pfn, end_pfn); 4579 } 4580 4581 /** 4582 * get_pfn_range_for_nid - Return the start and end page frames for a node 4583 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 4584 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 4585 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 4586 * 4587 * It returns the start and end page frame of a node based on information 4588 * provided by memblock_set_node(). If called for a node 4589 * with no available memory, a warning is printed and the start and end 4590 * PFNs will be 0. 4591 */ 4592 void __meminit get_pfn_range_for_nid(unsigned int nid, 4593 unsigned long *start_pfn, unsigned long *end_pfn) 4594 { 4595 unsigned long this_start_pfn, this_end_pfn; 4596 int i; 4597 4598 *start_pfn = -1UL; 4599 *end_pfn = 0; 4600 4601 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 4602 *start_pfn = min(*start_pfn, this_start_pfn); 4603 *end_pfn = max(*end_pfn, this_end_pfn); 4604 } 4605 4606 if (*start_pfn == -1UL) 4607 *start_pfn = 0; 4608 } 4609 4610 /* 4611 * This finds a zone that can be used for ZONE_MOVABLE pages. The 4612 * assumption is made that zones within a node are ordered by monotonically 4613 * increasing memory addresses, so that the "highest" populated zone is used. 4614 */ 4615 static void __init find_usable_zone_for_movable(void) 4616 { 4617 int zone_index; 4618 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 4619 if (zone_index == ZONE_MOVABLE) 4620 continue; 4621 4622 if (arch_zone_highest_possible_pfn[zone_index] > 4623 arch_zone_lowest_possible_pfn[zone_index]) 4624 break; 4625 } 4626 4627 VM_BUG_ON(zone_index == -1); 4628 movable_zone = zone_index; 4629 } 4630 4631 /* 4632 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 4633 * because it is sized independently of the architecture. Unlike the other zones, 4634 * the starting point for ZONE_MOVABLE is not fixed. It may be different 4635 * in each node depending on the size of each node and how evenly kernelcore 4636 * is distributed. This helper function adjusts the zone ranges 4637 * provided by the architecture for a given node by using the end of the 4638 * highest usable zone for ZONE_MOVABLE. 
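 * For example (editor's illustration, hypothetical PFNs): if
 * ZONE_NORMAL on a node would span PFNs 262144-1048576 but
 * zone_movable_pfn[nid] == 786432, the Normal range is clipped to end
 * at 786432 and ZONE_MOVABLE takes over 786432-1048576.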
This preserves the assumption that 4639 * zones within a node are in order of monotonically increasing memory addresses 4640 */ 4641 static void __meminit adjust_zone_range_for_zone_movable(int nid, 4642 unsigned long zone_type, 4643 unsigned long node_start_pfn, 4644 unsigned long node_end_pfn, 4645 unsigned long *zone_start_pfn, 4646 unsigned long *zone_end_pfn) 4647 { 4648 /* Only adjust if ZONE_MOVABLE is on this node */ 4649 if (zone_movable_pfn[nid]) { 4650 /* Size ZONE_MOVABLE */ 4651 if (zone_type == ZONE_MOVABLE) { 4652 *zone_start_pfn = zone_movable_pfn[nid]; 4653 *zone_end_pfn = min(node_end_pfn, 4654 arch_zone_highest_possible_pfn[movable_zone]); 4655 4656 /* Adjust for ZONE_MOVABLE starting within this range */ 4657 } else if (*zone_start_pfn < zone_movable_pfn[nid] && 4658 *zone_end_pfn > zone_movable_pfn[nid]) { 4659 *zone_end_pfn = zone_movable_pfn[nid]; 4660 4661 /* Check if this whole range is within ZONE_MOVABLE */ 4662 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 4663 *zone_start_pfn = *zone_end_pfn; 4664 } 4665 } 4666 4667 /* 4668 * Return the number of pages a zone spans in a node, including holes: 4669 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 4670 */ 4671 static unsigned long __meminit zone_spanned_pages_in_node(int nid, 4672 unsigned long zone_type, 4673 unsigned long node_start_pfn, 4674 unsigned long node_end_pfn, 4675 unsigned long *ignored) 4676 { 4677 unsigned long zone_start_pfn, zone_end_pfn; 4678 4679 /* Get the start and end of the zone */ 4680 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 4681 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 4682 adjust_zone_range_for_zone_movable(nid, zone_type, 4683 node_start_pfn, node_end_pfn, 4684 &zone_start_pfn, &zone_end_pfn); 4685 4686 /* Check that this node has pages within the zone's required range */ 4687 if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn) 4688 return 0; 4689 4690 /* Move the zone boundaries inside the node if necessary */ 4691 zone_end_pfn = min(zone_end_pfn, node_end_pfn); 4692 zone_start_pfn = max(zone_start_pfn, node_start_pfn); 4693 4694 /* Return the spanned pages */ 4695 return zone_end_pfn - zone_start_pfn; 4696 } 4697 4698 /* 4699 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 4700 * then all holes in the requested range will be accounted for. 4701 */ 4702 unsigned long __meminit __absent_pages_in_range(int nid, 4703 unsigned long range_start_pfn, 4704 unsigned long range_end_pfn) 4705 { 4706 unsigned long nr_absent = range_end_pfn - range_start_pfn; 4707 unsigned long start_pfn, end_pfn; 4708 int i; 4709 4710 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 4711 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 4712 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 4713 nr_absent -= end_pfn - start_pfn; 4714 } 4715 return nr_absent; 4716 } 4717 4718 /** 4719 * absent_pages_in_range - Return number of page frames in holes within a range 4720 * @start_pfn: The start PFN to start searching for holes 4721 * @end_pfn: The end PFN to stop searching for holes 4722 * 4723 * It returns the number of page frames in memory holes within a range. 
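 *
 * Editor's illustration (hypothetical layout): with memory present at
 * PFNs [0, 0x1000) and [0x2000, 0x3000), absent_pages_in_range(0,
 * 0x3000) returns 0x3000 - 0x1000 - 0x1000 = 0x1000 pages of holes.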
4724 */ 4725 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 4726 unsigned long end_pfn) 4727 { 4728 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 4729 } 4730 4731 /* Return the number of page frames in holes in a zone on a node */ 4732 static unsigned long __meminit zone_absent_pages_in_node(int nid, 4733 unsigned long zone_type, 4734 unsigned long node_start_pfn, 4735 unsigned long node_end_pfn, 4736 unsigned long *ignored) 4737 { 4738 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 4739 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 4740 unsigned long zone_start_pfn, zone_end_pfn; 4741 4742 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 4743 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 4744 4745 adjust_zone_range_for_zone_movable(nid, zone_type, 4746 node_start_pfn, node_end_pfn, 4747 &zone_start_pfn, &zone_end_pfn); 4748 return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 4749 } 4750 4751 #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 4752 static inline unsigned long __meminit zone_spanned_pages_in_node(int nid, 4753 unsigned long zone_type, 4754 unsigned long node_start_pfn, 4755 unsigned long node_end_pfn, 4756 unsigned long *zones_size) 4757 { 4758 return zones_size[zone_type]; 4759 } 4760 4761 static inline unsigned long __meminit zone_absent_pages_in_node(int nid, 4762 unsigned long zone_type, 4763 unsigned long node_start_pfn, 4764 unsigned long node_end_pfn, 4765 unsigned long *zholes_size) 4766 { 4767 if (!zholes_size) 4768 return 0; 4769 4770 return zholes_size[zone_type]; 4771 } 4772 4773 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 4774 4775 static void __meminit calculate_node_totalpages(struct pglist_data *pgdat, 4776 unsigned long node_start_pfn, 4777 unsigned long node_end_pfn, 4778 unsigned long *zones_size, 4779 unsigned long *zholes_size) 4780 { 4781 unsigned long realtotalpages, totalpages = 0; 4782 enum zone_type i; 4783 4784 for (i = 0; i < MAX_NR_ZONES; i++) 4785 totalpages += zone_spanned_pages_in_node(pgdat->node_id, i, 4786 node_start_pfn, 4787 node_end_pfn, 4788 zones_size); 4789 pgdat->node_spanned_pages = totalpages; 4790 4791 realtotalpages = totalpages; 4792 for (i = 0; i < MAX_NR_ZONES; i++) 4793 realtotalpages -= 4794 zone_absent_pages_in_node(pgdat->node_id, i, 4795 node_start_pfn, node_end_pfn, 4796 zholes_size); 4797 pgdat->node_present_pages = realtotalpages; 4798 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id, 4799 realtotalpages); 4800 } 4801 4802 #ifndef CONFIG_SPARSEMEM 4803 /* 4804 * Calculate the size of the zone->blockflags rounded to an unsigned long. 4805 * Start by making sure zonesize is a multiple of pageblock_order by rounding 4806 * up; then use NR_PAGEBLOCK_BITS worth of bits per pageblock, round 4807 * what is now in bits up to the nearest long in bits, then return it in 4808 * bytes. 
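 *
 * Editor's worked example (illustrative; assumes pageblock_order == 9,
 * NR_PAGEBLOCK_BITS == 4 and 64-bit longs): an aligned zone of 262144
 * pages has 512 pageblocks and so needs 2048 bits, already a multiple
 * of 64, i.e. 256 bytes.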
4809 */ 4810 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 4811 { 4812 unsigned long usemapsize; 4813 4814 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 4815 usemapsize = roundup(zonesize, pageblock_nr_pages); 4816 usemapsize = usemapsize >> pageblock_order; 4817 usemapsize *= NR_PAGEBLOCK_BITS; 4818 usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long)); 4819 4820 return usemapsize / 8; 4821 } 4822 4823 static void __init setup_usemap(struct pglist_data *pgdat, 4824 struct zone *zone, 4825 unsigned long zone_start_pfn, 4826 unsigned long zonesize) 4827 { 4828 unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize); 4829 zone->pageblock_flags = NULL; 4830 if (usemapsize) 4831 zone->pageblock_flags = 4832 memblock_virt_alloc_node_nopanic(usemapsize, 4833 pgdat->node_id); 4834 } 4835 #else 4836 static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone, 4837 unsigned long zone_start_pfn, unsigned long zonesize) {} 4838 #endif /* CONFIG_SPARSEMEM */ 4839 4840 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 4841 4842 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 4843 void __paginginit set_pageblock_order(void) 4844 { 4845 unsigned int order; 4846 4847 /* Check that pageblock_order has not already been set up */ 4848 if (pageblock_order) 4849 return; 4850 4851 if (HPAGE_SHIFT > PAGE_SHIFT) 4852 order = HUGETLB_PAGE_ORDER; 4853 else 4854 order = MAX_ORDER - 1; 4855 4856 /* 4857 * Assume the largest contiguous order of interest is a huge page. 4858 * This value may be variable depending on boot parameters on IA64 and 4859 * powerpc. 4860 */ 4861 pageblock_order = order; 4862 } 4863 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4864 4865 /* 4866 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 4867 * is unused as pageblock_order is set at compile-time. See 4868 * include/linux/pageblock-flags.h for the values of pageblock_order based on 4869 * the kernel config. 4870 */ 4871 void __paginginit set_pageblock_order(void) 4872 { 4873 } 4874 4875 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4876 4877 static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages, 4878 unsigned long present_pages) 4879 { 4880 unsigned long pages = spanned_pages; 4881 4882 /* 4883 * Provide a more accurate estimation if there are holes within 4884 * the zone and SPARSEMEM is in use. If there are holes within the 4885 * zone, each populated memory region may cost us one or two extra 4886 * memmap pages due to alignment because the memmap pages for each 4887 * populated region may not be naturally aligned on a page boundary. 4888 * So the (present_pages >> 4) heuristic is a tradeoff for that. 4889 */ 4890 if (spanned_pages > present_pages + (present_pages >> 4) && 4891 IS_ENABLED(CONFIG_SPARSEMEM)) 4892 pages = present_pages; 4893 4894 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 4895 } 4896 4897 /* 4898 * Set up the zone data structures: 4899 * - mark all pages reserved 4900 * - mark all memory queues empty 4901 * - clear the memory bitmaps 4902 * 4903 * NOTE: pgdat should get zeroed by caller. 
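 *
 * (Editor's note, illustrative: the loop below subtracts
 * calc_memmap_size() from each zone's freesize; assuming 4KiB pages
 * and a 64-byte struct page, a 1GiB zone of 262144 pages spends
 * 262144 * 64 / 4096 = 4096 pages, i.e. 16MiB, on its memmap.)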
4904 */ 4905 static void __paginginit free_area_init_core(struct pglist_data *pgdat, 4906 unsigned long node_start_pfn, unsigned long node_end_pfn, 4907 unsigned long *zones_size, unsigned long *zholes_size) 4908 { 4909 enum zone_type j; 4910 int nid = pgdat->node_id; 4911 unsigned long zone_start_pfn = pgdat->node_start_pfn; 4912 int ret; 4913 4914 pgdat_resize_init(pgdat); 4915 #ifdef CONFIG_NUMA_BALANCING 4916 spin_lock_init(&pgdat->numabalancing_migrate_lock); 4917 pgdat->numabalancing_migrate_nr_pages = 0; 4918 pgdat->numabalancing_migrate_next_window = jiffies; 4919 #endif 4920 init_waitqueue_head(&pgdat->kswapd_wait); 4921 init_waitqueue_head(&pgdat->pfmemalloc_wait); 4922 pgdat_page_ext_init(pgdat); 4923 4924 for (j = 0; j < MAX_NR_ZONES; j++) { 4925 struct zone *zone = pgdat->node_zones + j; 4926 unsigned long size, realsize, freesize, memmap_pages; 4927 4928 size = zone_spanned_pages_in_node(nid, j, node_start_pfn, 4929 node_end_pfn, zones_size); 4930 realsize = freesize = size - zone_absent_pages_in_node(nid, j, 4931 node_start_pfn, 4932 node_end_pfn, 4933 zholes_size); 4934 4935 /* 4936 * Adjust freesize so that it accounts for how much memory 4937 * is used by this zone for memmap. This affects the watermark 4938 * and per-cpu initialisations 4939 */ 4940 memmap_pages = calc_memmap_size(size, realsize); 4941 if (!is_highmem_idx(j)) { 4942 if (freesize >= memmap_pages) { 4943 freesize -= memmap_pages; 4944 if (memmap_pages) 4945 printk(KERN_DEBUG 4946 " %s zone: %lu pages used for memmap\n", 4947 zone_names[j], memmap_pages); 4948 } else 4949 printk(KERN_WARNING 4950 " %s zone: %lu pages exceeds freesize %lu\n", 4951 zone_names[j], memmap_pages, freesize); 4952 } 4953 4954 /* Account for reserved pages */ 4955 if (j == 0 && freesize > dma_reserve) { 4956 freesize -= dma_reserve; 4957 printk(KERN_DEBUG " %s zone: %lu pages reserved\n", 4958 zone_names[0], dma_reserve); 4959 } 4960 4961 if (!is_highmem_idx(j)) 4962 nr_kernel_pages += freesize; 4963 /* Charge for highmem memmap if there are enough kernel pages */ 4964 else if (nr_kernel_pages > memmap_pages * 2) 4965 nr_kernel_pages -= memmap_pages; 4966 nr_all_pages += freesize; 4967 4968 zone->spanned_pages = size; 4969 zone->present_pages = realsize; 4970 /* 4971 * Set an approximate value for lowmem here, it will be adjusted 4972 * when the bootmem allocator frees pages into the buddy system. 4973 * And all highmem pages will be managed by the buddy system. 4974 */ 4975 zone->managed_pages = is_highmem_idx(j) ? 
realsize : freesize; 4976 #ifdef CONFIG_NUMA 4977 zone->node = nid; 4978 zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio) 4979 / 100; 4980 zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100; 4981 #endif 4982 zone->name = zone_names[j]; 4983 spin_lock_init(&zone->lock); 4984 spin_lock_init(&zone->lru_lock); 4985 zone_seqlock_init(zone); 4986 zone->zone_pgdat = pgdat; 4987 zone_pcp_init(zone); 4988 4989 /* For bootup, initialized properly in watermark setup */ 4990 mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages); 4991 4992 lruvec_init(&zone->lruvec); 4993 if (!size) 4994 continue; 4995 4996 set_pageblock_order(); 4997 setup_usemap(pgdat, zone, zone_start_pfn, size); 4998 ret = init_currently_empty_zone(zone, zone_start_pfn, 4999 size, MEMMAP_EARLY); 5000 BUG_ON(ret); 5001 memmap_init(size, nid, j, zone_start_pfn); 5002 zone_start_pfn += size; 5003 } 5004 } 5005 5006 static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) 5007 { 5008 /* Skip empty nodes */ 5009 if (!pgdat->node_spanned_pages) 5010 return; 5011 5012 #ifdef CONFIG_FLAT_NODE_MEM_MAP 5013 /* ia64 gets its own node_mem_map, before this, without bootmem */ 5014 if (!pgdat->node_mem_map) { 5015 unsigned long size, start, end; 5016 struct page *map; 5017 5018 /* 5019 * The zone's endpoints aren't required to be MAX_ORDER 5020 * aligned, but the node_mem_map endpoints must be MAX_ORDER 5021 * aligned for the buddy allocator to function correctly. 5022 */ 5023 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 5024 end = pgdat_end_pfn(pgdat); 5025 end = ALIGN(end, MAX_ORDER_NR_PAGES); 5026 size = (end - start) * sizeof(struct page); 5027 map = alloc_remap(pgdat->node_id, size); 5028 if (!map) 5029 map = memblock_virt_alloc_node_nopanic(size, 5030 pgdat->node_id); 5031 pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); 5032 } 5033 #ifndef CONFIG_NEED_MULTIPLE_NODES 5034 /* 5035 * With no DISCONTIG, the global mem_map is just set as node 0's 5036 */ 5037 if (pgdat == NODE_DATA(0)) { 5038 mem_map = NODE_DATA(0)->node_mem_map; 5039 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5040 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 5041 mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET); 5042 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5043 } 5044 #endif 5045 #endif /* CONFIG_FLAT_NODE_MEM_MAP */ 5046 } 5047 5048 void __paginginit free_area_init_node(int nid, unsigned long *zones_size, 5049 unsigned long node_start_pfn, unsigned long *zholes_size) 5050 { 5051 pg_data_t *pgdat = NODE_DATA(nid); 5052 unsigned long start_pfn = 0; 5053 unsigned long end_pfn = 0; 5054 5055 /* pg_data_t should be reset to zero when it's allocated */ 5056 WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); 5057 5058 pgdat->node_id = nid; 5059 pgdat->node_start_pfn = node_start_pfn; 5060 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 5061 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 5062 printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid, 5063 (u64) start_pfn << PAGE_SHIFT, ((u64) end_pfn << PAGE_SHIFT) - 1); 5064 #endif 5065 calculate_node_totalpages(pgdat, start_pfn, end_pfn, 5066 zones_size, zholes_size); 5067 5068 alloc_node_mem_map(pgdat); 5069 #ifdef CONFIG_FLAT_NODE_MEM_MAP 5070 printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n", 5071 nid, (unsigned long)pgdat, 5072 (unsigned long)pgdat->node_mem_map); 5073 #endif 5074 5075 free_area_init_core(pgdat, start_pfn, end_pfn, 5076 zones_size, zholes_size); 5077 } 5078 5079 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP 
5080 5081 #if MAX_NUMNODES > 1 5082 /* 5083 * Figure out the number of possible node ids. 5084 */ 5085 void __init setup_nr_node_ids(void) 5086 { 5087 unsigned int node; 5088 unsigned int highest = 0; 5089 5090 for_each_node_mask(node, node_possible_map) 5091 highest = node; 5092 nr_node_ids = highest + 1; 5093 } 5094 #endif 5095 5096 /** 5097 * node_map_pfn_alignment - determine the maximum internode alignment 5098 * 5099 * This function should be called after the node map is populated and sorted. 5100 * It calculates the maximum power of two alignment which can distinguish 5101 * all the nodes. 5102 * 5103 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 5104 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 5105 * nodes are shifted by 256MiB, the return value indicates 256MiB. Note that if only the last node is 5106 * shifted, 1GiB is enough and this function will indicate so. 5107 * 5108 * This is used to test whether pfn -> nid mapping of the chosen memory 5109 * model has fine enough granularity to avoid incorrect mapping for the 5110 * populated node map. 5111 * 5112 * Returns the determined alignment in pfns, or 0 if there is no alignment 5113 * requirement (single node). 5114 */ 5115 unsigned long __init node_map_pfn_alignment(void) 5116 { 5117 unsigned long accl_mask = 0, last_end = 0; 5118 unsigned long start, end, mask; 5119 int last_nid = -1; 5120 int i, nid; 5121 5122 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 5123 if (!start || last_nid < 0 || last_nid == nid) { 5124 last_nid = nid; 5125 last_end = end; 5126 continue; 5127 } 5128 5129 /* 5130 * Start with a mask granular enough to pin-point to the 5131 * start pfn and tick off bits one-by-one until it becomes 5132 * too coarse to separate the current node from the last. 5133 */ 5134 mask = ~((1 << __ffs(start)) - 1); 5135 while (mask && last_end <= (start & (mask << 1))) 5136 mask <<= 1; 5137 5138 /* accumulate all internode masks */ 5139 accl_mask |= mask; 5140 } 5141 5142 /* convert mask to number of pages */ 5143 return ~accl_mask + 1; 5144 } 5145 5146 /* Find the lowest pfn for a node */ 5147 static unsigned long __init find_min_pfn_for_node(int nid) 5148 { 5149 unsigned long min_pfn = ULONG_MAX; 5150 unsigned long start_pfn; 5151 int i; 5152 5153 for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL) 5154 min_pfn = min(min_pfn, start_pfn); 5155 5156 if (min_pfn == ULONG_MAX) { 5157 printk(KERN_WARNING 5158 "Could not find start_pfn for node %d\n", nid); 5159 return 0; 5160 } 5161 5162 return min_pfn; 5163 } 5164 5165 /** 5166 * find_min_pfn_with_active_regions - Find the minimum PFN registered 5167 * 5168 * It returns the minimum PFN based on information provided via 5169 * memblock_set_node(). 5170 */ 5171 unsigned long __init find_min_pfn_with_active_regions(void) 5172 { 5173 return find_min_pfn_for_node(MAX_NUMNODES); 5174 } 5175 5176 /* 5177 * early_calculate_totalpages() 5178 * Sum pages in active regions for movable zone. 5179 * Populate N_MEMORY for calculating usable_nodes. 5180 */ 5181 static unsigned long __init early_calculate_totalpages(void) 5182 { 5183 unsigned long totalpages = 0; 5184 unsigned long start_pfn, end_pfn; 5185 int i, nid; 5186 5187 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 5188 unsigned long pages = end_pfn - start_pfn; 5189 5190 totalpages += pages; 5191 if (pages) 5192 node_set_state(nid, N_MEMORY); 5193 } 5194 return totalpages; 5195 } 5196 5197 /* 5198 * Find the PFN the Movable zone begins in each node. 
Kernel memory 5199 * is spread evenly between nodes as long as the nodes have enough 5200 * memory. When they don't, some nodes will have more kernelcore than 5201 * others. 5202 */ 5203 static void __init find_zone_movable_pfns_for_nodes(void) 5204 { 5205 int i, nid; 5206 unsigned long usable_startpfn; 5207 unsigned long kernelcore_node, kernelcore_remaining; 5208 /* save the state before borrowing the nodemask */ 5209 nodemask_t saved_node_state = node_states[N_MEMORY]; 5210 unsigned long totalpages = early_calculate_totalpages(); 5211 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 5212 struct memblock_region *r; 5213 5214 /* Need to find movable_zone earlier when movable_node is specified. */ 5215 find_usable_zone_for_movable(); 5216 5217 /* 5218 * If movable_node is specified, ignore kernelcore and movablecore 5219 * options. 5220 */ 5221 if (movable_node_is_enabled()) { 5222 for_each_memblock(memory, r) { 5223 if (!memblock_is_hotpluggable(r)) 5224 continue; 5225 5226 nid = r->nid; 5227 5228 usable_startpfn = PFN_DOWN(r->base); 5229 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 5230 min(usable_startpfn, zone_movable_pfn[nid]) : 5231 usable_startpfn; 5232 } 5233 5234 goto out2; 5235 } 5236 5237 /* 5238 * If movablecore=nn[KMG] was specified, calculate the corresponding 5239 * size of kernelcore so that memory usable for 5240 * any allocation type is evenly spread. If both kernelcore 5241 * and movablecore are specified, then the value of kernelcore 5242 * will be used for required_kernelcore if it's greater than 5243 * what movablecore would have allowed. 5244 */ 5245 if (required_movablecore) { 5246 unsigned long corepages; 5247 5248 /* 5249 * Round-up so that ZONE_MOVABLE is at least as large as what 5250 * was requested by the user. 5251 */ 5252 required_movablecore = 5253 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 5254 corepages = totalpages - required_movablecore; 5255 5256 required_kernelcore = max(required_kernelcore, corepages); 5257 } 5258 5259 /* If kernelcore was not specified, there is no ZONE_MOVABLE */ 5260 if (!required_kernelcore) 5261 goto out; 5262 5263 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 5264 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 5265 5266 restart: 5267 /* Spread kernelcore memory as evenly as possible throughout nodes */ 5268 kernelcore_node = required_kernelcore / usable_nodes; 5269 for_each_node_state(nid, N_MEMORY) { 5270 unsigned long start_pfn, end_pfn; 5271 5272 /* 5273 * Recalculate kernelcore_node if the division per node 5274 * now exceeds what is necessary to satisfy the requested 5275 * amount of memory for the kernel 5276 */ 5277 if (required_kernelcore < kernelcore_node) 5278 kernelcore_node = required_kernelcore / usable_nodes; 5279 5280 /* 5281 * As the map is walked, we track how much memory is usable 5282 * by the kernel using kernelcore_remaining. 
When it is 5283 * 0, the rest of the node is usable by ZONE_MOVABLE 5284 */ 5285 kernelcore_remaining = kernelcore_node; 5286 5287 /* Go through each range of PFNs within this node */ 5288 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 5289 unsigned long size_pages; 5290 5291 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 5292 if (start_pfn >= end_pfn) 5293 continue; 5294 5295 /* Account for what is only usable for kernelcore */ 5296 if (start_pfn < usable_startpfn) { 5297 unsigned long kernel_pages; 5298 kernel_pages = min(end_pfn, usable_startpfn) 5299 - start_pfn; 5300 5301 kernelcore_remaining -= min(kernel_pages, 5302 kernelcore_remaining); 5303 required_kernelcore -= min(kernel_pages, 5304 required_kernelcore); 5305 5306 /* Continue if range is now fully accounted */ 5307 if (end_pfn <= usable_startpfn) { 5308 5309 /* 5310 * Push zone_movable_pfn to the end so 5311 * that if we have to rebalance 5312 * kernelcore across nodes, we will 5313 * not double account here 5314 */ 5315 zone_movable_pfn[nid] = end_pfn; 5316 continue; 5317 } 5318 start_pfn = usable_startpfn; 5319 } 5320 5321 /* 5322 * The usable PFN range for ZONE_MOVABLE is from 5323 * start_pfn->end_pfn. Calculate size_pages as the 5324 * number of pages used as kernelcore. 5325 */ 5326 size_pages = end_pfn - start_pfn; 5327 if (size_pages > kernelcore_remaining) 5328 size_pages = kernelcore_remaining; 5329 zone_movable_pfn[nid] = start_pfn + size_pages; 5330 5331 /* 5332 * Some kernelcore has been accounted for; update counts and 5333 * break if the kernelcore for this node has been 5334 * satisfied. 5335 */ 5336 required_kernelcore -= min(required_kernelcore, 5337 size_pages); 5338 kernelcore_remaining -= size_pages; 5339 if (!kernelcore_remaining) 5340 break; 5341 } 5342 } 5343 5344 /* 5345 * If there is still required_kernelcore, we do another pass with one 5346 * less node in the count. This will push zone_movable_pfn[nid] further 5347 * along on the nodes that still have memory until kernelcore is 5348 * satisfied. 5349 */ 5350 usable_nodes--; 5351 if (usable_nodes && required_kernelcore > usable_nodes) 5352 goto restart; 5353 5354 out2: 5355 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 5356 for (nid = 0; nid < MAX_NUMNODES; nid++) 5357 zone_movable_pfn[nid] = 5358 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 5359 5360 out: 5361 /* restore the node_state */ 5362 node_states[N_MEMORY] = saved_node_state; 5363 } 5364 5365 /* Any regular or high memory on that node? */ 5366 static void check_for_memory(pg_data_t *pgdat, int nid) 5367 { 5368 enum zone_type zone_type; 5369 5370 if (N_MEMORY == N_NORMAL_MEMORY) 5371 return; 5372 5373 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 5374 struct zone *zone = &pgdat->node_zones[zone_type]; 5375 if (populated_zone(zone)) { 5376 node_set_state(nid, N_HIGH_MEMORY); 5377 if (N_NORMAL_MEMORY != N_HIGH_MEMORY && 5378 zone_type <= ZONE_NORMAL) 5379 node_set_state(nid, N_NORMAL_MEMORY); 5380 break; 5381 } 5382 } 5383 } 5384 5385 /** 5386 * free_area_init_nodes - Initialise all pg_data_t and zone data 5387 * @max_zone_pfn: an array of max PFNs for each zone 5388 * 5389 * This will call free_area_init_node() for each active node in the system. 5390 * Using the page ranges provided by memblock_set_node(), the size of each 5391 * zone in each node, together with its holes, is calculated. If the maximum PFNs 5392 * of two adjacent zones match, it is assumed that the higher zone is empty. 
5393 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 5394 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 5395 * starts where the previous one ended. For example, ZONE_DMA32 starts 5396 * at arch_max_dma_pfn. 5397 */ 5398 void __init free_area_init_nodes(unsigned long *max_zone_pfn) 5399 { 5400 unsigned long start_pfn, end_pfn; 5401 int i, nid; 5402 5403 /* Record where the zone boundaries are */ 5404 memset(arch_zone_lowest_possible_pfn, 0, 5405 sizeof(arch_zone_lowest_possible_pfn)); 5406 memset(arch_zone_highest_possible_pfn, 0, 5407 sizeof(arch_zone_highest_possible_pfn)); 5408 arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); 5409 arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; 5410 for (i = 1; i < MAX_NR_ZONES; i++) { 5411 if (i == ZONE_MOVABLE) 5412 continue; 5413 arch_zone_lowest_possible_pfn[i] = 5414 arch_zone_highest_possible_pfn[i-1]; 5415 arch_zone_highest_possible_pfn[i] = 5416 max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); 5417 } 5418 arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; 5419 arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; 5420 5421 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 5422 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 5423 find_zone_movable_pfns_for_nodes(); 5424 5425 /* Print out the zone ranges */ 5426 pr_info("Zone ranges:\n"); 5427 for (i = 0; i < MAX_NR_ZONES; i++) { 5428 if (i == ZONE_MOVABLE) 5429 continue; 5430 pr_info(" %-8s ", zone_names[i]); 5431 if (arch_zone_lowest_possible_pfn[i] == 5432 arch_zone_highest_possible_pfn[i]) 5433 pr_cont("empty\n"); 5434 else 5435 pr_cont("[mem %0#10lx-%0#10lx]\n", 5436 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT, 5437 (arch_zone_highest_possible_pfn[i] 5438 << PAGE_SHIFT) - 1); 5439 } 5440 5441 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 5442 pr_info("Movable zone start for each node\n"); 5443 for (i = 0; i < MAX_NUMNODES; i++) { 5444 if (zone_movable_pfn[i]) 5445 pr_info(" Node %d: %#010lx\n", i, 5446 zone_movable_pfn[i] << PAGE_SHIFT); 5447 } 5448 5449 /* Print out the early node map */ 5450 pr_info("Early memory node ranges\n"); 5451 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) 5452 pr_info(" node %3d: [mem %#010lx-%#010lx]\n", nid, 5453 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); 5454 5455 /* Initialise every node */ 5456 mminit_verify_pageflags_layout(); 5457 setup_nr_node_ids(); 5458 for_each_online_node(nid) { 5459 pg_data_t *pgdat = NODE_DATA(nid); 5460 free_area_init_node(nid, NULL, 5461 find_min_pfn_for_node(nid), NULL); 5462 5463 /* Any memory on that node? */ 5464 if (pgdat->node_present_pages) 5465 node_set_state(nid, N_MEMORY); 5466 check_for_memory(pgdat, nid); 5467 } 5468 } 5469 5470 static int __init cmdline_parse_core(char *p, unsigned long *core) 5471 { 5472 unsigned long long coremem; 5473 if (!p) 5474 return -EINVAL; 5475 5476 coremem = memparse(p, &p); 5477 *core = coremem >> PAGE_SHIFT; 5478 5479 /* Paranoid check that UL is enough for the coremem value */ 5480 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 5481 5482 return 0; 5483 } 5484 5485 /* 5486 * kernelcore=size sets the amount of memory to use for allocations that 5487 * cannot be reclaimed or migrated. 5488 */ 5489 static int __init cmdline_parse_kernelcore(char *p) 5490 { 5491 return cmdline_parse_core(p, &required_kernelcore); 5492 } 5493 5494 /* 5495 * movablecore=size sets the amount of memory to use for allocations that 5496 * can be reclaimed or migrated. 
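 *
 * Example usage (editor's illustration): booting a 2GiB machine with
 * "kernelcore=512M" keeps 512MiB usable for unmovable allocations and
 * leaves roughly the remaining 1.5GiB to ZONE_MOVABLE, spread across
 * nodes by find_zone_movable_pfns_for_nodes() above.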
5497 */ 5498 static int __init cmdline_parse_movablecore(char *p) 5499 { 5500 return cmdline_parse_core(p, &required_movablecore); 5501 } 5502 5503 early_param("kernelcore", cmdline_parse_kernelcore); 5504 early_param("movablecore", cmdline_parse_movablecore); 5505 5506 #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ 5507 5508 void adjust_managed_page_count(struct page *page, long count) 5509 { 5510 spin_lock(&managed_page_count_lock); 5511 page_zone(page)->managed_pages += count; 5512 totalram_pages += count; 5513 #ifdef CONFIG_HIGHMEM 5514 if (PageHighMem(page)) 5515 totalhigh_pages += count; 5516 #endif 5517 spin_unlock(&managed_page_count_lock); 5518 } 5519 EXPORT_SYMBOL(adjust_managed_page_count); 5520 5521 unsigned long free_reserved_area(void *start, void *end, int poison, char *s) 5522 { 5523 void *pos; 5524 unsigned long pages = 0; 5525 5526 start = (void *)PAGE_ALIGN((unsigned long)start); 5527 end = (void *)((unsigned long)end & PAGE_MASK); 5528 for (pos = start; pos < end; pos += PAGE_SIZE, pages++) { 5529 if ((unsigned int)poison <= 0xFF) 5530 memset(pos, poison, PAGE_SIZE); 5531 free_reserved_page(virt_to_page(pos)); 5532 } 5533 5534 if (pages && s) 5535 pr_info("Freeing %s memory: %ldK (%p - %p)\n", 5536 s, pages << (PAGE_SHIFT - 10), start, end); 5537 5538 return pages; 5539 } 5540 EXPORT_SYMBOL(free_reserved_area); 5541 5542 #ifdef CONFIG_HIGHMEM 5543 void free_highmem_page(struct page *page) 5544 { 5545 __free_reserved_page(page); 5546 totalram_pages++; 5547 page_zone(page)->managed_pages++; 5548 totalhigh_pages++; 5549 } 5550 #endif 5551 5552 5553 void __init mem_init_print_info(const char *str) 5554 { 5555 unsigned long physpages, codesize, datasize, rosize, bss_size; 5556 unsigned long init_code_size, init_data_size; 5557 5558 physpages = get_num_physpages(); 5559 codesize = _etext - _stext; 5560 datasize = _edata - _sdata; 5561 rosize = __end_rodata - __start_rodata; 5562 bss_size = __bss_stop - __bss_start; 5563 init_data_size = __init_end - __init_begin; 5564 init_code_size = _einittext - _sinittext; 5565 5566 /* 5567 * Detect special cases and adjust section sizes accordingly: 5568 * 1) .init.* may be embedded into .data sections 5569 * 2) .init.text.* may be out of [__init_begin, __init_end], 5570 * please refer to arch/tile/kernel/vmlinux.lds.S. 5571 * 3) .rodata.* may be embedded into .text or .data sections. 
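 *
 * As an illustration of case 1: if [__init_begin, __init_end) falls
 * inside [_sdata, _edata), the init data would be counted both as
 * "init" and as "rwdata", so adj_init_size() below subtracts
 * init_data_size from datasize to avoid the double accounting.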
 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (start <= pos && pos < end && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		     _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef	adj_init_size

	pr_info("Memory: %luK/%luK available "
	       "(%luK kernel code, %luK rwdata, %luK rodata, "
	       "%luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef	CONFIG_HIGHMEM
	       ", %luK highmem"
#endif
	       "%s%s)\n",
	       nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
	       codesize >> 10, datasize >> 10, rosize >> 10,
	       (init_data_size + init_code_size) >> 10, bss_size >> 10,
	       (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT-10),
	       totalcma_pages << (PAGE_SHIFT-10),
#ifdef	CONFIG_HIGHMEM
	       totalhigh_pages << (PAGE_SHIFT-10),
#endif
	       str ? ", " : "", str ? str : "");
}

/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}

static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		lru_add_drain_cpu(cpu);
		drain_pages(cpu);

		/*
		 * Spill the event counters of the dead processor
		 * into the current processor's event counters.
		 * This artificially elevates the count of the current
		 * processor.
		 */
		vm_events_fold_cpu(cpu);

		/*
		 * Zero the differential counters of the dead processor
		 * so that the vm statistics are consistent.
		 *
		 * This is only okay since the processor is dead and cannot
		 * race with what we are doing.
		 */
		cpu_vm_stats_fold(cpu);
	}
	return NOTIFY_OK;
}

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 * or min_free_kbytes changes.
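 *
 * The results are consumed elsewhere: totalreserve_pages feeds the
 * overcommit accounting in __vm_enough_memory(), while
 * dirty_balance_reserve is subtracted from dirtyable memory in
 * global_dirtyable_memory().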
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* We treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > zone->managed_pages)
				max = zone->managed_pages;
			reserve_pages += max;
			/*
			 * Lowmem reserves are not available to
			 * GFP_HIGHUSER page cache allocations and
			 * kswapd tries to balance zones to their high
			 * watermark. As a result, neither should be
			 * regarded as dirtyable memory, to prevent a
			 * situation where reclaim has to clean pages
			 * in order to balance the zones.
			 */
			zone->dirty_balance_reserve = max;
		}
	}
	dirty_balance_reserve = reserve_pages;
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 * has a correct pages reserved value, so an adequate number of
 * pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long managed_pages = zone->managed_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = managed_pages /
					sysctl_lowmem_reserve_ratio[idx];
				managed_pages += lower_zone->managed_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->managed_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->managed_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone->managed_pages / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
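			 *
			 * As a worked example: with min_free_kbytes = 1024
			 * on 4K pages (pages_min = 256) and a zone holding
			 * half of all lowmem, tmp comes to roughly 128
			 * pages for that zone.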
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}

		zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);

		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
			high_wmark_pages(zone) - low_wmark_pages(zone) -
			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));

		setup_zone_migrate_reserve(zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	mutex_lock(&zonelists_mutex);
	__setup_per_zone_wmarks();
	mutex_unlock(&zonelists_mutex);
}

/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -----------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
{
	unsigned int gb, ratio;

	/* Zone size in gigabytes */
	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;

	zone->inactive_ratio = ratio;
}

static void __meminit setup_per_zone_inactive_ratio(void)
{
	struct zone *zone;

	for_each_zone(zone)
		calculate_zone_inactive_ratio(zone);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
				new_min_free_kbytes, user_min_free_kbytes);
	}
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
module_init(init_per_zone_wmark_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around
 * proc_dointvec_minmax() so that we can update user_min_free_kbytes and
 * recompute the watermarks whenever min_free_kbytes changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->managed_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->managed_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 * proc_dointvec_minmax() so that we can call setup_per_zone_lowmem_reserve()
 * whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the minimum watermarks; it is
 * only meaningful in relation to the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.
It is the fraction of total pages in each zone that a hot per cpu 5983 * pagelist can have before it gets flushed back to buddy allocator. 5984 */ 5985 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write, 5986 void __user *buffer, size_t *length, loff_t *ppos) 5987 { 5988 struct zone *zone; 5989 int old_percpu_pagelist_fraction; 5990 int ret; 5991 5992 mutex_lock(&pcp_batch_high_lock); 5993 old_percpu_pagelist_fraction = percpu_pagelist_fraction; 5994 5995 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5996 if (!write || ret < 0) 5997 goto out; 5998 5999 /* Sanity checking to avoid pcp imbalance */ 6000 if (percpu_pagelist_fraction && 6001 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) { 6002 percpu_pagelist_fraction = old_percpu_pagelist_fraction; 6003 ret = -EINVAL; 6004 goto out; 6005 } 6006 6007 /* No change? */ 6008 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction) 6009 goto out; 6010 6011 for_each_populated_zone(zone) { 6012 unsigned int cpu; 6013 6014 for_each_possible_cpu(cpu) 6015 pageset_set_high_and_batch(zone, 6016 per_cpu_ptr(zone->pageset, cpu)); 6017 } 6018 out: 6019 mutex_unlock(&pcp_batch_high_lock); 6020 return ret; 6021 } 6022 6023 int hashdist = HASHDIST_DEFAULT; 6024 6025 #ifdef CONFIG_NUMA 6026 static int __init set_hashdist(char *str) 6027 { 6028 if (!str) 6029 return 0; 6030 hashdist = simple_strtoul(str, &str, 0); 6031 return 1; 6032 } 6033 __setup("hashdist=", set_hashdist); 6034 #endif 6035 6036 /* 6037 * allocate a large system hash table from bootmem 6038 * - it is assumed that the hash table must contain an exact power-of-2 6039 * quantity of entries 6040 * - limit is the number of hash buckets, not the total allocation size 6041 */ 6042 void *__init alloc_large_system_hash(const char *tablename, 6043 unsigned long bucketsize, 6044 unsigned long numentries, 6045 int scale, 6046 int flags, 6047 unsigned int *_hash_shift, 6048 unsigned int *_hash_mask, 6049 unsigned long low_limit, 6050 unsigned long high_limit) 6051 { 6052 unsigned long long max = high_limit; 6053 unsigned long log2qty, size; 6054 void *table = NULL; 6055 6056 /* allow the kernel cmdline to have a say */ 6057 if (!numentries) { 6058 /* round applicable memory size up to nearest megabyte */ 6059 numentries = nr_kernel_pages; 6060 6061 /* It isn't necessary when PAGE_SIZE >= 1MB */ 6062 if (PAGE_SHIFT < 20) 6063 numentries = round_up(numentries, (1<<20)/PAGE_SIZE); 6064 6065 /* limit to 1 bucket per 2^scale bytes of low memory */ 6066 if (scale > PAGE_SHIFT) 6067 numentries >>= (scale - PAGE_SHIFT); 6068 else 6069 numentries <<= (PAGE_SHIFT - scale); 6070 6071 /* Make sure we've got at least a 0-order allocation.. 
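		 * For example, with bucketsize = 8 on 4K pages, anything
		 * below 512 entries is rounded up below so that the table
		 * fills at least one whole page.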
		 */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = memblock_virt_alloc_nopanic(size, 0);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power of two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() automatically does for us.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
					unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	zone = page_zone(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	zone = page_zone(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = ACCESS_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

/*
 * This function checks whether the pageblock includes unmovable pages or
 * not. If @count is non-zero, it is okay for the block to include up to
 * @count unmovable pages.
 *
 * The PageLRU check without isolation or lru_lock can race, so a
 * MIGRATE_MOVABLE block might include unmovable pages. This means the
 * function cannot be expected to be exact.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 bool skip_hwpoisoned_pages)
{
	unsigned long pfn, iter, found;
	int mt;

	/*
	 * To avoid noisy data, lru_add_drain_all() should have been called.
	 * If the zone is ZONE_MOVABLE, it is assumed never to contain
	 * unmovable pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return false;
	mt = get_pageblock_migratetype(page);
	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
		return false;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page)) {
			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
			continue;
		}

		/*
		 * We can't use page_count without pinning the page,
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * because their page->_count is zero at all times.
		 */
		if (!atomic_read(&page->_count)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system,
		 * and page_count() is not 0.
		 */
		if (skip_hwpoisoned_pages && PageHWPoison(page))
			continue;

		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them
		 * too. But for now, memory offline itself doesn't call
		 * shrink_node_slabs(), and this remains to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0, so we
		 * need
 * no further checks: it is a _used_, non-movable page.
		 *
		 * The problematic thing here is PG_reserved pages. PG_reserved
		 * is set on both memory hole pages and _used_ kernel
		 * pages at boot.
		 */
		if (found > count)
			return true;
	}
	return false;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone;
	unsigned long pfn;

	/*
	 * We have to be careful here because we are iterating over memory
	 * sections which are not zone aware so we might end up outside of
	 * the zone but still within the section.
	 * We also have to take care with the node. If the node is offline,
	 * its NODE_DATA will be NULL - see page_zone.
	 */
	if (!node_online(page_to_nid(page)))
		return false;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	if (!zone_spans_pfn(zone, pfn))
		return false;

	return !has_unmovable_pages(zone, page, 0, true);
}

#ifdef CONFIG_CMA

static unsigned long pfn_max_align_down(unsigned long pfn)
{
	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
			     pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages));
}

/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned long nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;

	migrate_prep();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			pfn = isolate_migratepages_range(cc, pfn, end);
			if (!pfn) {
				ret = -EINTR;
				break;
			}
			tries = 0;
		} else if (++tries == 5) {
			ret = ret < 0 ? ret : -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
				    NULL, 0, cc->mode, MR_CMA);
	}
	if (ret < 0) {
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate the given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned, however it's the caller's responsibility to guarantee that
 * we are the only thread that changes the migrate type of pageblocks
 * the pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or a negative error code. On success, all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
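 *
 * Illustrative sketch, modelled on how cma_alloc()/cma_release() in
 * mm/cma.c drive these helpers (pfn and count are placeholders for a
 * suitably aligned MIGRATE_CMA region):
 *
 *	if (alloc_contig_range(pfn, pfn + count, MIGRATE_CMA) == 0) {
 *		struct page *first = pfn_to_page(pfn);
 *		... use the count pages starting at first ...
 *		free_contig_range(pfn, count);
 *	}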
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype)
{
	unsigned long outer_start, outer_end;
	int ret = 0, order;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE. Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the bigger of the two so
	 * that the page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (i.e. the pages
	 * that we are interested in). This will put all the pages in
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from the page
	 * allocator, removing them from the buddy system. This way the
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype,
				       false);
	if (ret)
		return ret;

	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret)
		goto done;

	/*
	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
	 * more, all pages in [start, end) are free in the page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with pages
	 * that the page allocator holds, i.e. they can be part of higher
	 * order pages. Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	lru_add_drain_all();
	drain_all_pages(cc.zone);

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			ret = -EBUSY;
			goto done;
		}
		outer_start &= ~0UL << order;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_info("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/*
	 * Grab isolated pages from freelists.
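	 *
	 * For example, if start sits in the middle of an order-3 buddy
	 * page, outer_start was rounded down to that buddy's first pfn
	 * above, and the surplus head [outer_start, start) and tail
	 * [end, outer_end) are handed back via free_contig_range() below.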
	 */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;
	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	unsigned int order, i;
	unsigned long pfn;
	unsigned long flags;
	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system,
		 * and page_count() is not 0.
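		 * Skip such a page but still mark it PG_reserved, so the
		 * rest of the isolated range can be taken off the free
		 * lists.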
6596 */ 6597 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { 6598 pfn++; 6599 SetPageReserved(page); 6600 continue; 6601 } 6602 6603 BUG_ON(page_count(page)); 6604 BUG_ON(!PageBuddy(page)); 6605 order = page_order(page); 6606 #ifdef CONFIG_DEBUG_VM 6607 printk(KERN_INFO "remove from free list %lx %d %lx\n", 6608 pfn, 1 << order, end_pfn); 6609 #endif 6610 list_del(&page->lru); 6611 rmv_page_order(page); 6612 zone->free_area[order].nr_free--; 6613 for (i = 0; i < (1 << order); i++) 6614 SetPageReserved((page+i)); 6615 pfn += (1 << order); 6616 } 6617 spin_unlock_irqrestore(&zone->lock, flags); 6618 } 6619 #endif 6620 6621 #ifdef CONFIG_MEMORY_FAILURE 6622 bool is_free_buddy_page(struct page *page) 6623 { 6624 struct zone *zone = page_zone(page); 6625 unsigned long pfn = page_to_pfn(page); 6626 unsigned long flags; 6627 unsigned int order; 6628 6629 spin_lock_irqsave(&zone->lock, flags); 6630 for (order = 0; order < MAX_ORDER; order++) { 6631 struct page *page_head = page - (pfn & ((1 << order) - 1)); 6632 6633 if (PageBuddy(page_head) && page_order(page_head) >= order) 6634 break; 6635 } 6636 spin_unlock_irqrestore(&zone->lock, flags); 6637 6638 return order < MAX_ORDER; 6639 } 6640 #endif 6641