1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * mm_init.c - Memory initialisation verification and debugging 4 * 5 * Copyright 2008 IBM Corporation, 2008 6 * Author Mel Gorman <mel@csn.ul.ie> 7 * 8 */ 9 #include <linux/kernel.h> 10 #include <linux/init.h> 11 #include <linux/kobject.h> 12 #include <linux/export.h> 13 #include <linux/memory.h> 14 #include <linux/notifier.h> 15 #include <linux/sched.h> 16 #include <linux/mman.h> 17 #include <linux/memblock.h> 18 #include <linux/page-isolation.h> 19 #include <linux/padata.h> 20 #include <linux/nmi.h> 21 #include <linux/buffer_head.h> 22 #include <linux/kmemleak.h> 23 #include <linux/kfence.h> 24 #include <linux/page_ext.h> 25 #include <linux/pti.h> 26 #include <linux/pgtable.h> 27 #include <linux/swap.h> 28 #include <linux/cma.h> 29 #include <linux/crash_dump.h> 30 #include "internal.h" 31 #include "slab.h" 32 #include "shuffle.h" 33 34 #include <asm/setup.h> 35 36 #ifdef CONFIG_DEBUG_MEMORY_INIT 37 int __meminitdata mminit_loglevel; 38 39 /* The zonelists are simply reported, validation is manual. */ 40 void __init mminit_verify_zonelist(void) 41 { 42 int nid; 43 44 if (mminit_loglevel < MMINIT_VERIFY) 45 return; 46 47 for_each_online_node(nid) { 48 pg_data_t *pgdat = NODE_DATA(nid); 49 struct zone *zone; 50 struct zoneref *z; 51 struct zonelist *zonelist; 52 int i, listid, zoneid; 53 54 BUILD_BUG_ON(MAX_ZONELISTS > 2); 55 for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) { 56 57 /* Identify the zone and nodelist */ 58 zoneid = i % MAX_NR_ZONES; 59 listid = i / MAX_NR_ZONES; 60 zonelist = &pgdat->node_zonelists[listid]; 61 zone = &pgdat->node_zones[zoneid]; 62 if (!populated_zone(zone)) 63 continue; 64 65 /* Print information about the zonelist */ 66 printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ", 67 listid > 0 ? 
"thisnode" : "general", nid, 68 zone->name); 69 70 /* Iterate the zonelist */ 71 for_each_zone_zonelist(zone, z, zonelist, zoneid) 72 pr_cont("%d:%s ", zone_to_nid(zone), zone->name); 73 pr_cont("\n"); 74 } 75 } 76 } 77 78 void __init mminit_verify_pageflags_layout(void) 79 { 80 int shift, width; 81 unsigned long or_mask, add_mask; 82 83 shift = BITS_PER_LONG; 84 width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH 85 - LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH; 86 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths", 87 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n", 88 SECTIONS_WIDTH, 89 NODES_WIDTH, 90 ZONES_WIDTH, 91 LAST_CPUPID_WIDTH, 92 KASAN_TAG_WIDTH, 93 LRU_GEN_WIDTH, 94 LRU_REFS_WIDTH, 95 NR_PAGEFLAGS); 96 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts", 97 "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n", 98 SECTIONS_SHIFT, 99 NODES_SHIFT, 100 ZONES_SHIFT, 101 LAST_CPUPID_SHIFT, 102 KASAN_TAG_WIDTH); 103 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts", 104 "Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n", 105 (unsigned long)SECTIONS_PGSHIFT, 106 (unsigned long)NODES_PGSHIFT, 107 (unsigned long)ZONES_PGSHIFT, 108 (unsigned long)LAST_CPUPID_PGSHIFT, 109 (unsigned long)KASAN_TAG_PGSHIFT); 110 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid", 111 "Node/Zone ID: %lu -> %lu\n", 112 (unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT), 113 (unsigned long)ZONEID_PGOFF); 114 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage", 115 "location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n", 116 shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0); 117 #ifdef NODE_NOT_IN_PAGE_FLAGS 118 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", 119 "Node not in page flags"); 120 #endif 121 #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS 122 mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags", 123 "Last cpupid not in page flags"); 124 #endif 125 126 if (SECTIONS_WIDTH) { 127 shift -= SECTIONS_WIDTH; 128 BUG_ON(shift != SECTIONS_PGSHIFT); 129 } 130 if (NODES_WIDTH) { 131 shift -= NODES_WIDTH; 132 BUG_ON(shift != NODES_PGSHIFT); 133 } 134 if (ZONES_WIDTH) { 135 shift -= ZONES_WIDTH; 136 BUG_ON(shift != ZONES_PGSHIFT); 137 } 138 139 /* Check for bitmask overlaps */ 140 or_mask = (ZONES_MASK << ZONES_PGSHIFT) | 141 (NODES_MASK << NODES_PGSHIFT) | 142 (SECTIONS_MASK << SECTIONS_PGSHIFT); 143 add_mask = (ZONES_MASK << ZONES_PGSHIFT) + 144 (NODES_MASK << NODES_PGSHIFT) + 145 (SECTIONS_MASK << SECTIONS_PGSHIFT); 146 BUG_ON(or_mask != add_mask); 147 } 148 149 static __init int set_mminit_loglevel(char *str) 150 { 151 get_option(&str, &mminit_loglevel); 152 return 0; 153 } 154 early_param("mminit_loglevel", set_mminit_loglevel); 155 #endif /* CONFIG_DEBUG_MEMORY_INIT */ 156 157 struct kobject *mm_kobj; 158 159 #ifdef CONFIG_SMP 160 s32 vm_committed_as_batch = 32; 161 162 void mm_compute_batch(int overcommit_policy) 163 { 164 u64 memsized_batch; 165 s32 nr = num_present_cpus(); 166 s32 batch = max_t(s32, nr*2, 32); 167 unsigned long ram_pages = totalram_pages(); 168 169 /* 170 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of 171 * (total memory/#cpus), and lift it to 25% for other policies 172 * to easy the possible lock contention for percpu_counter 173 * vm_committed_as, while the max limit is INT_MAX 174 */ 175 if (overcommit_policy == OVERCOMMIT_NEVER) 176 memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX); 177 else 178 memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX); 179 
180 vm_committed_as_batch = max_t(s32, memsized_batch, batch); 181 } 182 183 static int __meminit mm_compute_batch_notifier(struct notifier_block *self, 184 unsigned long action, void *arg) 185 { 186 switch (action) { 187 case MEM_ONLINE: 188 case MEM_OFFLINE: 189 mm_compute_batch(sysctl_overcommit_memory); 190 break; 191 default: 192 break; 193 } 194 return NOTIFY_OK; 195 } 196 197 static int __init mm_compute_batch_init(void) 198 { 199 mm_compute_batch(sysctl_overcommit_memory); 200 hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI); 201 return 0; 202 } 203 204 __initcall(mm_compute_batch_init); 205 206 #endif 207 208 static int __init mm_sysfs_init(void) 209 { 210 mm_kobj = kobject_create_and_add("mm", kernel_kobj); 211 if (!mm_kobj) 212 return -ENOMEM; 213 214 return 0; 215 } 216 postcore_initcall(mm_sysfs_init); 217 218 static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata; 219 static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata; 220 static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata; 221 222 static unsigned long required_kernelcore __initdata; 223 static unsigned long required_kernelcore_percent __initdata; 224 static unsigned long required_movablecore __initdata; 225 static unsigned long required_movablecore_percent __initdata; 226 227 static unsigned long nr_kernel_pages __initdata; 228 static unsigned long nr_all_pages __initdata; 229 static unsigned long dma_reserve __initdata; 230 231 static bool deferred_struct_pages __meminitdata; 232 233 static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats); 234 235 static int __init cmdline_parse_core(char *p, unsigned long *core, 236 unsigned long *percent) 237 { 238 unsigned long long coremem; 239 char *endptr; 240 241 if (!p) 242 return -EINVAL; 243 244 /* Value may be a percentage of total memory, otherwise bytes */ 245 coremem = simple_strtoull(p, &endptr, 0); 246 if (*endptr == '%') { 247 /* Paranoid check for percent values greater than 100 */ 248 WARN_ON(coremem > 100); 249 250 *percent = coremem; 251 } else { 252 coremem = memparse(p, &p); 253 /* Paranoid check that UL is enough for the coremem value */ 254 WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX); 255 256 *core = coremem >> PAGE_SHIFT; 257 *percent = 0UL; 258 } 259 return 0; 260 } 261 262 bool mirrored_kernelcore __initdata_memblock; 263 264 /* 265 * kernelcore=size sets the amount of memory for use for allocations that 266 * cannot be reclaimed or migrated. 267 */ 268 static int __init cmdline_parse_kernelcore(char *p) 269 { 270 /* parse kernelcore=mirror */ 271 if (parse_option_str(p, "mirror")) { 272 mirrored_kernelcore = true; 273 return 0; 274 } 275 276 return cmdline_parse_core(p, &required_kernelcore, 277 &required_kernelcore_percent); 278 } 279 early_param("kernelcore", cmdline_parse_kernelcore); 280 281 /* 282 * movablecore=size sets the amount of memory for use for allocations that 283 * can be reclaimed or migrated. 284 */ 285 static int __init cmdline_parse_movablecore(char *p) 286 { 287 return cmdline_parse_core(p, &required_movablecore, 288 &required_movablecore_percent); 289 } 290 early_param("movablecore", cmdline_parse_movablecore); 291 292 /* 293 * early_calculate_totalpages() 294 * Sum pages in active regions for movable zone. 295 * Populate N_MEMORY for calculating usable_nodes. 
296 */ 297 static unsigned long __init early_calculate_totalpages(void) 298 { 299 unsigned long totalpages = 0; 300 unsigned long start_pfn, end_pfn; 301 int i, nid; 302 303 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 304 unsigned long pages = end_pfn - start_pfn; 305 306 totalpages += pages; 307 if (pages) 308 node_set_state(nid, N_MEMORY); 309 } 310 return totalpages; 311 } 312 313 /* 314 * This finds a zone that can be used for ZONE_MOVABLE pages. The 315 * assumption is made that zones within a node are ordered in monotonic 316 * increasing memory addresses so that the "highest" populated zone is used 317 */ 318 static void __init find_usable_zone_for_movable(void) 319 { 320 int zone_index; 321 for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { 322 if (zone_index == ZONE_MOVABLE) 323 continue; 324 325 if (arch_zone_highest_possible_pfn[zone_index] > 326 arch_zone_lowest_possible_pfn[zone_index]) 327 break; 328 } 329 330 VM_BUG_ON(zone_index == -1); 331 movable_zone = zone_index; 332 } 333 334 /* 335 * Find the PFN the Movable zone begins in each node. Kernel memory 336 * is spread evenly between nodes as long as the nodes have enough 337 * memory. When they don't, some nodes will have more kernelcore than 338 * others 339 */ 340 static void __init find_zone_movable_pfns_for_nodes(void) 341 { 342 int i, nid; 343 unsigned long usable_startpfn; 344 unsigned long kernelcore_node, kernelcore_remaining; 345 /* save the state before borrow the nodemask */ 346 nodemask_t saved_node_state = node_states[N_MEMORY]; 347 unsigned long totalpages = early_calculate_totalpages(); 348 int usable_nodes = nodes_weight(node_states[N_MEMORY]); 349 struct memblock_region *r; 350 351 /* Need to find movable_zone earlier when movable_node is specified. */ 352 find_usable_zone_for_movable(); 353 354 /* 355 * If movable_node is specified, ignore kernelcore and movablecore 356 * options. 357 */ 358 if (movable_node_is_enabled()) { 359 for_each_mem_region(r) { 360 if (!memblock_is_hotpluggable(r)) 361 continue; 362 363 nid = memblock_get_region_node(r); 364 365 usable_startpfn = PFN_DOWN(r->base); 366 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 367 min(usable_startpfn, zone_movable_pfn[nid]) : 368 usable_startpfn; 369 } 370 371 goto out2; 372 } 373 374 /* 375 * If kernelcore=mirror is specified, ignore movablecore option 376 */ 377 if (mirrored_kernelcore) { 378 bool mem_below_4gb_not_mirrored = false; 379 380 if (!memblock_has_mirror()) { 381 pr_warn("The system has no mirror memory, ignore kernelcore=mirror.\n"); 382 goto out; 383 } 384 385 if (is_kdump_kernel()) { 386 pr_warn("The system is under kdump, ignore kernelcore=mirror.\n"); 387 goto out; 388 } 389 390 for_each_mem_region(r) { 391 if (memblock_is_mirror(r)) 392 continue; 393 394 nid = memblock_get_region_node(r); 395 396 usable_startpfn = memblock_region_memory_base_pfn(r); 397 398 if (usable_startpfn < PHYS_PFN(SZ_4G)) { 399 mem_below_4gb_not_mirrored = true; 400 continue; 401 } 402 403 zone_movable_pfn[nid] = zone_movable_pfn[nid] ? 404 min(usable_startpfn, zone_movable_pfn[nid]) : 405 usable_startpfn; 406 } 407 408 if (mem_below_4gb_not_mirrored) 409 pr_warn("This configuration results in unmirrored kernel memory.\n"); 410 411 goto out2; 412 } 413 414 /* 415 * If kernelcore=nn% or movablecore=nn% was specified, calculate the 416 * amount of necessary memory. 
417 */ 418 if (required_kernelcore_percent) 419 required_kernelcore = (totalpages * 100 * required_kernelcore_percent) / 420 10000UL; 421 if (required_movablecore_percent) 422 required_movablecore = (totalpages * 100 * required_movablecore_percent) / 423 10000UL; 424 425 /* 426 * If movablecore= was specified, calculate what size of 427 * kernelcore that corresponds so that memory usable for 428 * any allocation type is evenly spread. If both kernelcore 429 * and movablecore are specified, then the value of kernelcore 430 * will be used for required_kernelcore if it's greater than 431 * what movablecore would have allowed. 432 */ 433 if (required_movablecore) { 434 unsigned long corepages; 435 436 /* 437 * Round-up so that ZONE_MOVABLE is at least as large as what 438 * was requested by the user 439 */ 440 required_movablecore = 441 roundup(required_movablecore, MAX_ORDER_NR_PAGES); 442 required_movablecore = min(totalpages, required_movablecore); 443 corepages = totalpages - required_movablecore; 444 445 required_kernelcore = max(required_kernelcore, corepages); 446 } 447 448 /* 449 * If kernelcore was not specified or kernelcore size is larger 450 * than totalpages, there is no ZONE_MOVABLE. 451 */ 452 if (!required_kernelcore || required_kernelcore >= totalpages) 453 goto out; 454 455 /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */ 456 usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone]; 457 458 restart: 459 /* Spread kernelcore memory as evenly as possible throughout nodes */ 460 kernelcore_node = required_kernelcore / usable_nodes; 461 for_each_node_state(nid, N_MEMORY) { 462 unsigned long start_pfn, end_pfn; 463 464 /* 465 * Recalculate kernelcore_node if the division per node 466 * now exceeds what is necessary to satisfy the requested 467 * amount of memory for the kernel 468 */ 469 if (required_kernelcore < kernelcore_node) 470 kernelcore_node = required_kernelcore / usable_nodes; 471 472 /* 473 * As the map is walked, we track how much memory is usable 474 * by the kernel using kernelcore_remaining. When it is 475 * 0, the rest of the node is usable by ZONE_MOVABLE 476 */ 477 kernelcore_remaining = kernelcore_node; 478 479 /* Go through each range of PFNs within this node */ 480 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 481 unsigned long size_pages; 482 483 start_pfn = max(start_pfn, zone_movable_pfn[nid]); 484 if (start_pfn >= end_pfn) 485 continue; 486 487 /* Account for what is only usable for kernelcore */ 488 if (start_pfn < usable_startpfn) { 489 unsigned long kernel_pages; 490 kernel_pages = min(end_pfn, usable_startpfn) 491 - start_pfn; 492 493 kernelcore_remaining -= min(kernel_pages, 494 kernelcore_remaining); 495 required_kernelcore -= min(kernel_pages, 496 required_kernelcore); 497 498 /* Continue if range is now fully accounted */ 499 if (end_pfn <= usable_startpfn) { 500 501 /* 502 * Push zone_movable_pfn to the end so 503 * that if we have to rebalance 504 * kernelcore across nodes, we will 505 * not double account here 506 */ 507 zone_movable_pfn[nid] = end_pfn; 508 continue; 509 } 510 start_pfn = usable_startpfn; 511 } 512 513 /* 514 * The usable PFN range for ZONE_MOVABLE is from 515 * start_pfn->end_pfn. 
Calculate size_pages as the 516 * number of pages used as kernelcore 517 */ 518 size_pages = end_pfn - start_pfn; 519 if (size_pages > kernelcore_remaining) 520 size_pages = kernelcore_remaining; 521 zone_movable_pfn[nid] = start_pfn + size_pages; 522 523 /* 524 * Some kernelcore has been met, update counts and 525 * break if the kernelcore for this node has been 526 * satisfied 527 */ 528 required_kernelcore -= min(required_kernelcore, 529 size_pages); 530 kernelcore_remaining -= size_pages; 531 if (!kernelcore_remaining) 532 break; 533 } 534 } 535 536 /* 537 * If there is still required_kernelcore, we do another pass with one 538 * less node in the count. This will push zone_movable_pfn[nid] further 539 * along on the nodes that still have memory until kernelcore is 540 * satisfied 541 */ 542 usable_nodes--; 543 if (usable_nodes && required_kernelcore > usable_nodes) 544 goto restart; 545 546 out2: 547 /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */ 548 for (nid = 0; nid < MAX_NUMNODES; nid++) { 549 unsigned long start_pfn, end_pfn; 550 551 zone_movable_pfn[nid] = 552 roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES); 553 554 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 555 if (zone_movable_pfn[nid] >= end_pfn) 556 zone_movable_pfn[nid] = 0; 557 } 558 559 out: 560 /* restore the node_state */ 561 node_states[N_MEMORY] = saved_node_state; 562 } 563 564 static void __meminit __init_single_page(struct page *page, unsigned long pfn, 565 unsigned long zone, int nid) 566 { 567 mm_zero_struct_page(page); 568 set_page_links(page, zone, nid, pfn); 569 init_page_count(page); 570 page_mapcount_reset(page); 571 page_cpupid_reset_last(page); 572 page_kasan_tag_reset(page); 573 574 INIT_LIST_HEAD(&page->lru); 575 #ifdef WANT_PAGE_VIRTUAL 576 /* The shift won't overflow because ZONE_NORMAL is below 4G. */ 577 if (!is_highmem_idx(zone)) 578 set_page_address(page, __va(pfn << PAGE_SHIFT)); 579 #endif 580 } 581 582 #ifdef CONFIG_NUMA 583 /* 584 * During memory init memblocks map pfns to nids. The search is expensive and 585 * this caches recent lookups. The implementation of __early_pfn_to_nid 586 * treats start/end as pfns. 587 */ 588 struct mminit_pfnnid_cache { 589 unsigned long last_start; 590 unsigned long last_end; 591 int last_nid; 592 }; 593 594 static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 595 596 /* 597 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. 
598 */ 599 static int __meminit __early_pfn_to_nid(unsigned long pfn, 600 struct mminit_pfnnid_cache *state) 601 { 602 unsigned long start_pfn, end_pfn; 603 int nid; 604 605 if (state->last_start <= pfn && pfn < state->last_end) 606 return state->last_nid; 607 608 nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); 609 if (nid != NUMA_NO_NODE) { 610 state->last_start = start_pfn; 611 state->last_end = end_pfn; 612 state->last_nid = nid; 613 } 614 615 return nid; 616 } 617 618 int __meminit early_pfn_to_nid(unsigned long pfn) 619 { 620 static DEFINE_SPINLOCK(early_pfn_lock); 621 int nid; 622 623 spin_lock(&early_pfn_lock); 624 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 625 if (nid < 0) 626 nid = first_online_node; 627 spin_unlock(&early_pfn_lock); 628 629 return nid; 630 } 631 632 int hashdist = HASHDIST_DEFAULT; 633 634 static int __init set_hashdist(char *str) 635 { 636 if (!str) 637 return 0; 638 hashdist = simple_strtoul(str, &str, 0); 639 return 1; 640 } 641 __setup("hashdist=", set_hashdist); 642 643 static inline void fixup_hashdist(void) 644 { 645 if (num_node_state(N_MEMORY) == 1) 646 hashdist = 0; 647 } 648 #else 649 static inline void fixup_hashdist(void) {} 650 #endif /* CONFIG_NUMA */ 651 652 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 653 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) 654 { 655 pgdat->first_deferred_pfn = ULONG_MAX; 656 } 657 658 /* Returns true if the struct page for the pfn is initialised */ 659 static inline bool __meminit early_page_initialised(unsigned long pfn, int nid) 660 { 661 if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) 662 return false; 663 664 return true; 665 } 666 667 /* 668 * Returns true when the remaining initialisation should be deferred until 669 * later in the boot cycle when it can be parallelised. 670 */ 671 static bool __meminit 672 defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 673 { 674 static unsigned long prev_end_pfn, nr_initialised; 675 676 if (early_page_ext_enabled()) 677 return false; 678 /* 679 * prev_end_pfn static that contains the end of previous zone 680 * No need to protect because called very early in boot before smp_init. 681 */ 682 if (prev_end_pfn != end_pfn) { 683 prev_end_pfn = end_pfn; 684 nr_initialised = 0; 685 } 686 687 /* Always populate low zones for address-constrained allocations */ 688 if (end_pfn < pgdat_end_pfn(NODE_DATA(nid))) 689 return false; 690 691 if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX) 692 return true; 693 /* 694 * We start only with one section of pages, more pages are added as 695 * needed until the rest of deferred pages are initialized. 
696 */ 697 nr_initialised++; 698 if ((nr_initialised > PAGES_PER_SECTION) && 699 (pfn & (PAGES_PER_SECTION - 1)) == 0) { 700 NODE_DATA(nid)->first_deferred_pfn = pfn; 701 return true; 702 } 703 return false; 704 } 705 706 static void __meminit init_reserved_page(unsigned long pfn, int nid) 707 { 708 pg_data_t *pgdat; 709 int zid; 710 711 if (early_page_initialised(pfn, nid)) 712 return; 713 714 pgdat = NODE_DATA(nid); 715 716 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 717 struct zone *zone = &pgdat->node_zones[zid]; 718 719 if (zone_spans_pfn(zone, pfn)) 720 break; 721 } 722 __init_single_page(pfn_to_page(pfn), pfn, zid, nid); 723 } 724 #else 725 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} 726 727 static inline bool early_page_initialised(unsigned long pfn, int nid) 728 { 729 return true; 730 } 731 732 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) 733 { 734 return false; 735 } 736 737 static inline void init_reserved_page(unsigned long pfn, int nid) 738 { 739 } 740 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 741 742 /* 743 * Initialised pages do not have PageReserved set. This function is 744 * called for each range allocated by the bootmem allocator and 745 * marks the pages PageReserved. The remaining valid pages are later 746 * sent to the buddy page allocator. 747 */ 748 void __meminit reserve_bootmem_region(phys_addr_t start, 749 phys_addr_t end, int nid) 750 { 751 unsigned long start_pfn = PFN_DOWN(start); 752 unsigned long end_pfn = PFN_UP(end); 753 754 for (; start_pfn < end_pfn; start_pfn++) { 755 if (pfn_valid(start_pfn)) { 756 struct page *page = pfn_to_page(start_pfn); 757 758 init_reserved_page(start_pfn, nid); 759 760 /* Avoid false-positive PageTail() */ 761 INIT_LIST_HEAD(&page->lru); 762 763 /* 764 * no need for atomic set_bit because the struct 765 * page is not visible yet so nobody should 766 * access it yet. 767 */ 768 __SetPageReserved(page); 769 } 770 } 771 } 772 773 /* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */ 774 static bool __meminit 775 overlap_memmap_init(unsigned long zone, unsigned long *pfn) 776 { 777 static struct memblock_region *r; 778 779 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { 780 if (!r || *pfn >= memblock_region_memory_end_pfn(r)) { 781 for_each_mem_region(r) { 782 if (*pfn < memblock_region_memory_end_pfn(r)) 783 break; 784 } 785 } 786 if (*pfn >= memblock_region_memory_base_pfn(r) && 787 memblock_is_mirror(r)) { 788 *pfn = memblock_region_memory_end_pfn(r); 789 return true; 790 } 791 } 792 return false; 793 } 794 795 /* 796 * Only struct pages that correspond to ranges defined by memblock.memory 797 * are zeroed and initialized by going through __init_single_page() during 798 * memmap_init_zone_range(). 799 * 800 * But, there could be struct pages that correspond to holes in 801 * memblock.memory. 
This can happen because of the following reasons: 802 * - physical memory bank size is not necessarily the exact multiple of the 803 * arbitrary section size 804 * - early reserved memory may not be listed in memblock.memory 805 * - memory layouts defined with memmap= kernel parameter may not align 806 * nicely with memmap sections 807 * 808 * Explicitly initialize those struct pages so that: 809 * - PG_Reserved is set 810 * - zone and node links point to zone and node that span the page if the 811 * hole is in the middle of a zone 812 * - zone and node links point to adjacent zone/node if the hole falls on 813 * the zone boundary; the pages in such holes will be prepended to the 814 * zone/node above the hole except for the trailing pages in the last 815 * section that will be appended to the zone/node below. 816 */ 817 static void __init init_unavailable_range(unsigned long spfn, 818 unsigned long epfn, 819 int zone, int node) 820 { 821 unsigned long pfn; 822 u64 pgcnt = 0; 823 824 for (pfn = spfn; pfn < epfn; pfn++) { 825 if (!pfn_valid(pageblock_start_pfn(pfn))) { 826 pfn = pageblock_end_pfn(pfn) - 1; 827 continue; 828 } 829 __init_single_page(pfn_to_page(pfn), pfn, zone, node); 830 __SetPageReserved(pfn_to_page(pfn)); 831 pgcnt++; 832 } 833 834 if (pgcnt) 835 pr_info("On node %d, zone %s: %lld pages in unavailable ranges", 836 node, zone_names[zone], pgcnt); 837 } 838 839 /* 840 * Initially all pages are reserved - free ones are freed 841 * up by memblock_free_all() once the early boot process is 842 * done. Non-atomic initialization, single-pass. 843 * 844 * All aligned pageblocks are initialized to the specified migratetype 845 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related 846 * zone stats (e.g., nr_isolate_pageblock) are touched. 847 */ 848 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, 849 unsigned long start_pfn, unsigned long zone_end_pfn, 850 enum meminit_context context, 851 struct vmem_altmap *altmap, int migratetype) 852 { 853 unsigned long pfn, end_pfn = start_pfn + size; 854 struct page *page; 855 856 if (highest_memmap_pfn < end_pfn - 1) 857 highest_memmap_pfn = end_pfn - 1; 858 859 #ifdef CONFIG_ZONE_DEVICE 860 /* 861 * Honor reservation requested by the driver for this ZONE_DEVICE 862 * memory. We limit the total number of pages to initialize to just 863 * those that might contain the memory mapping. We will defer the 864 * ZONE_DEVICE page initialization until after we have released 865 * the hotplug lock. 866 */ 867 if (zone == ZONE_DEVICE) { 868 if (!altmap) 869 return; 870 871 if (start_pfn == altmap->base_pfn) 872 start_pfn += altmap->reserve; 873 end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 874 } 875 #endif 876 877 for (pfn = start_pfn; pfn < end_pfn; ) { 878 /* 879 * There can be holes in boot-time mem_map[]s handed to this 880 * function. They do not exist on hotplugged memory. 881 */ 882 if (context == MEMINIT_EARLY) { 883 if (overlap_memmap_init(zone, &pfn)) 884 continue; 885 if (defer_init(nid, pfn, zone_end_pfn)) { 886 deferred_struct_pages = true; 887 break; 888 } 889 } 890 891 page = pfn_to_page(pfn); 892 __init_single_page(page, pfn, zone, nid); 893 if (context == MEMINIT_HOTPLUG) 894 __SetPageReserved(page); 895 896 /* 897 * Usually, we want to mark the pageblock MIGRATE_MOVABLE, 898 * such that unmovable allocations won't be scattered all 899 * over the place during system boot. 
900 */ 901 if (pageblock_aligned(pfn)) { 902 set_pageblock_migratetype(page, migratetype); 903 cond_resched(); 904 } 905 pfn++; 906 } 907 } 908 909 static void __init memmap_init_zone_range(struct zone *zone, 910 unsigned long start_pfn, 911 unsigned long end_pfn, 912 unsigned long *hole_pfn) 913 { 914 unsigned long zone_start_pfn = zone->zone_start_pfn; 915 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; 916 int nid = zone_to_nid(zone), zone_id = zone_idx(zone); 917 918 start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn); 919 end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn); 920 921 if (start_pfn >= end_pfn) 922 return; 923 924 memmap_init_range(end_pfn - start_pfn, nid, zone_id, start_pfn, 925 zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); 926 927 if (*hole_pfn < start_pfn) 928 init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid); 929 930 *hole_pfn = end_pfn; 931 } 932 933 static void __init memmap_init(void) 934 { 935 unsigned long start_pfn, end_pfn; 936 unsigned long hole_pfn = 0; 937 int i, j, zone_id = 0, nid; 938 939 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 940 struct pglist_data *node = NODE_DATA(nid); 941 942 for (j = 0; j < MAX_NR_ZONES; j++) { 943 struct zone *zone = node->node_zones + j; 944 945 if (!populated_zone(zone)) 946 continue; 947 948 memmap_init_zone_range(zone, start_pfn, end_pfn, 949 &hole_pfn); 950 zone_id = j; 951 } 952 } 953 954 #ifdef CONFIG_SPARSEMEM 955 /* 956 * Initialize the memory map for hole in the range [memory_end, 957 * section_end]. 958 * Append the pages in this hole to the highest zone in the last 959 * node. 960 * The call to init_unavailable_range() is outside the ifdef to 961 * silence the compiler warining about zone_id set but not used; 962 * for FLATMEM it is a nop anyway 963 */ 964 end_pfn = round_up(end_pfn, PAGES_PER_SECTION); 965 if (hole_pfn < end_pfn) 966 #endif 967 init_unavailable_range(hole_pfn, end_pfn, zone_id, nid); 968 } 969 970 #ifdef CONFIG_ZONE_DEVICE 971 static void __ref __init_zone_device_page(struct page *page, unsigned long pfn, 972 unsigned long zone_idx, int nid, 973 struct dev_pagemap *pgmap) 974 { 975 976 __init_single_page(page, pfn, zone_idx, nid); 977 978 /* 979 * Mark page reserved as it will need to wait for onlining 980 * phase for it to be fully associated with a zone. 981 * 982 * We can use the non-atomic __set_bit operation for setting 983 * the flag as we are still initializing the pages. 984 */ 985 __SetPageReserved(page); 986 987 /* 988 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer 989 * and zone_device_data. It is a bug if a ZONE_DEVICE page is 990 * ever freed or placed on a driver-private list. 991 */ 992 page->pgmap = pgmap; 993 page->zone_device_data = NULL; 994 995 /* 996 * Mark the block movable so that blocks are reserved for 997 * movable at startup. This will force kernel allocations 998 * to reserve their blocks rather than leaking throughout 999 * the address space during boot when many long-lived 1000 * kernel allocations are made. 1001 * 1002 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap 1003 * because this is done early in section_activate() 1004 */ 1005 if (pageblock_aligned(pfn)) { 1006 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1007 cond_resched(); 1008 } 1009 1010 /* 1011 * ZONE_DEVICE pages are released directly to the driver page allocator 1012 * which will set the page count to 1 when allocating the page. 
1013 */ 1014 if (pgmap->type == MEMORY_DEVICE_PRIVATE || 1015 pgmap->type == MEMORY_DEVICE_COHERENT) 1016 set_page_count(page, 0); 1017 } 1018 1019 /* 1020 * With compound page geometry and when struct pages are stored in ram most 1021 * tail pages are reused. Consequently, the amount of unique struct pages to 1022 * initialize is a lot smaller that the total amount of struct pages being 1023 * mapped. This is a paired / mild layering violation with explicit knowledge 1024 * of how the sparse_vmemmap internals handle compound pages in the lack 1025 * of an altmap. See vmemmap_populate_compound_pages(). 1026 */ 1027 static inline unsigned long compound_nr_pages(struct vmem_altmap *altmap, 1028 struct dev_pagemap *pgmap) 1029 { 1030 if (!vmemmap_can_optimize(altmap, pgmap)) 1031 return pgmap_vmemmap_nr(pgmap); 1032 1033 return VMEMMAP_RESERVE_NR * (PAGE_SIZE / sizeof(struct page)); 1034 } 1035 1036 static void __ref memmap_init_compound(struct page *head, 1037 unsigned long head_pfn, 1038 unsigned long zone_idx, int nid, 1039 struct dev_pagemap *pgmap, 1040 unsigned long nr_pages) 1041 { 1042 unsigned long pfn, end_pfn = head_pfn + nr_pages; 1043 unsigned int order = pgmap->vmemmap_shift; 1044 1045 __SetPageHead(head); 1046 for (pfn = head_pfn + 1; pfn < end_pfn; pfn++) { 1047 struct page *page = pfn_to_page(pfn); 1048 1049 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 1050 prep_compound_tail(head, pfn - head_pfn); 1051 set_page_count(page, 0); 1052 1053 /* 1054 * The first tail page stores important compound page info. 1055 * Call prep_compound_head() after the first tail page has 1056 * been initialized, to not have the data overwritten. 1057 */ 1058 if (pfn == head_pfn + 1) 1059 prep_compound_head(head, order); 1060 } 1061 } 1062 1063 void __ref memmap_init_zone_device(struct zone *zone, 1064 unsigned long start_pfn, 1065 unsigned long nr_pages, 1066 struct dev_pagemap *pgmap) 1067 { 1068 unsigned long pfn, end_pfn = start_pfn + nr_pages; 1069 struct pglist_data *pgdat = zone->zone_pgdat; 1070 struct vmem_altmap *altmap = pgmap_altmap(pgmap); 1071 unsigned int pfns_per_compound = pgmap_vmemmap_nr(pgmap); 1072 unsigned long zone_idx = zone_idx(zone); 1073 unsigned long start = jiffies; 1074 int nid = pgdat->node_id; 1075 1076 if (WARN_ON_ONCE(!pgmap || zone_idx != ZONE_DEVICE)) 1077 return; 1078 1079 /* 1080 * The call to memmap_init should have already taken care 1081 * of the pages reserved for the memmap, so we can just jump to 1082 * the end of that region and start processing the device pages. 1083 */ 1084 if (altmap) { 1085 start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap); 1086 nr_pages = end_pfn - start_pfn; 1087 } 1088 1089 for (pfn = start_pfn; pfn < end_pfn; pfn += pfns_per_compound) { 1090 struct page *page = pfn_to_page(pfn); 1091 1092 __init_zone_device_page(page, pfn, zone_idx, nid, pgmap); 1093 1094 if (pfns_per_compound == 1) 1095 continue; 1096 1097 memmap_init_compound(page, pfn, zone_idx, nid, pgmap, 1098 compound_nr_pages(altmap, pgmap)); 1099 } 1100 1101 pr_debug("%s initialised %lu pages in %ums\n", __func__, 1102 nr_pages, jiffies_to_msecs(jiffies - start)); 1103 } 1104 #endif 1105 1106 /* 1107 * The zone ranges provided by the architecture do not include ZONE_MOVABLE 1108 * because it is sized independent of architecture. Unlike the other zones, 1109 * the starting point for ZONE_MOVABLE is not fixed. It may be different 1110 * in each node depending on the size of each node and how evenly kernelcore 1111 * is distributed. 
This helper function adjusts the zone ranges 1112 * provided by the architecture for a given node by using the end of the 1113 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that 1114 * zones within a node are in order of monotonic increases memory addresses 1115 */ 1116 static void __init adjust_zone_range_for_zone_movable(int nid, 1117 unsigned long zone_type, 1118 unsigned long node_end_pfn, 1119 unsigned long *zone_start_pfn, 1120 unsigned long *zone_end_pfn) 1121 { 1122 /* Only adjust if ZONE_MOVABLE is on this node */ 1123 if (zone_movable_pfn[nid]) { 1124 /* Size ZONE_MOVABLE */ 1125 if (zone_type == ZONE_MOVABLE) { 1126 *zone_start_pfn = zone_movable_pfn[nid]; 1127 *zone_end_pfn = min(node_end_pfn, 1128 arch_zone_highest_possible_pfn[movable_zone]); 1129 1130 /* Adjust for ZONE_MOVABLE starting within this range */ 1131 } else if (!mirrored_kernelcore && 1132 *zone_start_pfn < zone_movable_pfn[nid] && 1133 *zone_end_pfn > zone_movable_pfn[nid]) { 1134 *zone_end_pfn = zone_movable_pfn[nid]; 1135 1136 /* Check if this whole range is within ZONE_MOVABLE */ 1137 } else if (*zone_start_pfn >= zone_movable_pfn[nid]) 1138 *zone_start_pfn = *zone_end_pfn; 1139 } 1140 } 1141 1142 /* 1143 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, 1144 * then all holes in the requested range will be accounted for. 1145 */ 1146 unsigned long __init __absent_pages_in_range(int nid, 1147 unsigned long range_start_pfn, 1148 unsigned long range_end_pfn) 1149 { 1150 unsigned long nr_absent = range_end_pfn - range_start_pfn; 1151 unsigned long start_pfn, end_pfn; 1152 int i; 1153 1154 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) { 1155 start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn); 1156 end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn); 1157 nr_absent -= end_pfn - start_pfn; 1158 } 1159 return nr_absent; 1160 } 1161 1162 /** 1163 * absent_pages_in_range - Return number of page frames in holes within a range 1164 * @start_pfn: The start PFN to start searching for holes 1165 * @end_pfn: The end PFN to stop searching for holes 1166 * 1167 * Return: the number of pages frames in memory holes within a range. 1168 */ 1169 unsigned long __init absent_pages_in_range(unsigned long start_pfn, 1170 unsigned long end_pfn) 1171 { 1172 return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn); 1173 } 1174 1175 /* Return the number of page frames in holes in a zone on a node */ 1176 static unsigned long __init zone_absent_pages_in_node(int nid, 1177 unsigned long zone_type, 1178 unsigned long zone_start_pfn, 1179 unsigned long zone_end_pfn) 1180 { 1181 unsigned long nr_absent; 1182 1183 /* zone is empty, we don't have any absent pages */ 1184 if (zone_start_pfn == zone_end_pfn) 1185 return 0; 1186 1187 nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); 1188 1189 /* 1190 * ZONE_MOVABLE handling. 1191 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages 1192 * and vice versa. 
1193 */ 1194 if (mirrored_kernelcore && zone_movable_pfn[nid]) { 1195 unsigned long start_pfn, end_pfn; 1196 struct memblock_region *r; 1197 1198 for_each_mem_region(r) { 1199 start_pfn = clamp(memblock_region_memory_base_pfn(r), 1200 zone_start_pfn, zone_end_pfn); 1201 end_pfn = clamp(memblock_region_memory_end_pfn(r), 1202 zone_start_pfn, zone_end_pfn); 1203 1204 if (zone_type == ZONE_MOVABLE && 1205 memblock_is_mirror(r)) 1206 nr_absent += end_pfn - start_pfn; 1207 1208 if (zone_type == ZONE_NORMAL && 1209 !memblock_is_mirror(r)) 1210 nr_absent += end_pfn - start_pfn; 1211 } 1212 } 1213 1214 return nr_absent; 1215 } 1216 1217 /* 1218 * Return the number of pages a zone spans in a node, including holes 1219 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node() 1220 */ 1221 static unsigned long __init zone_spanned_pages_in_node(int nid, 1222 unsigned long zone_type, 1223 unsigned long node_start_pfn, 1224 unsigned long node_end_pfn, 1225 unsigned long *zone_start_pfn, 1226 unsigned long *zone_end_pfn) 1227 { 1228 unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; 1229 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 1230 1231 /* Get the start and end of the zone */ 1232 *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 1233 *zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 1234 adjust_zone_range_for_zone_movable(nid, zone_type, node_end_pfn, 1235 zone_start_pfn, zone_end_pfn); 1236 1237 /* Check that this node has pages within the zone's required range */ 1238 if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn) 1239 return 0; 1240 1241 /* Move the zone boundaries inside the node if necessary */ 1242 *zone_end_pfn = min(*zone_end_pfn, node_end_pfn); 1243 *zone_start_pfn = max(*zone_start_pfn, node_start_pfn); 1244 1245 /* Return the spanned pages */ 1246 return *zone_end_pfn - *zone_start_pfn; 1247 } 1248 1249 static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat) 1250 { 1251 struct zone *z; 1252 1253 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { 1254 z->zone_start_pfn = 0; 1255 z->spanned_pages = 0; 1256 z->present_pages = 0; 1257 #if defined(CONFIG_MEMORY_HOTPLUG) 1258 z->present_early_pages = 0; 1259 #endif 1260 } 1261 1262 pgdat->node_spanned_pages = 0; 1263 pgdat->node_present_pages = 0; 1264 pr_debug("On node %d totalpages: 0\n", pgdat->node_id); 1265 } 1266 1267 static void __init calculate_node_totalpages(struct pglist_data *pgdat, 1268 unsigned long node_start_pfn, 1269 unsigned long node_end_pfn) 1270 { 1271 unsigned long realtotalpages = 0, totalpages = 0; 1272 enum zone_type i; 1273 1274 for (i = 0; i < MAX_NR_ZONES; i++) { 1275 struct zone *zone = pgdat->node_zones + i; 1276 unsigned long zone_start_pfn, zone_end_pfn; 1277 unsigned long spanned, absent; 1278 unsigned long real_size; 1279 1280 spanned = zone_spanned_pages_in_node(pgdat->node_id, i, 1281 node_start_pfn, 1282 node_end_pfn, 1283 &zone_start_pfn, 1284 &zone_end_pfn); 1285 absent = zone_absent_pages_in_node(pgdat->node_id, i, 1286 zone_start_pfn, 1287 zone_end_pfn); 1288 1289 real_size = spanned - absent; 1290 1291 if (spanned) 1292 zone->zone_start_pfn = zone_start_pfn; 1293 else 1294 zone->zone_start_pfn = 0; 1295 zone->spanned_pages = spanned; 1296 zone->present_pages = real_size; 1297 #if defined(CONFIG_MEMORY_HOTPLUG) 1298 zone->present_early_pages = real_size; 1299 #endif 1300 1301 totalpages += spanned; 1302 realtotalpages += real_size; 1303 } 1304 1305 
pgdat->node_spanned_pages = totalpages; 1306 pgdat->node_present_pages = realtotalpages; 1307 pr_debug("On node %d totalpages: %lu\n", pgdat->node_id, realtotalpages); 1308 } 1309 1310 static unsigned long __init calc_memmap_size(unsigned long spanned_pages, 1311 unsigned long present_pages) 1312 { 1313 unsigned long pages = spanned_pages; 1314 1315 /* 1316 * Provide a more accurate estimation if there are holes within 1317 * the zone and SPARSEMEM is in use. If there are holes within the 1318 * zone, each populated memory region may cost us one or two extra 1319 * memmap pages due to alignment because memmap pages for each 1320 * populated regions may not be naturally aligned on page boundary. 1321 * So the (present_pages >> 4) heuristic is a tradeoff for that. 1322 */ 1323 if (spanned_pages > present_pages + (present_pages >> 4) && 1324 IS_ENABLED(CONFIG_SPARSEMEM)) 1325 pages = present_pages; 1326 1327 return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT; 1328 } 1329 1330 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1331 static void pgdat_init_split_queue(struct pglist_data *pgdat) 1332 { 1333 struct deferred_split *ds_queue = &pgdat->deferred_split_queue; 1334 1335 spin_lock_init(&ds_queue->split_queue_lock); 1336 INIT_LIST_HEAD(&ds_queue->split_queue); 1337 ds_queue->split_queue_len = 0; 1338 } 1339 #else 1340 static void pgdat_init_split_queue(struct pglist_data *pgdat) {} 1341 #endif 1342 1343 #ifdef CONFIG_COMPACTION 1344 static void pgdat_init_kcompactd(struct pglist_data *pgdat) 1345 { 1346 init_waitqueue_head(&pgdat->kcompactd_wait); 1347 } 1348 #else 1349 static void pgdat_init_kcompactd(struct pglist_data *pgdat) {} 1350 #endif 1351 1352 static void __meminit pgdat_init_internals(struct pglist_data *pgdat) 1353 { 1354 int i; 1355 1356 pgdat_resize_init(pgdat); 1357 pgdat_kswapd_lock_init(pgdat); 1358 1359 pgdat_init_split_queue(pgdat); 1360 pgdat_init_kcompactd(pgdat); 1361 1362 init_waitqueue_head(&pgdat->kswapd_wait); 1363 init_waitqueue_head(&pgdat->pfmemalloc_wait); 1364 1365 for (i = 0; i < NR_VMSCAN_THROTTLE; i++) 1366 init_waitqueue_head(&pgdat->reclaim_wait[i]); 1367 1368 pgdat_page_ext_init(pgdat); 1369 lruvec_init(&pgdat->__lruvec); 1370 } 1371 1372 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, 1373 unsigned long remaining_pages) 1374 { 1375 atomic_long_set(&zone->managed_pages, remaining_pages); 1376 zone_set_nid(zone, nid); 1377 zone->name = zone_names[idx]; 1378 zone->zone_pgdat = NODE_DATA(nid); 1379 spin_lock_init(&zone->lock); 1380 zone_seqlock_init(zone); 1381 zone_pcp_init(zone); 1382 } 1383 1384 static void __meminit zone_init_free_lists(struct zone *zone) 1385 { 1386 unsigned int order, t; 1387 for_each_migratetype_order(order, t) { 1388 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); 1389 zone->free_area[order].nr_free = 0; 1390 } 1391 1392 #ifdef CONFIG_UNACCEPTED_MEMORY 1393 INIT_LIST_HEAD(&zone->unaccepted_pages); 1394 #endif 1395 } 1396 1397 void __meminit init_currently_empty_zone(struct zone *zone, 1398 unsigned long zone_start_pfn, 1399 unsigned long size) 1400 { 1401 struct pglist_data *pgdat = zone->zone_pgdat; 1402 int zone_idx = zone_idx(zone) + 1; 1403 1404 if (zone_idx > pgdat->nr_zones) 1405 pgdat->nr_zones = zone_idx; 1406 1407 zone->zone_start_pfn = zone_start_pfn; 1408 1409 mminit_dprintk(MMINIT_TRACE, "memmap_init", 1410 "Initialising map node %d zone %lu pfns %lu -> %lu\n", 1411 pgdat->node_id, 1412 (unsigned long)zone_idx(zone), 1413 zone_start_pfn, (zone_start_pfn + size)); 1414 1415 
zone_init_free_lists(zone); 1416 zone->initialized = 1; 1417 } 1418 1419 #ifndef CONFIG_SPARSEMEM 1420 /* 1421 * Calculate the size of the zone->blockflags rounded to an unsigned long 1422 * Start by making sure zonesize is a multiple of pageblock_order by rounding 1423 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally 1424 * round what is now in bits to nearest long in bits, then return it in 1425 * bytes. 1426 */ 1427 static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize) 1428 { 1429 unsigned long usemapsize; 1430 1431 zonesize += zone_start_pfn & (pageblock_nr_pages-1); 1432 usemapsize = roundup(zonesize, pageblock_nr_pages); 1433 usemapsize = usemapsize >> pageblock_order; 1434 usemapsize *= NR_PAGEBLOCK_BITS; 1435 usemapsize = roundup(usemapsize, BITS_PER_LONG); 1436 1437 return usemapsize / BITS_PER_BYTE; 1438 } 1439 1440 static void __ref setup_usemap(struct zone *zone) 1441 { 1442 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, 1443 zone->spanned_pages); 1444 zone->pageblock_flags = NULL; 1445 if (usemapsize) { 1446 zone->pageblock_flags = 1447 memblock_alloc_node(usemapsize, SMP_CACHE_BYTES, 1448 zone_to_nid(zone)); 1449 if (!zone->pageblock_flags) 1450 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n", 1451 usemapsize, zone->name, zone_to_nid(zone)); 1452 } 1453 } 1454 #else 1455 static inline void setup_usemap(struct zone *zone) {} 1456 #endif /* CONFIG_SPARSEMEM */ 1457 1458 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 1459 1460 /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 1461 void __init set_pageblock_order(void) 1462 { 1463 unsigned int order = MAX_ORDER; 1464 1465 /* Check that pageblock_nr_pages has not already been setup */ 1466 if (pageblock_order) 1467 return; 1468 1469 /* Don't let pageblocks exceed the maximum allocation granularity. */ 1470 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) 1471 order = HUGETLB_PAGE_ORDER; 1472 1473 /* 1474 * Assume the largest contiguous order of interest is a huge page. 1475 * This value may be variable depending on boot parameters on IA64 and 1476 * powerpc. 1477 */ 1478 pageblock_order = order; 1479 } 1480 #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1481 1482 /* 1483 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 1484 * is unused as pageblock_order is set at compile-time. See 1485 * include/linux/pageblock-flags.h for the values of pageblock_order based on 1486 * the kernel config 1487 */ 1488 void __init set_pageblock_order(void) 1489 { 1490 } 1491 1492 #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 1493 1494 /* 1495 * Set up the zone data structures 1496 * - init pgdat internals 1497 * - init all zones belonging to this node 1498 * 1499 * NOTE: this function is only called during memory hotplug 1500 */ 1501 #ifdef CONFIG_MEMORY_HOTPLUG 1502 void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) 1503 { 1504 int nid = pgdat->node_id; 1505 enum zone_type z; 1506 int cpu; 1507 1508 pgdat_init_internals(pgdat); 1509 1510 if (pgdat->per_cpu_nodestats == &boot_nodestats) 1511 pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); 1512 1513 /* 1514 * Reset the nr_zones, order and highest_zoneidx before reuse. 1515 * Note that kswapd will init kswapd_highest_zoneidx properly 1516 * when it starts in the near future. 
1517 */ 1518 pgdat->nr_zones = 0; 1519 pgdat->kswapd_order = 0; 1520 pgdat->kswapd_highest_zoneidx = 0; 1521 pgdat->node_start_pfn = 0; 1522 pgdat->node_present_pages = 0; 1523 1524 for_each_online_cpu(cpu) { 1525 struct per_cpu_nodestat *p; 1526 1527 p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu); 1528 memset(p, 0, sizeof(*p)); 1529 } 1530 1531 /* 1532 * When memory is hot-added, all the memory is in offline state. So 1533 * clear all zones' present_pages and managed_pages because they will 1534 * be updated in online_pages() and offline_pages(). 1535 */ 1536 for (z = 0; z < MAX_NR_ZONES; z++) { 1537 struct zone *zone = pgdat->node_zones + z; 1538 1539 zone->present_pages = 0; 1540 zone_init_internals(zone, z, nid, 0); 1541 } 1542 } 1543 #endif 1544 1545 /* 1546 * Set up the zone data structures: 1547 * - mark all pages reserved 1548 * - mark all memory queues empty 1549 * - clear the memory bitmaps 1550 * 1551 * NOTE: pgdat should get zeroed by caller. 1552 * NOTE: this function is only called during early init. 1553 */ 1554 static void __init free_area_init_core(struct pglist_data *pgdat) 1555 { 1556 enum zone_type j; 1557 int nid = pgdat->node_id; 1558 1559 pgdat_init_internals(pgdat); 1560 pgdat->per_cpu_nodestats = &boot_nodestats; 1561 1562 for (j = 0; j < MAX_NR_ZONES; j++) { 1563 struct zone *zone = pgdat->node_zones + j; 1564 unsigned long size, freesize, memmap_pages; 1565 1566 size = zone->spanned_pages; 1567 freesize = zone->present_pages; 1568 1569 /* 1570 * Adjust freesize so that it accounts for how much memory 1571 * is used by this zone for memmap. This affects the watermark 1572 * and per-cpu initialisations 1573 */ 1574 memmap_pages = calc_memmap_size(size, freesize); 1575 if (!is_highmem_idx(j)) { 1576 if (freesize >= memmap_pages) { 1577 freesize -= memmap_pages; 1578 if (memmap_pages) 1579 pr_debug(" %s zone: %lu pages used for memmap\n", 1580 zone_names[j], memmap_pages); 1581 } else 1582 pr_warn(" %s zone: %lu memmap pages exceeds freesize %lu\n", 1583 zone_names[j], memmap_pages, freesize); 1584 } 1585 1586 /* Account for reserved pages */ 1587 if (j == 0 && freesize > dma_reserve) { 1588 freesize -= dma_reserve; 1589 pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); 1590 } 1591 1592 if (!is_highmem_idx(j)) 1593 nr_kernel_pages += freesize; 1594 /* Charge for highmem memmap if there are enough kernel pages */ 1595 else if (nr_kernel_pages > memmap_pages * 2) 1596 nr_kernel_pages -= memmap_pages; 1597 nr_all_pages += freesize; 1598 1599 /* 1600 * Set an approximate value for lowmem here, it will be adjusted 1601 * when the bootmem allocator frees pages into the buddy system. 1602 * And all highmem pages will be managed by the buddy system. 
1603 */ 1604 zone_init_internals(zone, j, nid, freesize); 1605 1606 if (!size) 1607 continue; 1608 1609 setup_usemap(zone); 1610 init_currently_empty_zone(zone, zone->zone_start_pfn, size); 1611 } 1612 } 1613 1614 void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, 1615 phys_addr_t min_addr, int nid, bool exact_nid) 1616 { 1617 void *ptr; 1618 1619 if (exact_nid) 1620 ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, 1621 MEMBLOCK_ALLOC_ACCESSIBLE, 1622 nid); 1623 else 1624 ptr = memblock_alloc_try_nid_raw(size, align, min_addr, 1625 MEMBLOCK_ALLOC_ACCESSIBLE, 1626 nid); 1627 1628 if (ptr && size > 0) 1629 page_init_poison(ptr, size); 1630 1631 return ptr; 1632 } 1633 1634 #ifdef CONFIG_FLATMEM 1635 static void __init alloc_node_mem_map(struct pglist_data *pgdat) 1636 { 1637 unsigned long __maybe_unused start = 0; 1638 unsigned long __maybe_unused offset = 0; 1639 1640 /* Skip empty nodes */ 1641 if (!pgdat->node_spanned_pages) 1642 return; 1643 1644 start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); 1645 offset = pgdat->node_start_pfn - start; 1646 /* ia64 gets its own node_mem_map, before this, without bootmem */ 1647 if (!pgdat->node_mem_map) { 1648 unsigned long size, end; 1649 struct page *map; 1650 1651 /* 1652 * The zone's endpoints aren't required to be MAX_ORDER 1653 * aligned but the node_mem_map endpoints must be in order 1654 * for the buddy allocator to function correctly. 1655 */ 1656 end = pgdat_end_pfn(pgdat); 1657 end = ALIGN(end, MAX_ORDER_NR_PAGES); 1658 size = (end - start) * sizeof(struct page); 1659 map = memmap_alloc(size, SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT, 1660 pgdat->node_id, false); 1661 if (!map) 1662 panic("Failed to allocate %ld bytes for node %d memory map\n", 1663 size, pgdat->node_id); 1664 pgdat->node_mem_map = map + offset; 1665 } 1666 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n", 1667 __func__, pgdat->node_id, (unsigned long)pgdat, 1668 (unsigned long)pgdat->node_mem_map); 1669 #ifndef CONFIG_NUMA 1670 /* 1671 * With no DISCONTIG, the global mem_map is just set as node 0's 1672 */ 1673 if (pgdat == NODE_DATA(0)) { 1674 mem_map = NODE_DATA(0)->node_mem_map; 1675 if (page_to_pfn(mem_map) != pgdat->node_start_pfn) 1676 mem_map -= offset; 1677 } 1678 #endif 1679 } 1680 #else 1681 static inline void alloc_node_mem_map(struct pglist_data *pgdat) { } 1682 #endif /* CONFIG_FLATMEM */ 1683 1684 /** 1685 * get_pfn_range_for_nid - Return the start and end page frames for a node 1686 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned. 1687 * @start_pfn: Passed by reference. On return, it will have the node start_pfn. 1688 * @end_pfn: Passed by reference. On return, it will have the node end_pfn. 1689 * 1690 * It returns the start and end page frame of a node based on information 1691 * provided by memblock_set_node(). If called for a node 1692 * with no available memory, the start and end PFNs will be 0. 
1693 */ 1694 void __init get_pfn_range_for_nid(unsigned int nid, 1695 unsigned long *start_pfn, unsigned long *end_pfn) 1696 { 1697 unsigned long this_start_pfn, this_end_pfn; 1698 int i; 1699 1700 *start_pfn = -1UL; 1701 *end_pfn = 0; 1702 1703 for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) { 1704 *start_pfn = min(*start_pfn, this_start_pfn); 1705 *end_pfn = max(*end_pfn, this_end_pfn); 1706 } 1707 1708 if (*start_pfn == -1UL) 1709 *start_pfn = 0; 1710 } 1711 1712 static void __init free_area_init_node(int nid) 1713 { 1714 pg_data_t *pgdat = NODE_DATA(nid); 1715 unsigned long start_pfn = 0; 1716 unsigned long end_pfn = 0; 1717 1718 /* pg_data_t should be reset to zero when it's allocated */ 1719 WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx); 1720 1721 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 1722 1723 pgdat->node_id = nid; 1724 pgdat->node_start_pfn = start_pfn; 1725 pgdat->per_cpu_nodestats = NULL; 1726 1727 if (start_pfn != end_pfn) { 1728 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, 1729 (u64)start_pfn << PAGE_SHIFT, 1730 end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); 1731 1732 calculate_node_totalpages(pgdat, start_pfn, end_pfn); 1733 } else { 1734 pr_info("Initmem setup node %d as memoryless\n", nid); 1735 1736 reset_memoryless_node_totalpages(pgdat); 1737 } 1738 1739 alloc_node_mem_map(pgdat); 1740 pgdat_set_deferred_range(pgdat); 1741 1742 free_area_init_core(pgdat); 1743 lru_gen_init_pgdat(pgdat); 1744 } 1745 1746 /* Any regular or high memory on that node ? */ 1747 static void __init check_for_memory(pg_data_t *pgdat) 1748 { 1749 enum zone_type zone_type; 1750 1751 for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) { 1752 struct zone *zone = &pgdat->node_zones[zone_type]; 1753 if (populated_zone(zone)) { 1754 if (IS_ENABLED(CONFIG_HIGHMEM)) 1755 node_set_state(pgdat->node_id, N_HIGH_MEMORY); 1756 if (zone_type <= ZONE_NORMAL) 1757 node_set_state(pgdat->node_id, N_NORMAL_MEMORY); 1758 break; 1759 } 1760 } 1761 } 1762 1763 #if MAX_NUMNODES > 1 1764 /* 1765 * Figure out the number of possible node ids. 1766 */ 1767 void __init setup_nr_node_ids(void) 1768 { 1769 unsigned int highest; 1770 1771 highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES); 1772 nr_node_ids = highest + 1; 1773 } 1774 #endif 1775 1776 /* 1777 * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For 1778 * such cases we allow max_zone_pfn sorted in the descending order 1779 */ 1780 static bool arch_has_descending_max_zone_pfns(void) 1781 { 1782 return IS_ENABLED(CONFIG_ARC) && !IS_ENABLED(CONFIG_ARC_HAS_PAE40); 1783 } 1784 1785 /** 1786 * free_area_init - Initialise all pg_data_t and zone data 1787 * @max_zone_pfn: an array of max PFNs for each zone 1788 * 1789 * This will call free_area_init_node() for each active node in the system. 1790 * Using the page ranges provided by memblock_set_node(), the size of each 1791 * zone in each node and their holes is calculated. If the maximum PFN 1792 * between two adjacent zones match, it is assumed that the zone is empty. 1793 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed 1794 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone 1795 * starts where the previous one ended. For example, ZONE_DMA32 starts 1796 * at arch_max_dma_pfn. 
1797 */ 1798 void __init free_area_init(unsigned long *max_zone_pfn) 1799 { 1800 unsigned long start_pfn, end_pfn; 1801 int i, nid, zone; 1802 bool descending; 1803 1804 /* Record where the zone boundaries are */ 1805 memset(arch_zone_lowest_possible_pfn, 0, 1806 sizeof(arch_zone_lowest_possible_pfn)); 1807 memset(arch_zone_highest_possible_pfn, 0, 1808 sizeof(arch_zone_highest_possible_pfn)); 1809 1810 start_pfn = PHYS_PFN(memblock_start_of_DRAM()); 1811 descending = arch_has_descending_max_zone_pfns(); 1812 1813 for (i = 0; i < MAX_NR_ZONES; i++) { 1814 if (descending) 1815 zone = MAX_NR_ZONES - i - 1; 1816 else 1817 zone = i; 1818 1819 if (zone == ZONE_MOVABLE) 1820 continue; 1821 1822 end_pfn = max(max_zone_pfn[zone], start_pfn); 1823 arch_zone_lowest_possible_pfn[zone] = start_pfn; 1824 arch_zone_highest_possible_pfn[zone] = end_pfn; 1825 1826 start_pfn = end_pfn; 1827 } 1828 1829 /* Find the PFNs that ZONE_MOVABLE begins at in each node */ 1830 memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn)); 1831 find_zone_movable_pfns_for_nodes(); 1832 1833 /* Print out the zone ranges */ 1834 pr_info("Zone ranges:\n"); 1835 for (i = 0; i < MAX_NR_ZONES; i++) { 1836 if (i == ZONE_MOVABLE) 1837 continue; 1838 pr_info(" %-8s ", zone_names[i]); 1839 if (arch_zone_lowest_possible_pfn[i] == 1840 arch_zone_highest_possible_pfn[i]) 1841 pr_cont("empty\n"); 1842 else 1843 pr_cont("[mem %#018Lx-%#018Lx]\n", 1844 (u64)arch_zone_lowest_possible_pfn[i] 1845 << PAGE_SHIFT, 1846 ((u64)arch_zone_highest_possible_pfn[i] 1847 << PAGE_SHIFT) - 1); 1848 } 1849 1850 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 1851 pr_info("Movable zone start for each node\n"); 1852 for (i = 0; i < MAX_NUMNODES; i++) { 1853 if (zone_movable_pfn[i]) 1854 pr_info(" Node %d: %#018Lx\n", i, 1855 (u64)zone_movable_pfn[i] << PAGE_SHIFT); 1856 } 1857 1858 /* 1859 * Print out the early node map, and initialize the 1860 * subsection-map relative to active online memory ranges to 1861 * enable future "sub-section" extensions of the memory map. 1862 */ 1863 pr_info("Early memory node ranges\n"); 1864 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { 1865 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid, 1866 (u64)start_pfn << PAGE_SHIFT, 1867 ((u64)end_pfn << PAGE_SHIFT) - 1); 1868 subsection_map_init(start_pfn, end_pfn - start_pfn); 1869 } 1870 1871 /* Initialise every node */ 1872 mminit_verify_pageflags_layout(); 1873 setup_nr_node_ids(); 1874 set_pageblock_order(); 1875 1876 for_each_node(nid) { 1877 pg_data_t *pgdat; 1878 1879 if (!node_online(nid)) { 1880 pr_info("Initializing node %d as memoryless\n", nid); 1881 1882 /* Allocator not initialized yet */ 1883 pgdat = arch_alloc_nodedata(nid); 1884 if (!pgdat) 1885 panic("Cannot allocate %zuB for node %d.\n", 1886 sizeof(*pgdat), nid); 1887 arch_refresh_nodedata(nid, pgdat); 1888 free_area_init_node(nid); 1889 1890 /* 1891 * We do not want to confuse userspace by sysfs 1892 * files/directories for node without any memory 1893 * attached to it, so this node is not marked as 1894 * N_MEMORY and not marked online so that no sysfs 1895 * hierarchy will be created via register_one_node for 1896 * it. The pgdat will get fully initialized by 1897 * hotadd_init_pgdat() when memory is hotplugged into 1898 * this node. 
1899 */ 1900 continue; 1901 } 1902 1903 pgdat = NODE_DATA(nid); 1904 free_area_init_node(nid); 1905 1906 /* Any memory on that node */ 1907 if (pgdat->node_present_pages) 1908 node_set_state(nid, N_MEMORY); 1909 check_for_memory(pgdat); 1910 } 1911 1912 memmap_init(); 1913 1914 /* disable hash distribution for systems with a single node */ 1915 fixup_hashdist(); 1916 } 1917 1918 /** 1919 * node_map_pfn_alignment - determine the maximum internode alignment 1920 * 1921 * This function should be called after node map is populated and sorted. 1922 * It calculates the maximum power of two alignment which can distinguish 1923 * all the nodes. 1924 * 1925 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value 1926 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the 1927 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is 1928 * shifted, 1GiB is enough and this function will indicate so. 1929 * 1930 * This is used to test whether pfn -> nid mapping of the chosen memory 1931 * model has fine enough granularity to avoid incorrect mapping for the 1932 * populated node map. 1933 * 1934 * Return: the determined alignment in pfn's. 0 if there is no alignment 1935 * requirement (single node). 1936 */ 1937 unsigned long __init node_map_pfn_alignment(void) 1938 { 1939 unsigned long accl_mask = 0, last_end = 0; 1940 unsigned long start, end, mask; 1941 int last_nid = NUMA_NO_NODE; 1942 int i, nid; 1943 1944 for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) { 1945 if (!start || last_nid < 0 || last_nid == nid) { 1946 last_nid = nid; 1947 last_end = end; 1948 continue; 1949 } 1950 1951 /* 1952 * Start with a mask granular enough to pin-point to the 1953 * start pfn and tick off bits one-by-one until it becomes 1954 * too coarse to separate the current node from the last. 1955 */ 1956 mask = ~((1 << __ffs(start)) - 1); 1957 while (mask && last_end <= (start & (mask << 1))) 1958 mask <<= 1; 1959 1960 /* accumulate all internode masks */ 1961 accl_mask |= mask; 1962 } 1963 1964 /* convert mask to number of pages */ 1965 return ~accl_mask + 1; 1966 } 1967 1968 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1969 static void __init deferred_free_range(unsigned long pfn, 1970 unsigned long nr_pages) 1971 { 1972 struct page *page; 1973 unsigned long i; 1974 1975 if (!nr_pages) 1976 return; 1977 1978 page = pfn_to_page(pfn); 1979 1980 /* Free a large naturally-aligned chunk if possible */ 1981 if (nr_pages == MAX_ORDER_NR_PAGES && IS_MAX_ORDER_ALIGNED(pfn)) { 1982 for (i = 0; i < nr_pages; i += pageblock_nr_pages) 1983 set_pageblock_migratetype(page + i, MIGRATE_MOVABLE); 1984 __free_pages_core(page, MAX_ORDER); 1985 return; 1986 } 1987 1988 /* Accept chunks smaller than MAX_ORDER upfront */ 1989 accept_memory(PFN_PHYS(pfn), PFN_PHYS(pfn + nr_pages)); 1990 1991 for (i = 0; i < nr_pages; i++, page++, pfn++) { 1992 if (pageblock_aligned(pfn)) 1993 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1994 __free_pages_core(page, 0); 1995 } 1996 } 1997 1998 /* Completion tracking for deferred_init_memmap() threads */ 1999 static atomic_t pgdat_init_n_undone __initdata; 2000 static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp); 2001 2002 static inline void __init pgdat_init_report_one_done(void) 2003 { 2004 if (atomic_dec_and_test(&pgdat_init_n_undone)) 2005 complete(&pgdat_init_all_done_comp); 2006 } 2007 2008 /* 2009 * Returns true if page needs to be initialized or freed to buddy allocator. 
2010 * 2011 * We check if a current MAX_ORDER block is valid by only checking the validity 2012 * of the head pfn. 2013 */ 2014 static inline bool __init deferred_pfn_valid(unsigned long pfn) 2015 { 2016 if (IS_MAX_ORDER_ALIGNED(pfn) && !pfn_valid(pfn)) 2017 return false; 2018 return true; 2019 } 2020 2021 /* 2022 * Free pages to buddy allocator. Try to free aligned pages in 2023 * MAX_ORDER_NR_PAGES sizes. 2024 */ 2025 static void __init deferred_free_pages(unsigned long pfn, 2026 unsigned long end_pfn) 2027 { 2028 unsigned long nr_free = 0; 2029 2030 for (; pfn < end_pfn; pfn++) { 2031 if (!deferred_pfn_valid(pfn)) { 2032 deferred_free_range(pfn - nr_free, nr_free); 2033 nr_free = 0; 2034 } else if (IS_MAX_ORDER_ALIGNED(pfn)) { 2035 deferred_free_range(pfn - nr_free, nr_free); 2036 nr_free = 1; 2037 } else { 2038 nr_free++; 2039 } 2040 } 2041 /* Free the last block of pages to allocator */ 2042 deferred_free_range(pfn - nr_free, nr_free); 2043 } 2044 2045 /* 2046 * Initialize struct pages. We minimize pfn page lookups and scheduler checks 2047 * by performing it only once every MAX_ORDER_NR_PAGES. 2048 * Return number of pages initialized. 2049 */ 2050 static unsigned long __init deferred_init_pages(struct zone *zone, 2051 unsigned long pfn, 2052 unsigned long end_pfn) 2053 { 2054 int nid = zone_to_nid(zone); 2055 unsigned long nr_pages = 0; 2056 int zid = zone_idx(zone); 2057 struct page *page = NULL; 2058 2059 for (; pfn < end_pfn; pfn++) { 2060 if (!deferred_pfn_valid(pfn)) { 2061 page = NULL; 2062 continue; 2063 } else if (!page || IS_MAX_ORDER_ALIGNED(pfn)) { 2064 page = pfn_to_page(pfn); 2065 } else { 2066 page++; 2067 } 2068 __init_single_page(page, pfn, zid, nid); 2069 nr_pages++; 2070 } 2071 return (nr_pages); 2072 } 2073 2074 /* 2075 * This function is meant to pre-load the iterator for the zone init. 2076 * Specifically it walks through the ranges until we are caught up to the 2077 * first_init_pfn value and exits there. If we never encounter the value we 2078 * return false indicating there are no valid ranges left. 2079 */ 2080 static bool __init 2081 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, 2082 unsigned long *spfn, unsigned long *epfn, 2083 unsigned long first_init_pfn) 2084 { 2085 u64 j; 2086 2087 /* 2088 * Start out by walking through the ranges in this zone that have 2089 * already been initialized. We don't need to do anything with them 2090 * so we just need to flush them out of the system. 2091 */ 2092 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { 2093 if (*epfn <= first_init_pfn) 2094 continue; 2095 if (*spfn < first_init_pfn) 2096 *spfn = first_init_pfn; 2097 *i = j; 2098 return true; 2099 } 2100 2101 return false; 2102 } 2103 2104 /* 2105 * Initialize and free pages. We do it in two loops: first we initialize 2106 * struct page, then free to buddy allocator, because while we are 2107 * freeing pages we can access pages that are ahead (computing buddy 2108 * page in __free_one_page()). 2109 * 2110 * In order to try and keep some memory in the cache we have the loop 2111 * broken along max page order boundaries. This way we will not cause 2112 * any issues with the buddy page computation. 
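 *
 * Each call therefore covers at most one max-order block: mo_pfn is the first
 * MAX_ORDER_NR_PAGES boundary strictly above *start_pfn, the first loop
 * initialises struct pages up to that boundary and the second loop frees the
 * same pages, leaving the iterator and pfn cursors positioned for the next
 * call.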
2113 */ 2114 static unsigned long __init 2115 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, 2116 unsigned long *end_pfn) 2117 { 2118 unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES); 2119 unsigned long spfn = *start_pfn, epfn = *end_pfn; 2120 unsigned long nr_pages = 0; 2121 u64 j = *i; 2122 2123 /* First we loop through and initialize the page values */ 2124 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { 2125 unsigned long t; 2126 2127 if (mo_pfn <= *start_pfn) 2128 break; 2129 2130 t = min(mo_pfn, *end_pfn); 2131 nr_pages += deferred_init_pages(zone, *start_pfn, t); 2132 2133 if (mo_pfn < *end_pfn) { 2134 *start_pfn = mo_pfn; 2135 break; 2136 } 2137 } 2138 2139 /* Reset values and now loop through freeing pages as needed */ 2140 swap(j, *i); 2141 2142 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { 2143 unsigned long t; 2144 2145 if (mo_pfn <= spfn) 2146 break; 2147 2148 t = min(mo_pfn, epfn); 2149 deferred_free_pages(spfn, t); 2150 2151 if (mo_pfn <= epfn) 2152 break; 2153 } 2154 2155 return nr_pages; 2156 } 2157 2158 static void __init 2159 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, 2160 void *arg) 2161 { 2162 unsigned long spfn, epfn; 2163 struct zone *zone = arg; 2164 u64 i; 2165 2166 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); 2167 2168 /* 2169 * Initialize and free pages in MAX_ORDER sized increments so that we 2170 * can avoid introducing any issues with the buddy allocator. 2171 */ 2172 while (spfn < end_pfn) { 2173 deferred_init_maxorder(&i, zone, &spfn, &epfn); 2174 cond_resched(); 2175 } 2176 } 2177 2178 /* An arch may override for more concurrency. */ 2179 __weak int __init 2180 deferred_page_init_max_threads(const struct cpumask *node_cpumask) 2181 { 2182 return 1; 2183 } 2184 2185 /* Initialise remaining memory on a node */ 2186 static int __init deferred_init_memmap(void *data) 2187 { 2188 pg_data_t *pgdat = data; 2189 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2190 unsigned long spfn = 0, epfn = 0; 2191 unsigned long first_init_pfn, flags; 2192 unsigned long start = jiffies; 2193 struct zone *zone; 2194 int zid, max_threads; 2195 u64 i; 2196 2197 /* Bind memory initialisation thread to a local node if possible */ 2198 if (!cpumask_empty(cpumask)) 2199 set_cpus_allowed_ptr(current, cpumask); 2200 2201 pgdat_resize_lock(pgdat, &flags); 2202 first_init_pfn = pgdat->first_deferred_pfn; 2203 if (first_init_pfn == ULONG_MAX) { 2204 pgdat_resize_unlock(pgdat, &flags); 2205 pgdat_init_report_one_done(); 2206 return 0; 2207 } 2208 2209 /* Sanity check boundaries */ 2210 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); 2211 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); 2212 pgdat->first_deferred_pfn = ULONG_MAX; 2213 2214 /* 2215 * Once we unlock here, the zone cannot be grown anymore, thus if an 2216 * interrupt thread must allocate this early in boot, zone must be 2217 * pre-grown prior to start of deferred page initialization. 
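 *
 * (That on-demand growth is done by deferred_grow_zone(), which serialises
 * against this thread via pgdat_resize_lock() and pgdat->first_deferred_pfn.)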
2218 */ 2219 pgdat_resize_unlock(pgdat, &flags); 2220 2221 /* Only the highest zone is deferred so find it */ 2222 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 2223 zone = pgdat->node_zones + zid; 2224 if (first_init_pfn < zone_end_pfn(zone)) 2225 break; 2226 } 2227 2228 /* If the zone is empty somebody else may have cleared out the zone */ 2229 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2230 first_init_pfn)) 2231 goto zone_empty; 2232 2233 max_threads = deferred_page_init_max_threads(cpumask); 2234 2235 while (spfn < epfn) { 2236 unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION); 2237 struct padata_mt_job job = { 2238 .thread_fn = deferred_init_memmap_chunk, 2239 .fn_arg = zone, 2240 .start = spfn, 2241 .size = epfn_align - spfn, 2242 .align = PAGES_PER_SECTION, 2243 .min_chunk = PAGES_PER_SECTION, 2244 .max_threads = max_threads, 2245 }; 2246 2247 padata_do_multithreaded(&job); 2248 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2249 epfn_align); 2250 } 2251 zone_empty: 2252 /* Sanity check that the next zone really is unpopulated */ 2253 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); 2254 2255 pr_info("node %d deferred pages initialised in %ums\n", 2256 pgdat->node_id, jiffies_to_msecs(jiffies - start)); 2257 2258 pgdat_init_report_one_done(); 2259 return 0; 2260 } 2261 2262 /* 2263 * If this zone has deferred pages, try to grow it by initializing enough 2264 * deferred pages to satisfy the allocation specified by order, rounded up to 2265 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments 2266 * of SECTION_SIZE bytes by initializing struct pages in increments of 2267 * PAGES_PER_SECTION * sizeof(struct page) bytes. 2268 * 2269 * Return true when zone was grown, otherwise return false. We return true even 2270 * when we grow less than requested, to let the caller decide if there are 2271 * enough pages to satisfy the allocation. 2272 * 2273 * Note: We use noinline because this function is needed only during boot, and 2274 * it is called from a __ref function _deferred_grow_zone. This way we are 2275 * making sure that it is not inlined into permanent text section. 2276 */ 2277 bool __init deferred_grow_zone(struct zone *zone, unsigned int order) 2278 { 2279 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); 2280 pg_data_t *pgdat = zone->zone_pgdat; 2281 unsigned long first_deferred_pfn = pgdat->first_deferred_pfn; 2282 unsigned long spfn, epfn, flags; 2283 unsigned long nr_pages = 0; 2284 u64 i; 2285 2286 /* Only the last zone may have deferred pages */ 2287 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) 2288 return false; 2289 2290 pgdat_resize_lock(pgdat, &flags); 2291 2292 /* 2293 * If someone grew this zone while we were waiting for spinlock, return 2294 * true, as there might be enough pages already. 2295 */ 2296 if (first_deferred_pfn != pgdat->first_deferred_pfn) { 2297 pgdat_resize_unlock(pgdat, &flags); 2298 return true; 2299 } 2300 2301 /* If the zone is empty somebody else may have cleared out the zone */ 2302 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, 2303 first_deferred_pfn)) { 2304 pgdat->first_deferred_pfn = ULONG_MAX; 2305 pgdat_resize_unlock(pgdat, &flags); 2306 /* Retry only once. */ 2307 return first_deferred_pfn != ULONG_MAX; 2308 } 2309 2310 /* 2311 * Initialize and free pages in MAX_ORDER sized increments so 2312 * that we can avoid introducing any issues with the buddy 2313 * allocator. 
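 *
 * deferred_init_maxorder() advances spfn by at most one max-order block
 * per call, so this loop usually iterates many times; it only stops once
 * a section boundary has been crossed and at least nr_pages_needed pages
 * (a whole number of sections) have been initialised, or the deferred
 * range is exhausted.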
2314 */ 2315 while (spfn < epfn) { 2316 /* update our first deferred PFN for this section */ 2317 first_deferred_pfn = spfn; 2318 2319 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); 2320 touch_nmi_watchdog(); 2321 2322 /* We should only stop along section boundaries */ 2323 if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION) 2324 continue; 2325 2326 /* If our quota has been met we can stop here */ 2327 if (nr_pages >= nr_pages_needed) 2328 break; 2329 } 2330 2331 pgdat->first_deferred_pfn = spfn; 2332 pgdat_resize_unlock(pgdat, &flags); 2333 2334 return nr_pages > 0; 2335 } 2336 2337 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 2338 2339 #ifdef CONFIG_CMA 2340 void __init init_cma_reserved_pageblock(struct page *page) 2341 { 2342 unsigned i = pageblock_nr_pages; 2343 struct page *p = page; 2344 2345 do { 2346 __ClearPageReserved(p); 2347 set_page_count(p, 0); 2348 } while (++p, --i); 2349 2350 set_pageblock_migratetype(page, MIGRATE_CMA); 2351 set_page_refcounted(page); 2352 __free_pages(page, pageblock_order); 2353 2354 adjust_managed_page_count(page, pageblock_nr_pages); 2355 page_zone(page)->cma_pages += pageblock_nr_pages; 2356 } 2357 #endif 2358 2359 void set_zone_contiguous(struct zone *zone) 2360 { 2361 unsigned long block_start_pfn = zone->zone_start_pfn; 2362 unsigned long block_end_pfn; 2363 2364 block_end_pfn = pageblock_end_pfn(block_start_pfn); 2365 for (; block_start_pfn < zone_end_pfn(zone); 2366 block_start_pfn = block_end_pfn, 2367 block_end_pfn += pageblock_nr_pages) { 2368 2369 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); 2370 2371 if (!__pageblock_pfn_to_page(block_start_pfn, 2372 block_end_pfn, zone)) 2373 return; 2374 cond_resched(); 2375 } 2376 2377 /* We confirm that there is no hole */ 2378 zone->contiguous = true; 2379 } 2380 2381 void __init page_alloc_init_late(void) 2382 { 2383 struct zone *zone; 2384 int nid; 2385 2386 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 2387 2388 /* There will be num_node_state(N_MEMORY) threads */ 2389 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY)); 2390 for_each_node_state(nid, N_MEMORY) { 2391 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 2392 } 2393 2394 /* Block until all are initialised */ 2395 wait_for_completion(&pgdat_init_all_done_comp); 2396 2397 /* 2398 * We initialized the rest of the deferred pages. Permanently disable 2399 * on-demand struct page initialization. 2400 */ 2401 static_branch_disable(&deferred_pages); 2402 2403 /* Reinit limits that are based on free pages after the kernel is up */ 2404 files_maxfiles_init(); 2405 #endif 2406 2407 buffer_init(); 2408 2409 /* Discard memblock private memory */ 2410 memblock_discard(); 2411 2412 for_each_node_state(nid, N_MEMORY) 2413 shuffle_free_memory(NODE_DATA(nid)); 2414 2415 for_each_populated_zone(zone) 2416 set_zone_contiguous(zone); 2417 2418 /* Initialize page ext after all struct pages are initialized. */ 2419 if (deferred_struct_pages) 2420 page_ext_init(); 2421 2422 page_alloc_sysctl_init(); 2423 } 2424 2425 #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES 2426 /* 2427 * Returns the number of pages that arch has reserved but 2428 * is not known to alloc_large_system_hash(). 2429 */ 2430 static unsigned long __init arch_reserved_kernel_pages(void) 2431 { 2432 return 0; 2433 } 2434 #endif 2435 2436 /* 2437 * Adaptive scale is meant to reduce sizes of hash tables on large memory 2438 * machines. As memory size is increased the scale is also increased but at 2439 * slower pace. 
Starting from ADAPT_SCALE_BASE (64G), every time memory 2440 * quadruples the scale is increased by one, which means the size of hash table 2441 * only doubles, instead of quadrupling as well. 2442 * Because 32-bit systems cannot have large physical memory, where this scaling 2443 * makes sense, it is disabled on such platforms. 2444 */ 2445 #if __BITS_PER_LONG > 32 2446 #define ADAPT_SCALE_BASE (64ul << 30) 2447 #define ADAPT_SCALE_SHIFT 2 2448 #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT) 2449 #endif 2450 2451 /* 2452 * allocate a large system hash table from bootmem 2453 * - it is assumed that the hash table must contain an exact power-of-2 2454 * quantity of entries 2455 * - limit is the number of hash buckets, not the total allocation size 2456 */ 2457 void *__init alloc_large_system_hash(const char *tablename, 2458 unsigned long bucketsize, 2459 unsigned long numentries, 2460 int scale, 2461 int flags, 2462 unsigned int *_hash_shift, 2463 unsigned int *_hash_mask, 2464 unsigned long low_limit, 2465 unsigned long high_limit) 2466 { 2467 unsigned long long max = high_limit; 2468 unsigned long log2qty, size; 2469 void *table; 2470 gfp_t gfp_flags; 2471 bool virt; 2472 bool huge; 2473 2474 /* allow the kernel cmdline to have a say */ 2475 if (!numentries) { 2476 /* round applicable memory size up to nearest megabyte */ 2477 numentries = nr_kernel_pages; 2478 numentries -= arch_reserved_kernel_pages(); 2479 2480 /* It isn't necessary when PAGE_SIZE >= 1MB */ 2481 if (PAGE_SIZE < SZ_1M) 2482 numentries = round_up(numentries, SZ_1M / PAGE_SIZE); 2483 2484 #if __BITS_PER_LONG > 32 2485 if (!high_limit) { 2486 unsigned long adapt; 2487 2488 for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries; 2489 adapt <<= ADAPT_SCALE_SHIFT) 2490 scale++; 2491 } 2492 #endif 2493 2494 /* limit to 1 bucket per 2^scale bytes of low memory */ 2495 if (scale > PAGE_SHIFT) 2496 numentries >>= (scale - PAGE_SHIFT); 2497 else 2498 numentries <<= (PAGE_SHIFT - scale); 2499 2500 if (unlikely((numentries * bucketsize) < PAGE_SIZE)) 2501 numentries = PAGE_SIZE / bucketsize; 2502 } 2503 numentries = roundup_pow_of_two(numentries); 2504 2505 /* limit allocation size to 1/16 total memory by default */ 2506 if (max == 0) { 2507 max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4; 2508 do_div(max, bucketsize); 2509 } 2510 max = min(max, 0x80000000ULL); 2511 2512 if (numentries < low_limit) 2513 numentries = low_limit; 2514 if (numentries > max) 2515 numentries = max; 2516 2517 log2qty = ilog2(numentries); 2518 2519 gfp_flags = (flags & HASH_ZERO) ? 
GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC; 2520 do { 2521 virt = false; 2522 size = bucketsize << log2qty; 2523 if (flags & HASH_EARLY) { 2524 if (flags & HASH_ZERO) 2525 table = memblock_alloc(size, SMP_CACHE_BYTES); 2526 else 2527 table = memblock_alloc_raw(size, 2528 SMP_CACHE_BYTES); 2529 } else if (get_order(size) > MAX_ORDER || hashdist) { 2530 table = vmalloc_huge(size, gfp_flags); 2531 virt = true; 2532 if (table) 2533 huge = is_vm_area_hugepages(table); 2534 } else { 2535 /* 2536 * If bucketsize is not a power-of-two, we may free 2537 * some pages at the end of hash table which 2538 * alloc_pages_exact() automatically does 2539 */ 2540 table = alloc_pages_exact(size, gfp_flags); 2541 kmemleak_alloc(table, size, 1, gfp_flags); 2542 } 2543 } while (!table && size > PAGE_SIZE && --log2qty); 2544 2545 if (!table) 2546 panic("Failed to allocate %s hash table\n", tablename); 2547 2548 pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n", 2549 tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size, 2550 virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear"); 2551 2552 if (_hash_shift) 2553 *_hash_shift = log2qty; 2554 if (_hash_mask) 2555 *_hash_mask = (1 << log2qty) - 1; 2556 2557 return table; 2558 } 2559 2560 /** 2561 * set_dma_reserve - set the specified number of pages reserved in the first zone 2562 * @new_dma_reserve: The number of pages to mark reserved 2563 * 2564 * The per-cpu batchsize and zone watermarks are determined by managed_pages. 2565 * In the DMA zone, a significant percentage may be consumed by kernel image 2566 * and other unfreeable allocations which can skew the watermarks badly. This 2567 * function may optionally be used to account for unfreeable pages in the 2568 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and 2569 * smaller per-cpu batchsize. 2570 */ 2571 void __init set_dma_reserve(unsigned long new_dma_reserve) 2572 { 2573 dma_reserve = new_dma_reserve; 2574 } 2575 2576 void __init memblock_free_pages(struct page *page, unsigned long pfn, 2577 unsigned int order) 2578 { 2579 2580 if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) { 2581 int nid = early_pfn_to_nid(pfn); 2582 2583 if (!early_page_initialised(pfn, nid)) 2584 return; 2585 } 2586 2587 if (!kmsan_memblock_free_pages(page, order)) { 2588 /* KMSAN will take care of these pages. */ 2589 return; 2590 } 2591 __free_pages_core(page, order); 2592 } 2593 2594 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); 2595 EXPORT_SYMBOL(init_on_alloc); 2596 2597 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); 2598 EXPORT_SYMBOL(init_on_free); 2599 2600 static bool _init_on_alloc_enabled_early __read_mostly 2601 = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); 2602 static int __init early_init_on_alloc(char *buf) 2603 { 2604 2605 return kstrtobool(buf, &_init_on_alloc_enabled_early); 2606 } 2607 early_param("init_on_alloc", early_init_on_alloc); 2608 2609 static bool _init_on_free_enabled_early __read_mostly 2610 = IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON); 2611 static int __init early_init_on_free(char *buf) 2612 { 2613 return kstrtobool(buf, &_init_on_free_enabled_early); 2614 } 2615 early_param("init_on_free", early_init_on_free); 2616 2617 DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); 2618 2619 /* 2620 * Enable static keys related to various memory debugging and hardening options. 2621 * Some override others, and depend on early params that are evaluated in the 2622 * order of appearance. 
So we need to first gather the full picture of what was 2623 * enabled, and then make decisions. 2624 */ 2625 static void __init mem_debugging_and_hardening_init(void) 2626 { 2627 bool page_poisoning_requested = false; 2628 bool want_check_pages = false; 2629 2630 #ifdef CONFIG_PAGE_POISONING 2631 /* 2632 * Page poisoning is debug page alloc for some arches. If 2633 * either of those options are enabled, enable poisoning. 2634 */ 2635 if (page_poisoning_enabled() || 2636 (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && 2637 debug_pagealloc_enabled())) { 2638 static_branch_enable(&_page_poisoning_enabled); 2639 page_poisoning_requested = true; 2640 want_check_pages = true; 2641 } 2642 #endif 2643 2644 if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && 2645 page_poisoning_requested) { 2646 pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " 2647 "will take precedence over init_on_alloc and init_on_free\n"); 2648 _init_on_alloc_enabled_early = false; 2649 _init_on_free_enabled_early = false; 2650 } 2651 2652 if (_init_on_alloc_enabled_early) { 2653 want_check_pages = true; 2654 static_branch_enable(&init_on_alloc); 2655 } else { 2656 static_branch_disable(&init_on_alloc); 2657 } 2658 2659 if (_init_on_free_enabled_early) { 2660 want_check_pages = true; 2661 static_branch_enable(&init_on_free); 2662 } else { 2663 static_branch_disable(&init_on_free); 2664 } 2665 2666 if (IS_ENABLED(CONFIG_KMSAN) && 2667 (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) 2668 pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); 2669 2670 #ifdef CONFIG_DEBUG_PAGEALLOC 2671 if (debug_pagealloc_enabled()) { 2672 want_check_pages = true; 2673 static_branch_enable(&_debug_pagealloc_enabled); 2674 2675 if (debug_guardpage_minorder()) 2676 static_branch_enable(&_debug_guardpage_enabled); 2677 } 2678 #endif 2679 2680 /* 2681 * Any page debugging or hardening option also enables sanity checking 2682 * of struct pages being allocated or freed. With CONFIG_DEBUG_VM it's 2683 * enabled already. 2684 */ 2685 if (!IS_ENABLED(CONFIG_DEBUG_VM) && want_check_pages) 2686 static_branch_enable(&check_pages_enabled); 2687 } 2688 2689 /* Report memory auto-initialization states for this boot. */ 2690 static void __init report_meminit(void) 2691 { 2692 const char *stack; 2693 2694 if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) 2695 stack = "all(pattern)"; 2696 else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) 2697 stack = "all(zero)"; 2698 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)) 2699 stack = "byref_all(zero)"; 2700 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)) 2701 stack = "byref(zero)"; 2702 else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)) 2703 stack = "__user(zero)"; 2704 else 2705 stack = "off"; 2706 2707 pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", 2708 stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off", 2709 want_init_on_free() ? 
"on" : "off"); 2710 if (want_init_on_free()) 2711 pr_info("mem auto-init: clearing system memory may take some time...\n"); 2712 } 2713 2714 static void __init mem_init_print_info(void) 2715 { 2716 unsigned long physpages, codesize, datasize, rosize, bss_size; 2717 unsigned long init_code_size, init_data_size; 2718 2719 physpages = get_num_physpages(); 2720 codesize = _etext - _stext; 2721 datasize = _edata - _sdata; 2722 rosize = __end_rodata - __start_rodata; 2723 bss_size = __bss_stop - __bss_start; 2724 init_data_size = __init_end - __init_begin; 2725 init_code_size = _einittext - _sinittext; 2726 2727 /* 2728 * Detect special cases and adjust section sizes accordingly: 2729 * 1) .init.* may be embedded into .data sections 2730 * 2) .init.text.* may be out of [__init_begin, __init_end], 2731 * please refer to arch/tile/kernel/vmlinux.lds.S. 2732 * 3) .rodata.* may be embedded into .text or .data sections. 2733 */ 2734 #define adj_init_size(start, end, size, pos, adj) \ 2735 do { \ 2736 if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \ 2737 size -= adj; \ 2738 } while (0) 2739 2740 adj_init_size(__init_begin, __init_end, init_data_size, 2741 _sinittext, init_code_size); 2742 adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size); 2743 adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size); 2744 adj_init_size(_stext, _etext, codesize, __start_rodata, rosize); 2745 adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize); 2746 2747 #undef adj_init_size 2748 2749 pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved" 2750 #ifdef CONFIG_HIGHMEM 2751 ", %luK highmem" 2752 #endif 2753 ")\n", 2754 K(nr_free_pages()), K(physpages), 2755 codesize / SZ_1K, datasize / SZ_1K, rosize / SZ_1K, 2756 (init_data_size + init_code_size) / SZ_1K, bss_size / SZ_1K, 2757 K(physpages - totalram_pages() - totalcma_pages), 2758 K(totalcma_pages) 2759 #ifdef CONFIG_HIGHMEM 2760 , K(totalhigh_pages()) 2761 #endif 2762 ); 2763 } 2764 2765 /* 2766 * Set up kernel memory allocators 2767 */ 2768 void __init mm_core_init(void) 2769 { 2770 /* Initializations relying on SMP setup */ 2771 build_all_zonelists(NULL); 2772 page_alloc_init_cpuhp(); 2773 2774 /* 2775 * page_ext requires contiguous pages, 2776 * bigger than MAX_ORDER unless SPARSEMEM. 2777 */ 2778 page_ext_init_flatmem(); 2779 mem_debugging_and_hardening_init(); 2780 kfence_alloc_pool_and_metadata(); 2781 report_meminit(); 2782 kmsan_init_shadow(); 2783 stack_depot_early_init(); 2784 mem_init(); 2785 mem_init_print_info(); 2786 kmem_cache_init(); 2787 /* 2788 * page_owner must be initialized after buddy is ready, and also after 2789 * slab is ready so that stack_depot_init() works properly 2790 */ 2791 page_ext_init_flatmem_late(); 2792 kmemleak_init(); 2793 ptlock_cache_init(); 2794 pgtable_cache_init(); 2795 debug_objects_mem_init(); 2796 vmalloc_init(); 2797 /* If no deferred init page_ext now, as vmap is fully initialized */ 2798 if (!deferred_struct_pages) 2799 page_ext_init(); 2800 /* Should be run before the first non-init thread is created */ 2801 init_espfix_bsp(); 2802 /* Should be run after espfix64 is set up. */ 2803 pti_init(); 2804 kmsan_init_runtime(); 2805 mm_cache_init(); 2806 } 2807