// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/bootmem_info.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
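/*
 * Allocate one root's worth of mem_section entries: from the slab once it
 * is available, otherwise from memblock during early boot.
 */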
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return ((unsigned long)nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void __section_mark_present(struct mem_section *ms,
		unsigned long section_nr)
{
	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

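/*
 * Mark the subsections spanning [pfn, pfn + nr_pages) as present in the
 * subsection_map of each section they fall into.
 */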
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			__section_mark_present(ms, section);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful to mark all of the system's
 * memory as present during initialization.
 */
static void __init memblocks_present(void)
{
	unsigned long start, end;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		memory_present(nid, start, end);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > PFN_SECTION_SHIFT);
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
{
#ifndef CONFIG_NUMA
	VM_BUG_ON(pgdat != &contig_page_data);
	return __pa_symbol(&contig_page_data);
#else
	return __pa(pgdat);
#endif
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

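/*
 * Note (via pr_info) when a section's usemap is not allocated in the same
 * section as its node's pgdat, since such cross-section references make
 * hot-removing either section harder.
 */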
static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memmap_alloc(size, size, addr, nid, false);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

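/*
 * Bootstrap buffer handed out by sparse_buffer_alloc(). It is carved out
 * per node in sparse_init_nid() so that early memmap allocations stay
 * node-local and section-aligned.
 */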
static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free(sparsemap_buf, size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

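/*
 * Carve a 'size'-aligned chunk out of the bootstrap buffer. Any alignment
 * padding in front of the returned chunk is handed back to memblock via
 * sparse_buffer_free(). Returns NULL once the buffer is exhausted, in which
 * case callers fall back to a regular memblock allocation.
 */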
void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			sparse_buffer_fini();
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

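/*
 * Clear the bits for [pfn, pfn + nr_pages) in the section's subsection_map.
 * Returns -EINVAL (with a warning) if any part of the range was not set,
 * i.e. the (sub)section was already deactivated.
 */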
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			SUBSECTIONS_PER_SECTION);
}

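/*
 * Set the bits for [pfn, pfn + nr_pages) in the section's subsection_map.
 * Returns -EINVAL for an empty range and -EEXIST if any of the subsections
 * in the range are already populated.
 */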
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = page->index;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is in a
		 * logically offlined state. This means all pages are isolated
		 * from the page allocator. If the removing section's memmap is
		 * placed on the same section, it must not be freed.
		 * If it is freed, the page allocator may allocate it, and it
		 * will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when the subsection_map is not empty we will not be freeing the
 * usage map, but we still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

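/*
 * Allocate the mem_section_usage if the section does not have one yet, mark
 * the requested subsections as populated, and return the memmap backing them.
 * For a partially populated early section the boot-time memmap is reused;
 * otherwise a new memmap is populated for the range.
 */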
static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: alternate pfns to allocate the memmap backing store
 * @pgmap: alternate compound page geometry for devmap mappings
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug,
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	__section_mark_present(ms, section_nr);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */