// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = memblock_virt_alloc_node(array_size, nid);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
	return (int)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
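/*
 * Worked example of the early-nid encoding above (illustrative only;
 * the exact flag values come from include/linux/mmzone.h): with
 * SECTION_NID_SHIFT == 3, node 2 is stored as 2 << 3 == 0x10, which
 * keeps the low flag bits (SECTION_MARKED_PRESENT, SECTION_HAS_MEM_MAP,
 * SECTION_IS_ONLINE) free, and sparse_early_nid() recovers the node by
 * shifting the same amount back down.
 */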
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	int section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr < NR_MEM_SECTIONS) &&
		 (section_nr <= __highest_present_section_nr));

	return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr >= 0) &&				\
	      (section_nr < NR_MEM_SECTIONS) &&			\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_virt_alloc(size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}
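/*
 * memory_present() is called from architecture setup code once per
 * populated physical range. A typical (illustrative) caller walks the
 * memblock regions, roughly:
 *
 *	for_each_memblock(memory, reg)
 *		memory_present(memblock_get_region_node(reg),
 *			       memblock_region_memory_base_pfn(reg),
 *			       memblock_region_memory_end_pfn(reg));
 *
 * The exact call site is architecture specific.
 */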
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
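/*
 * Illustrative arithmetic for the encoding above (in struct page units):
 * the stored value acts as a fake mem_map base that can be indexed with
 * an absolute pfn. If a section's first pfn is
 * start = section_nr_to_pfn(pnum), then
 *
 *	coded         = mem_map - start;
 *	coded + pfn   = mem_map + (pfn - start);	// pfn_to_page() path
 *	coded + start = mem_map;			// sparse_decode_mem_map()
 *
 * The BUILD_BUG_ON()/BUG_ON() checks above make sure the low SECTION_*
 * flag bits of the coded value stay zero, so they remain available for
 * the present/has-mem_map/online flags.
 */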
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable section because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}
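/*
 * Rough sizing for the per-section memmap allocated above (illustrative;
 * exact numbers depend on the architecture and config): with 128 MiB
 * sections (SECTION_SIZE_BITS == 27), 4 KiB pages and a 64-byte
 * struct page, PAGES_PER_SECTION == 32768 and the memmap is
 * 32768 * 64 bytes == 2 MiB per section, which is why the boot-time
 * code below tries to lay the memmaps out contiguously per node.
 */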
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid_raw(size * map_count,
					      PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					      BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;

	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
				      map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid, NULL);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: per-node allocation callback
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
					(void *, unsigned long, unsigned long,
					unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for_each_present_section_nr(0, pnum) {
		struct mem_section *ms;

		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for_each_present_section_nr(pnum_begin + 1, pnum) {
		struct mem_section *ms;
		int nodeid;

		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
			   map_count, nodeid_begin);
		/* new start, update count etc */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
		   map_count, nodeid_begin);
}
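/*
 * Illustrative walk of the batching above: with present sections 0..3 on
 * node 0 and 4..7 on node 1, the loop issues
 *
 *	alloc_func(data, 0, 4, 4, 0);			// sections 0-3, node 0
 *	alloc_func(data, 4, NR_MEM_SECTIONS, 4, 1);	// last chunk, node 1
 *
 * i.e. one bulk allocation per node rather than one per section.
 */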
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * A section's mem_map is a big allocation (2M on 64-bit x86), while
	 * its usemap is far less than a page (about 24 bytes). Allocating a
	 * 2M-aligned mem_map followed by a tiny usemap, in turn, pushes the
	 * next mem_map into the following 2M area, so on a big system the
	 * memory ends up with a lot of holes. Instead, try to allocate the
	 * 2M mem_maps contiguously here.
	 *
	 * powerpc needs to call sparse_init_one_section() right after each
	 * sparse_early_mem_map_alloc(), so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
							(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
							(void *)map_map);
#endif

	for_each_present_section_nr(0, pnum) {
		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
								usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}
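/*
 * Illustrative sizing of the temporary arrays freed above (actual values
 * are config dependent): on x86-64 with MAX_PHYSMEM_BITS == 46 and
 * SECTION_SIZE_BITS == 27 there are 2^19 possible sections, so
 * usemap_map alone is 2^19 * 8 bytes == 4 MiB of pointers. That is why
 * both arrays come from memblock and are handed back with
 * memblock_free_early() once every present section has been initialised.
 */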
#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid, altmap);
}
static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, altmap);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap,
		struct vmem_altmap *altmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is in a
		 * logically offlined state, i.e. all of its pages have been
		 * isolated from the page allocator. If the removing section's
		 * memmap is placed on that same section it must not be freed:
		 * if it were, the page allocator could hand it out again even
		 * though the memory will soon be removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
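/*
 * Rough numbers for the non-vmemmap hotplug path above (illustrative;
 * they depend on struct page size and section size): a 2 MiB memmap
 * means get_order() asks for an order-9 page block. On a fragmented
 * system that allocation can fail, which is why
 * __kmalloc_section_memmap() quietly (__GFP_NOWARN) falls back to
 * vmalloc(), and why __kfree_section_memmap() checks is_vmalloc_addr()
 * to pick the matching free routine.
 */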
/*
 * Returns the number of sections whose mem_maps were properly
 * set: 1 on success, <= 0 on failure. On failure the memmap and
 * usemap allocated here are freed again before returning.
 */
int __meminit sparse_add_one_section(struct pglist_data *pgdat,
		unsigned long start_pfn, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * no locking for this, because it does its own
	 * plus, it does a kmalloc
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, altmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	section_mark_present(ms);

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, altmap);
	}
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap,
		struct vmem_altmap *altmap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, altmap);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			       PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap, altmap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */
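/*
 * Illustrative use of the hotplug entry points above (the real callers
 * live in the memory hotplug core): memory is added one section at a
 * time, and an already-present section is not treated as a hard error,
 * roughly:
 *
 *	ret = sparse_add_one_section(NODE_DATA(nid), start_pfn, altmap);
 *	if (ret < 0 && ret != -EEXIST)
 *		goto fail;
 *
 * sparse_remove_one_section() is the counterpart used on the hot-remove
 * path once the affected range has been offlined.
 */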