/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kzalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kzalloc(array_size, GFP_KERNEL);
        } else {
                section = memblock_virt_alloc_node(array_size, nid);
        }

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;

        mem_section[root] = section;

        return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
        unsigned long root_nr;
        struct mem_section *root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
        return (int)(ms - mem_section[0]);
}
#endif
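/*
 * For illustration: with SPARSEMEM_EXTREME the mem_section roots are
 * allocated independently, so the section number has to be recovered by
 * scanning the root array.  If @ms happens to sit at offset 5 within root 3
 * (made-up numbers), __section_nr() returns 3 * SECTIONS_PER_ROOT + 5.  In
 * the flat case the whole table is a single static array and plain pointer
 * arithmetic is enough.
 */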
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
        int section_nr = __section_nr(ms);

        if (section_nr > __highest_present_section_nr)
                __highest_present_section_nr = section_nr;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
        do {
                section_nr++;
                if (present_section_nr(section_nr))
                        return section_nr;
        } while ((section_nr < NR_MEM_SECTIONS) &&
                 (section_nr <= __highest_present_section_nr));

        return -1;
}
#define for_each_present_section_nr(start, section_nr)         \
        for (section_nr = next_present_section_nr(start-1);    \
             ((section_nr >= 0) &&                             \
              (section_nr < NR_MEM_SECTIONS) &&                \
              (section_nr <= __highest_present_section_nr));   \
             section_nr = next_present_section_nr(section_nr))

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map) {
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_IS_ONLINE;
                        section_mark_present(ms);
                }
        }
}
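/*
 * For illustration: right after memory_present() a section_mem_map does not
 * point to a mem_map yet.  It holds the node id shifted up by
 * SECTION_NID_SHIFT, with SECTION_IS_ONLINE and SECTION_MARKED_PRESENT set
 * in the low flag bits, e.g. for nid 2 the stored value is
 * (2 << SECTION_NID_SHIFT) | SECTION_IS_ONLINE | SECTION_MARKED_PRESENT.
 * sparse_init_one_section() later clears the node part and installs the
 * encoded mem_map pointer in its place.
 */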
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                     unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

unsigned long usemap_size(void)
{
        return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
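/*
 * Sizing note, values depend on the architecture's section and pageblock
 * sizes: the usemap stores NR_PAGEBLOCK_BITS of pageblock flags for every
 * pageblock in a section, i.e. SECTION_BLOCKFLAGS_BITS bits in total,
 * rounded up to whole unsigned longs by usemap_size().  That is a few tens
 * of bytes per section, which is why the boot-time usemaps are packed into
 * one allocation per node rather than each taking a page of their own (see
 * sparse_early_usemaps_alloc_node() below).
 */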
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                          unsigned long size)
{
        unsigned long goal, limit;
        unsigned long *p;
        int nid;
        /*
         * A page may contain usemaps for other sections preventing the
         * page being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        p = memblock_virt_alloc_try_nid_nopanic(size,
                                                SMP_CACHE_BYTES, goal, limit,
                                                nid);
        if (!p && limit) {
                limit = 0;
                goto again;
        }
        return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                pr_info("node %d must be removed before remove section %ld\n",
                        nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable section because they will just
         * gather other removable sections for dynamic partitioning.
         * Just notify un-removable section's number here.
         */
        pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
                usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                          unsigned long size)
{
        return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        unsigned long **usemap_map = (unsigned long **)data;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          size * usemap_count);
        if (!usemap) {
                pr_warn("%s: allocation failed\n", __func__);
                return;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = usemap;
                usemap += size;
                check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;
        unsigned long size;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
        map = memblock_virt_alloc_try_nid(size,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nid);
        return map;
}
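/*
 * Rough sizing, for illustration only (the exact numbers depend on
 * SECTION_SIZE_BITS, PAGE_SIZE and sizeof(struct page)): with 128MB
 * sections, 4KB pages and a 64-byte struct page, each section needs
 * 32768 * 64 bytes = 2MB of mem_map.  That is why the boot-time path
 * below tries to grab one large, physically contiguous chunk per node
 * instead of allocating section by section.
 */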
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = memblock_virt_alloc_try_nid(size * map_count,
                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                                          BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
                       __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long map_count, int nodeid)
{
        struct page **map_map = (struct page **)data;
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                        map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
               __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
                                        (void *, unsigned long, unsigned long,
                                        unsigned long, int), void *data)
{
        unsigned long pnum;
        unsigned long map_count;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;

        for_each_present_section_nr(0, pnum) {
                struct mem_section *ms;

                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for_each_present_section_nr(pnum_begin + 1, pnum) {
                struct mem_section *ms;
                int nodeid;

                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of from pnum_begin to pnum - 1 */
                alloc_func(data, pnum_begin, pnum,
                           map_count, nodeid_begin);
                /* new start, update count etc */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
                   map_count, nodeid_begin);
}
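/*
 * Example of the grouping done by alloc_usemap_and_memmap(), with made-up
 * section numbers: if sections 0-99 are present on node 0 and sections
 * 100-199 on node 1, alloc_func() is called twice, once with
 * (pnum_begin=0, pnum_end=100, map_count=100, nodeid=0) and once with
 * (pnum_begin=100, pnum_end=NR_MEM_SECTIONS, map_count=100, nodeid=1),
 * so each node's usemaps and mem_maps come from a single allocation.
 */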
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        int size2;
        struct page **map_map;
#endif

        /* see include/linux/mmzone.h 'struct mem_section' definition */
        BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        /*
         * A section's mem_map is a big allocation (2M on 64-bit x86), while
         * its usemap is much less than one page (roughly 24 bytes).
         * Allocating a 2M chunk (with 2M alignment) and a tiny usemap in
         * turn would push every following mem_map onto a new 2M boundary,
         * leaving a big system full of holes.  So allocate all the usemaps
         * first, then allocate the 2M mem_map chunks contiguously.
         *
         * powerpc needs to call sparse_init_one_section right after each
         * sparse_early_mem_map_alloc, so allocate usemap_map first.
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = memblock_virt_alloc(size, 0);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");
        alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
                                (void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = memblock_virt_alloc(size2, 0);
        if (!map_map)
                panic("can not allocate map_map\n");
        alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
                                (void *)map_map);
#endif

        for_each_present_section_nr(0, pnum) {
                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                        usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        memblock_free_early(__pa(map_map), size2);
#endif
        memblock_free_early(__pa(usemap_map), size);
}
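/*
 * Footprint note, with illustrative numbers only: usemap_map and map_map
 * above are indexed by section number, so each is
 * sizeof(pointer) * NR_MEM_SECTIONS bytes.  With, say, 2^19 possible
 * sections and 8-byte pointers that is 4MB per array, which is why both
 * temporary arrays are handed back to memblock as soon as every present
 * section has been initialised.
 */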
#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /* onlining code should never touch invalid ranges */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map |= SECTION_IS_ONLINE;
        }
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                /*
                 * TODO this needs some double checking. Offlining code makes
                 * sure to check pfn_valid but those checks might be just bogus
                 */
                if (WARN_ON(!valid_section_nr(section_nr)))
                        continue;

                ms = __nr_to_section(section_nr);
                ms->section_mem_map &= ~SECTION_IS_ONLINE;
        }
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long start = (unsigned long)memmap;
        unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

        vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
        return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic, nr_pages;
        struct page *page = virt_to_page(memmap);

        nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                >> PAGE_SHIFT;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->freelist;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page_private(page);

                /*
                 * When this function is called, the removing section is in a
                 * logically offlined state, so all of its pages are isolated
                 * from the page allocator.  If the removing section's memmap
                 * lives on that same section, it must not be freed here;
                 * otherwise the page allocator could hand it out again even
                 * though it is about to be removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * no locking for this, because it does its own;
         * plus, it does a kmalloc
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

        section_mark_present(ms);

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap);
        }
        return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
        int i;

        if (!memmap)
                return;

        for (i = 0; i < nr_pages; i++) {
                if (PageHWPoison(&memmap[i])) {
                        atomic_long_sub(1, &num_poisoned_pages);
                        ClearPageHWPoison(&memmap[i]);
                }
        }
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap);
                return;
        }

        /*
         * The usemap came from bootmem. This is packed with other usemaps
         * on the section which has pgdat at boot time. Just keep it as is now.
         */

        if (memmap)
                free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL, flags;
        struct pglist_data *pgdat = zone->zone_pgdat;

        pgdat_resize_lock(pgdat, &flags);
        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }
        pgdat_resize_unlock(pgdat, &flags);

        clear_hwpoisoned_pages(memmap + map_offset,
                        PAGES_PER_SECTION - map_offset);
        free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */