/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memremap.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/mm_inline.h>
#include <linux/firmware-map.h>
#include <linux/stop_machine.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/compaction.h>

#include <asm/tlbflush.h>

#include "internal.h"

/*
 * online_page_callback contains a pointer to the current page onlining
 * function. Initially it is generic_online_page(). If required, it can be
 * changed by calling set_online_page_callback() to register a callback and
 * restore_online_page_callback() to restore the generic callback.
 */

static void generic_online_page(struct page *page);

static online_page_callback_t online_page_callback = generic_online_page;
static DEFINE_MUTEX(online_page_callback_lock);

DEFINE_STATIC_PERCPU_RWSEM(mem_hotplug_lock);

void get_online_mems(void)
{
	percpu_down_read(&mem_hotplug_lock);
}

void put_online_mems(void)
{
	percpu_up_read(&mem_hotplug_lock);
}

bool movable_node_enabled = false;

#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE
bool memhp_auto_online;
#else
bool memhp_auto_online = true;
#endif
EXPORT_SYMBOL_GPL(memhp_auto_online);

static int __init setup_memhp_default_state(char *str)
{
	if (!strcmp(str, "online"))
		memhp_auto_online = true;
	else if (!strcmp(str, "offline"))
		memhp_auto_online = false;

	return 1;
}
__setup("memhp_default_state=", setup_memhp_default_state);

void mem_hotplug_begin(void)
{
	cpus_read_lock();
	percpu_down_write(&mem_hotplug_lock);
}

void mem_hotplug_done(void)
{
	percpu_up_write(&mem_hotplug_lock);
	cpus_read_unlock();
}

/* add this memory to the iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res, *conflict;

	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	if (!res)
		return ERR_PTR(-ENOMEM);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	conflict = request_resource_conflict(&iomem_resource, res);
	if (conflict) {
		if (conflict->desc == IORES_DESC_DEVICE_PRIVATE_MEMORY) {
			pr_debug("Device unaddressable memory block "
				 "memory hotplug at %#010llx !\n",
				 (unsigned long long)start);
		}
		pr_debug("System RAM resource %pR cannot be added\n", res);
		kfree(res);
		return ERR_PTR(-EEXIST);
	}
	return res;
}

static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
}
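
/*
 * Illustrative sketch (not compiled): how the two resource helpers above
 * pair up in the hot-add path. add_memory() later in this file follows this
 * pattern; the example_hot_add() name is a placeholder and the error
 * handling shown is a minimal outline of the real flow.
 */
#if 0
static int example_hot_add(int nid, u64 start, u64 size)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);	/* claim iomem range */
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res, memhp_auto_online);
	if (ret < 0)
		release_memory_resource(res);		/* undo on failure */
	return ret;
}
#endif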

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
void get_page_bootmem(unsigned long info, struct page *page,
		      unsigned long type)
{
	page->freelist = (void *)type;
	SetPagePrivate(page);
	set_page_private(page, info);
	page_ref_inc(page);
}

void put_page_bootmem(struct page *page)
{
	unsigned long type;

	type = (unsigned long) page->freelist;
	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

	if (page_ref_dec_return(page) == 1) {
		page->freelist = NULL;
		ClearPagePrivate(page);
		set_page_private(page, 0);
		INIT_LIST_HEAD(&page->lru);
		free_reserved_page(page);
	}
}
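
/*
 * Illustrative sketch (not compiled): get_page_bootmem()/put_page_bootmem()
 * form a reference count on pages backing bootmem-allocated metadata
 * (memmap, usemap, pgdat). The registration code below takes one reference
 * per page; the hot-remove path drops them, and the final put frees the
 * page via free_reserved_page().
 */
#if 0
	/* registration, as in register_page_bootmem_info_section() below */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	/* teardown drops the reference again */
	put_page_bootmem(page);
#endif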

#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
#ifndef CONFIG_SPARSEMEM_VMEMMAP
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	/* Get section's memmap address */
	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	/*
	 * Get page for the memmap's phys address
	 * XXX: need more consideration for sparse_vmemmap...
	 */
	page = virt_to_page(memmap);
	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

	/* remember memmap's page */
	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, SECTION_INFO);

	usemap = ms->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);

}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
static void register_page_bootmem_info_section(unsigned long start_pfn)
{
	unsigned long *usemap, mapsize, section_nr, i;
	struct mem_section *ms;
	struct page *page, *memmap;

	section_nr = pfn_to_section_nr(start_pfn);
	ms = __nr_to_section(section_nr);

	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

	register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

	usemap = ms->pageblock_flags;
	page = virt_to_page(usemap);

	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;

	for (i = 0; i < mapsize; i++, page++)
		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
	unsigned long i, pfn, end_pfn, nr_pages;
	int node = pgdat->node_id;
	struct page *page;

	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
	page = virt_to_page(pgdat);

	for (i = 0; i < nr_pages; i++, page++)
		get_page_bootmem(node, page, NODE_INFO);

	pfn = pgdat->node_start_pfn;
	end_pfn = pgdat_end_pfn(pgdat);

	/* register section info */
	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		/*
		 * Some platforms can assign the same pfn to multiple nodes -
		 * on node0 as well as nodeN. To avoid registering a pfn
		 * against multiple nodes, we check that this pfn does not
		 * already reside in some other node.
		 */
		if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
			register_page_bootmem_info_section(pfn);
	}
}
#endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */

static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
		struct vmem_altmap *altmap, bool want_memblock)
{
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);
	if (ret < 0)
		return ret;

	if (!want_memblock)
		return 0;

	return hotplug_memory_register(nid, __pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory. It is expected that
 * architectures that support memory hotplug will call this function after
 * deciding the zone to which the new pages should be added.
 */
int __ref __add_pages(int nid, unsigned long phys_start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;

	/* During initialization of mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	if (altmap) {
		/*
		 * Validate that the altmap is within the bounds of the
		 * total request.
		 */
		if (altmap->base_pfn != phys_start_pfn
				|| vmem_altmap_offset(altmap) > nr_pages) {
			pr_warn_once("memory add fail, invalid altmap\n");
			err = -EINVAL;
			goto out;
		}
		altmap->alloc = 0;
	}

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(nid, section_nr_to_pfn(i), altmap,
				want_memblock);

		/*
		 * EEXIST is finally dealt with by the ioresource collision
		 * check. See add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
		cond_resched();
	}
	vmemmap_populate_print_last();
out:
	return err;
}
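
/*
 * Illustrative sketch (not compiled): a typical arch_add_memory()
 * implementation ends up in __add_pages(). The architecture maps the range
 * into the kernel page tables and then hands the pfn range over; the
 * commented-out mapping helper is a placeholder and varies per arch.
 */
#if 0
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		    bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/* arch-specific: create the linear mapping for the new range */
	/* init_memory_mapping(start, start + size); */

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
#endif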

#ifdef CONFIG_MEMORY_HOTREMOVE
/* find the smallest valid pfn in the range [start_pfn, end_pfn) */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
				     unsigned long start_pfn,
				     unsigned long end_pfn)
{
	struct mem_section *ms;

	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(start_pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}

/* find the biggest valid pfn in the range [start_pfn, end_pfn). */
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
				    unsigned long start_pfn,
				    unsigned long end_pfn)
{
	struct mem_section *ms;
	unsigned long pfn;

	/* pfn is the end pfn of a memory section. */
	pfn = end_pfn - 1;
	for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (unlikely(pfn_to_nid(pfn) != nid))
			continue;

		if (zone && zone != page_zone(pfn_to_page(pfn)))
			continue;

		return pfn;
	}

	return 0;
}

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
			     unsigned long end_pfn)
{
	unsigned long zone_start_pfn = zone->zone_start_pfn;
	unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
	unsigned long zone_end_pfn = z;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = zone_to_nid(zone);

	zone_span_writelock(zone);
	if (zone_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the zone, we
		 * need to shrink zone->zone_start_pfn and
		 * zone->spanned_pages. In this case, we find the second
		 * smallest valid mem_section for shrinking the zone.
		 */
		pfn = find_smallest_section_pfn(nid, zone, end_pfn,
						zone_end_pfn);
		if (pfn) {
			zone->zone_start_pfn = pfn;
			zone->spanned_pages = zone_end_pfn - pfn;
		}
	} else if (zone_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the zone, we
		 * need to shrink zone->spanned_pages. In this case, we find
		 * the second biggest valid mem_section for shrinking the
		 * zone.
		 */
		pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
					       start_pfn);
		if (pfn)
			zone->spanned_pages = pfn - zone_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest
	 * mem_section in the zone, it only creates a hole in the zone. So
	 * in this case, we need not change the zone. But perhaps the zone
	 * contains nothing but holes; thus we check whether the zone has
	 * any valid section left.
	 */
	pfn = zone_start_pfn;
	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (page_zone(pfn_to_page(pfn)) != zone)
			continue;

		/* If the section is the current section, continue the loop */
		if (start_pfn == pfn)
			continue;

		/* We found a valid section, so there is nothing to do */
		zone_span_writeunlock(zone);
		return;
	}

	/* The zone has no valid section */
	zone->zone_start_pfn = 0;
	zone->spanned_pages = 0;
	zone_span_writeunlock(zone);
}

static void shrink_pgdat_span(struct pglist_data *pgdat,
			      unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
	unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
	unsigned long pgdat_end_pfn = p;
	unsigned long pfn;
	struct mem_section *ms;
	int nid = pgdat->node_id;

	if (pgdat_start_pfn == start_pfn) {
		/*
		 * If the section is the smallest section in the pgdat, we
		 * need to shrink pgdat->node_start_pfn and
		 * pgdat->node_spanned_pages. In this case, we find the
		 * second smallest valid mem_section for shrinking the pgdat.
		 */
		pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
						pgdat_end_pfn);
		if (pfn) {
			pgdat->node_start_pfn = pfn;
			pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
		}
	} else if (pgdat_end_pfn == end_pfn) {
		/*
		 * If the section is the biggest section in the pgdat, we
		 * need to shrink pgdat->node_spanned_pages. In this case,
		 * we find the second biggest valid mem_section for
		 * shrinking the pgdat.
		 */
		pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
					       start_pfn);
		if (pfn)
			pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
	}

	/*
	 * If the section is neither the biggest nor the smallest
	 * mem_section in the pgdat, it only creates a hole in the pgdat.
	 * So in this case, we need not change the pgdat. But perhaps the
	 * pgdat contains nothing but holes; thus we check whether the
	 * pgdat has any valid section left.
	 */
	pfn = pgdat_start_pfn;
	for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);

		if (unlikely(!valid_section(ms)))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/* If the section is the current section, continue the loop */
		if (start_pfn == pfn)
			continue;

		/* We found a valid section, so there is nothing to do */
		return;
	}

	/* The pgdat has no valid section */
	pgdat->node_start_pfn = 0;
	pgdat->node_spanned_pages = 0;
}

static void __remove_zone(struct zone *zone, unsigned long start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	unsigned long flags;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
	shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);
}

static int __remove_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset, struct vmem_altmap *altmap)
{
	unsigned long start_pfn;
	int scn_nr;
	int ret = -EINVAL;

	if (!valid_section(ms))
		return ret;

	ret = unregister_memory_section(ms);
	if (ret)
		return ret;

	scn_nr = __section_nr(ms);
	start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
	__remove_zone(zone, start_pfn);

	sparse_remove_one_section(zone, ms, map_offset, altmap);
	return 0;
}

/**
 * __remove_pages() - remove sections of pages from a zone
 * @zone: zone from which pages need to be removed
 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 * @nr_pages: number of pages to remove (must be multiple of section size)
 * @altmap: alternative device page map or %NULL if the default memmap is used
 *
 * Generic helper function to remove section mappings and sysfs entries
 * for the section of the memory we are removing. Caller needs to make
 * sure that pages are marked reserved and zones are adjusted properly by
 * calling offline_pages().
 */
int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
		 unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long i;
	unsigned long map_offset = 0;
	int sections_to_remove, ret = 0;

	/* In the ZONE_DEVICE case device driver owns the memory region */
	if (is_dev_zone(zone)) {
		if (altmap)
			map_offset = vmem_altmap_offset(altmap);
	} else {
		resource_size_t start, size;

		start = phys_start_pfn << PAGE_SHIFT;
		size = nr_pages * PAGE_SIZE;

		ret = release_mem_region_adjustable(&iomem_resource, start,
					size);
		if (ret) {
			resource_size_t endres = start + size - 1;

			pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
					&start, &endres, ret);
		}
	}

	clear_zone_contiguous(zone);

	/*
	 * We can only remove entire sections
	 */
	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
	BUG_ON(nr_pages % PAGES_PER_SECTION);

	sections_to_remove = nr_pages / PAGES_PER_SECTION;
	for (i = 0; i < sections_to_remove; i++) {
		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;

		ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
				altmap);
		map_offset = 0;
		if (ret)
			break;
	}

	set_zone_contiguous(zone);

	return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

int set_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == generic_online_page) {
		online_page_callback = callback;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(set_online_page_callback);

int restore_online_page_callback(online_page_callback_t callback)
{
	int rc = -EINVAL;

	get_online_mems();
	mutex_lock(&online_page_callback_lock);

	if (online_page_callback == callback) {
		online_page_callback = generic_online_page;
		rc = 0;
	}

	mutex_unlock(&online_page_callback_lock);
	put_online_mems();

	return rc;
}
EXPORT_SYMBOL_GPL(restore_online_page_callback);
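
/*
 * Illustrative sketch (not compiled): a balloon-style driver can intercept
 * page onlining by swapping in its own callback, similar to what the
 * Hyper-V balloon driver does. my_online_page() is a placeholder name; it
 * reuses the exported __online_page_*() helpers defined just below.
 */
#if 0
static void my_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

	/* driver init */
	set_online_page_callback(&my_online_page);
	/* driver teardown */
	restore_online_page_callback(&my_online_page);
#endif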

void __online_page_set_limits(struct page *page)
{
}
EXPORT_SYMBOL_GPL(__online_page_set_limits);

void __online_page_increment_counters(struct page *page)
{
	adjust_managed_page_count(page, 1);
}
EXPORT_SYMBOL_GPL(__online_page_increment_counters);

void __online_page_free(struct page *page)
{
	__free_reserved_page(page);
}
EXPORT_SYMBOL_GPL(__online_page_free);

static void generic_online_page(struct page *page)
{
	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
}

static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;

	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			(*online_page_callback)(page);
			onlined_pages++;
		}

	online_mem_sections(start_pfn, start_pfn + nr_pages);

	*(unsigned long *)arg = onlined_pages;
	return 0;
}

/* check which state of node_states will be changed when onlining memory */
static void node_states_check_changes_online(unsigned long nr_pages,
	struct zone *zone, struct memory_notify *arg)
{
	int nid = zone_to_nid(zone);
	enum zone_type zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * If the memory to be onlined is in a zone of 0...zone_last, and
	 * the zones of 0...zone_last don't have memory before onlining, we
	 * will need to set the node to node_states[N_NORMAL_MEMORY] after
	 * the memory is online.
	 */
	if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY))
		arg->status_change_nid_normal = nid;
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY))
		arg->status_change_nid_high = nid;
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * If the node doesn't have memory before onlining, we will need to
	 * set the node to node_states[N_MEMORY] after the memory
	 * is online.
	 */
	if (!node_state(nid, N_MEMORY))
		arg->status_change_nid = nid;
	else
		arg->status_change_nid = -1;
}

static void node_states_set_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_set_state(node, N_NORMAL_MEMORY);

	if (arg->status_change_nid_high >= 0)
		node_set_state(node, N_HIGH_MEMORY);

	node_set_state(node, N_MEMORY);
}

static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = zone_end_pfn(zone);

	if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
}

static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,
		unsigned long nr_pages)
{
	unsigned long old_end_pfn = pgdat_end_pfn(pgdat);

	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn;
}

void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nid = pgdat->node_id;
	unsigned long flags;

	if (zone_is_empty(zone))
		init_currently_empty_zone(zone, start_pfn, nr_pages);

	clear_zone_contiguous(zone);

	/* TODO Huh pgdat is irqsave while zone is not. It used to be like that before */
	pgdat_resize_lock(pgdat, &flags);
	zone_span_writelock(zone);
	resize_zone_range(zone, start_pfn, nr_pages);
	zone_span_writeunlock(zone);
	resize_pgdat_range(pgdat, start_pfn, nr_pages);
	pgdat_resize_unlock(pgdat, &flags);

	/*
	 * TODO now we have a visible range of pages which are not associated
	 * with their zone properly. Not nice but set_pfnblock_flags_mask
	 * expects the zone spans the pfn range. All the pages in the range
	 * are reserved so nobody should be touching them so we should be safe
	 */
	memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn,
			MEMMAP_HOTPLUG, altmap);

	set_zone_contiguous(zone);
}

/*
 * Returns a default kernel memory zone for the given pfn range.
 * If no kernel zone covers this pfn range, it will automatically go
 * to ZONE_NORMAL.
 */
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	int zid;

	for (zid = 0; zid <= ZONE_NORMAL; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (zone_intersects(zone, start_pfn, nr_pages))
			return zone;
	}

	return &pgdat->node_zones[ZONE_NORMAL];
}

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
			nr_pages);
	struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
	bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
	bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

	/*
	 * We inherit the existing zone in the simple case where the zones
	 * do not overlap in the given range.
	 */
	if (in_kernel ^ in_movable)
		return (in_kernel) ? kernel_zone : movable_zone;

	/*
	 * If the range doesn't belong to any zone, or two zones overlap in
	 * the given range, then we use the movable zone only if movable_node
	 * is enabled, because we always online to a kernel zone by default.
	 */
	return movable_node_enabled ? movable_zone : kernel_zone;
}

struct zone * zone_for_pfn_range(int online_type, int nid, unsigned start_pfn,
		unsigned long nr_pages)
{
	if (online_type == MMOP_ONLINE_KERNEL)
		return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages);

	if (online_type == MMOP_ONLINE_MOVABLE)
		return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];

	return default_zone_for_pfn(nid, start_pfn, nr_pages);
}
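
/*
 * Illustrative sketch (not compiled): how online_type maps to a zone.
 * MMOP_ONLINE_KERNEL forces a kernel zone, MMOP_ONLINE_MOVABLE forces
 * ZONE_MOVABLE, and the default case lets default_zone_for_pfn() decide
 * based on zone adjacency and the movable_node setting. The nid/pfn/
 * nr_pages values are placeholders.
 */
#if 0
	struct zone *zone;

	zone = zone_for_pfn_range(MMOP_ONLINE_MOVABLE, nid, pfn, nr_pages);
	/* zone == &NODE_DATA(nid)->node_zones[ZONE_MOVABLE] here */
#endif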

/*
 * Associates the given pfn range with the given node and the zone appropriate
 * for the given online type.
 */
static struct zone * __meminit move_pfn_range(int online_type, int nid,
		unsigned long start_pfn, unsigned long nr_pages)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	move_pfn_range_to_zone(zone, start_pfn, nr_pages, NULL);
	return zone;
}

/* Must be protected by mem_hotplug_begin() or a device_lock */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;
	struct memory_block *mem;

	/*
	 * We can't use pfn_to_nid() because nid might be stored in struct page
	 * which is not yet initialized. Instead, we find nid from memory block.
	 */
	mem = find_memory_block(__pfn_to_section(pfn));
	nid = mem->nid;

	/* associate pfn range with the zone */
	zone = move_pfn_range(online_type, nid, pfn, nr_pages);

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_online(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_addition;

	/*
	 * If this zone is not populated, then it is not in zonelist.
	 * This means the page allocator ignores this zone.
	 * So, zonelist must be updated after online.
	 */
	if (!populated_zone(zone)) {
		need_zonelists_rebuild = 1;
		setup_zone_pageset(zone);
	}

	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	if (ret) {
		if (need_zonelists_rebuild)
			zone_pcp_reset(zone);
		goto failed_addition;
	}

	zone->present_pages += onlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages += onlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	if (onlined_pages) {
		node_states_set_node(nid, &arg);
		if (need_zonelists_rebuild)
			build_all_zonelists(NULL);
		else
			zone_pcp_update(zone);
	}

	init_per_zone_wmark_min();

	if (onlined_pages) {
		kswapd_run(nid);
		kcompactd_run(nid);
	}

	vm_total_pages = nr_free_pagecache_pages();

	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);
	return 0;

failed_addition:
	pr_debug("online_pages [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) pfn << PAGE_SHIFT,
		 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_ONLINE, &arg);
	return ret;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
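
/*
 * Note on usage: in practice online_pages() is reached via the memory
 * block device, either from user space, e.g.:
 *
 *	echo online_movable > /sys/devices/system/memory/memoryN/state
 *
 * or from the kernel through device_online(); both paths hold the device
 * lock that the locking comment above requires.
 */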

static void reset_node_present_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->present_pages = 0;

	pgdat->node_present_pages = 0;
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = PFN_DOWN(start);

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		pgdat = arch_alloc_nodedata(nid);
		if (!pgdat)
			return NULL;

		arch_refresh_nodedata(nid, pgdat);
	} else {
		/*
		 * Reset the nr_zones, order and classzone_idx before reuse.
		 * Note that kswapd will init kswapd_classzone_idx properly
		 * when it starts in the near future.
		 */
		pgdat->nr_zones = 0;
		pgdat->kswapd_order = 0;
		pgdat->kswapd_classzone_idx = 0;
	}

	/* we can use NODE_DATA(nid) from here */

	/* init node's zones as empty zones, we don't have any present pages.*/
	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
	pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat);

	/*
	 * The node we allocated has no zone fallback lists. To avoid
	 * accessing a not-initialized zonelist, build one here.
	 */
	build_all_zonelists(pgdat);

	/*
	 * zone->managed_pages is set to an approximate value in
	 * free_area_init_core(), which will cause
	 * /sys/device/system/node/nodeX/meminfo to report wrong data.
	 * So reset it to 0 before any memory is onlined.
	 */
	reset_node_managed_pages(pgdat);

	/*
	 * When memory is hot-added, all the memory is in offline state. So
	 * clear all zones' present_pages because they will be updated in
	 * online_pages() and offline_pages().
	 */
	reset_node_present_pages(pgdat);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	free_percpu(pgdat->per_cpu_nodestats);
	arch_free_nodedata(pgdat);
}


/**
 * try_online_node - online a node if offlined
 * @nid: the node ID
 *
 * called by cpu_up() to online a node without onlined memory.
 */
int try_online_node(int nid)
{
	pg_data_t *pgdat;
	int ret;

	if (node_online(nid))
		return 0;

	mem_hotplug_begin();
	pgdat = hotadd_new_pgdat(nid, 0);
	if (!pgdat) {
		pr_err("Cannot online node %d due to NULL pgdat\n", nid);
		ret = -ENOMEM;
		goto out;
	}
	node_set_online(nid);
	ret = register_one_node(nid);
	BUG_ON(ret);
out:
	mem_hotplug_done();
	return ret;
}

static int check_hotplug_memory_range(u64 start, u64 size)
{
	unsigned long block_sz = memory_block_size_bytes();
	u64 block_nr_pages = block_sz >> PAGE_SHIFT;
	u64 nr_pages = size >> PAGE_SHIFT;
	u64 start_pfn = PFN_DOWN(start);

	/* memory range must be block size aligned */
	if (!nr_pages || !IS_ALIGNED(start_pfn, block_nr_pages) ||
	    !IS_ALIGNED(nr_pages, block_nr_pages)) {
		pr_err("Block size [%#lx] unaligned hotplug range: start %#llx, size %#llx",
		       block_sz, start, size);
		return -EINVAL;
	}

	return 0;
}

static int online_memory_block(struct memory_block *mem, void *arg)
{
	return device_online(&mem->dev);
}

/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
int __ref add_memory_resource(int nid, struct resource *res, bool online)
{
	u64 start, size;
	pg_data_t *pgdat = NULL;
	bool new_pgdat;
	bool new_node;
	int ret;

	start = res->start;
	size = resource_size(res);

	ret = check_hotplug_memory_range(start, size);
	if (ret)
		return ret;

	{	/* Stupid hack to suppress address-never-null warning */
		void *p = NODE_DATA(nid);
		new_pgdat = !p;
	}

	mem_hotplug_begin();

	/*
	 * Add new range to memblock so that when hotadd_new_pgdat() is called
	 * to allocate new pgdat, get_pfn_range_for_nid() will be able to find
	 * this new range and calculate total pages correctly. The range will
	 * be removed at hot-remove time.
	 */
	memblock_add_node(start, size, nid);

	new_node = !node_online(nid);
	if (new_node) {
		pgdat = hotadd_new_pgdat(nid, start);
		ret = -ENOMEM;
		if (!pgdat)
			goto error;
	}

	/* call arch's memory hotadd */
	ret = arch_add_memory(nid, start, size, NULL, true);

	if (ret < 0)
		goto error;

	/* we online node here. we can't roll back from here. */
	node_set_online(nid);

	if (new_node) {
		unsigned long start_pfn = start >> PAGE_SHIFT;
		unsigned long nr_pages = size >> PAGE_SHIFT;

		ret = __register_one_node(nid);
		if (ret)
			goto register_fail;

		/*
		 * link memory sections under this node. This is already
		 * done when creating memory sections in register_new_memory
		 * but that depends on having the node registered, so offline
		 * nodes have to go through register_node.
		 * TODO clean up this mess.
		 */
		ret = link_mem_sections(nid, start_pfn, nr_pages);
register_fail:
		/*
		 * If the sysfs file of the new node can't be created, cpus
		 * on the node can't be hot-added. There is no rollback way
		 * now, so check it with BUG_ON() to catch it reluctantly.
		 */
		BUG_ON(ret);
	}

	/* create new memmap entry */
	firmware_map_add_hotplug(start, start + size, "System RAM");

	/* online pages if requested */
	if (online)
		walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1),
				  NULL, online_memory_block);

	goto out;

error:
	/* rollback pgdat allocation and others */
	if (new_pgdat && pgdat)
		rollback_node_hotadd(nid, pgdat);
	memblock_remove(start, size);

out:
	mem_hotplug_done();
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory_resource);

int __ref add_memory(int nid, u64 start, u64 size)
{
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (IS_ERR(res))
		return PTR_ERR(res);

	ret = add_memory_resource(nid, res, memhp_auto_online);
	if (ret < 0)
		release_memory_resource(res);
	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);
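
/*
 * Illustrative sketch (not compiled): a typical hot-add caller (e.g. the
 * ACPI memory device driver) resolves a nid for the new physical range and
 * calls add_memory(). The info->start_addr/info->length fields are
 * placeholders for whatever the caller's firmware description provides.
 */
#if 0
	int nid = memory_add_physaddr_to_nid(info->start_addr);

	ret = add_memory(nid, info->start_addr, info->length);
#endif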

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 * set and the size of the free page is given by page_order(). Using this,
 * the function determines if the pageblock contains only free pages.
 * Due to buddy constraints, a free page at least the size of a pageblock will
 * be located at the start of the pageblock.
 */
static inline int pageblock_free(struct page *page)
{
	return PageBuddy(page) && page_order(page) >= pageblock_order;
}

/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
{
	/* Ensure the starting page is pageblock-aligned */
	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));

	/* If the entire pageblock is free, move to the end of the free page */
	if (pageblock_free(page)) {
		int order;
		/* be careful. we don't have locks, page_order can be changed.*/
		order = page_order(page);
		if ((order < MAX_ORDER) && (order >= pageblock_order))
			return page + (1 << order);
	}

	return page + pageblock_nr_pages;
}

/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	struct page *page = pfn_to_page(start_pfn);
	struct page *end_page = page + nr_pages;

	/* Check the starting page of each pageblock within the range */
	for (; page < end_page; page = next_active_pageblock(page)) {
		if (!is_pageblock_removable_nolock(page))
			return false;
		cond_resched();
	}

	/* All pageblocks in the memory block are likely to be hot-removable */
	return true;
}

/*
 * Confirm all pages in a range [start, end) belong to the same zone.
 * When true, return its valid [start, end).
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
			 unsigned long *valid_start, unsigned long *valid_end)
{
	unsigned long pfn, sec_end_pfn;
	unsigned long start, end;
	struct zone *zone = NULL;
	struct page *page;
	int i;

	for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
	     pfn < end_pfn;
	     pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
		/* Make sure the memory section is present first */
		if (!present_section_nr(pfn_to_section_nr(pfn)))
			continue;
		for (; pfn < sec_end_pfn && pfn < end_pfn;
		     pfn += MAX_ORDER_NR_PAGES) {
			i = 0;
			/* This is just a CONFIG_HOLES_IN_ZONE check.*/
			while ((i < MAX_ORDER_NR_PAGES) &&
				!pfn_valid_within(pfn + i))
				i++;
			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
				continue;
			page = pfn_to_page(pfn + i);
			if (zone && page_zone(page) != zone)
				return 0;
			if (!zone)
				start = pfn + i;
			zone = page_zone(page);
			end = pfn + MAX_ORDER_NR_PAGES;
		}
	}

	if (zone) {
		*valid_start = start;
		*valid_end = min(end, end_pfn);
		return 1;
	} else {
		return 0;
	}
}

/*
 * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
 * non-lru movable pages and hugepages). We scan pfns because that is much
 * easier than scanning over a linked list. This function returns the pfn
 * of the first found movable page if it's found, otherwise 0.
 */
static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;

	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
			if (__PageMovable(page))
				return pfn;
			if (PageHuge(page)) {
				if (page_huge_active(page))
					return pfn;
				else
					pfn = round_up(pfn + 1,
						1 << compound_order(page)) - 1;
			}
		}
	}
	return 0;
}
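
/*
 * Illustrative sketch (not compiled): the offline path repeatedly pairs
 * scan_movable_pages() with do_migrate_range() until the range holds no
 * movable pages; this is the core of the repeat loop in __offline_pages()
 * further below.
 */
#if 0
	pfn = scan_movable_pages(start_pfn, end_pfn);
	if (pfn) {	/* found something movable: migrate it away, rescan */
		ret = do_migrate_range(pfn, end_pfn);
		goto repeat;
	}
#endif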

static struct page *new_node_page(struct page *page, unsigned long private,
		int **result)
{
	int nid = page_to_nid(page);
	nodemask_t nmask = node_states[N_MEMORY];

	/*
	 * try to allocate from a different node but reuse this node if there
	 * are no other online nodes to be used (e.g. we are offlining a part
	 * of the only existing node)
	 */
	node_clear(nid, nmask);
	if (nodes_empty(nmask))
		node_set(nid, nmask);

	return new_page_nodemask(page, nid, &nmask);
}

#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);

		if (PageHuge(page)) {
			struct page *head = compound_head(page);
			pfn = page_to_pfn(head) + (1<<compound_order(head)) - 1;
			if (compound_order(head) > PFN_SECTION_SHIFT) {
				ret = -EBUSY;
				break;
			}
			if (isolate_huge_page(page, &source))
				move_pages -= 1 << compound_order(head);
			continue;
		} else if (thp_migration_supported() && PageTransHuge(page))
			pfn = page_to_pfn(compound_head(page))
				+ hpage_nr_pages(page) - 1;

		if (!get_page_unless_zero(page))
			continue;
		/*
		 * We can skip free pages. And we can deal with pages on
		 * LRU and non-lru movable pages.
		 */
		if (PageLRU(page))
			ret = isolate_lru_page(page);
		else
			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
		if (!ret) { /* Success */
			put_page(page);
			list_add_tail(&page->lru, &source);
			move_pages--;
			if (!__PageMovable(page))
				inc_node_page_state(page, NR_ISOLATED_ANON +
						    page_is_file_cache(page));

		} else {
#ifdef CONFIG_DEBUG_VM
			pr_alert("failed to isolate pfn %lx\n", pfn);
			dump_page(page, "isolation failed");
#endif
			put_page(page);
			/*
			 * Because we don't have a big zone->lock, we should
			 * check this again here.
			 */
			if (page_count(page)) {
				not_managed++;
				ret = -EBUSY;
				break;
			}
		}
	}
	if (!list_empty(&source)) {
		if (not_managed) {
			putback_movable_pages(&source);
			goto out;
		}

		/* Allocate a new page from the nearest neighbor node */
		ret = migrate_pages(&source, new_node_page, NULL, 0,
					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
		if (ret)
			putback_movable_pages(&source);
	}
out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource, are
 * isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;

	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	movable_node_enabled = true;
#else
	pr_warn("movable_node parameter depends on CONFIG_HAVE_MEMBLOCK_NODE_MAP to work properly\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);

/* check which state of node_states will be changed when offlining memory */
static void node_states_check_changes_offline(unsigned long nr_pages,
		struct zone *zone, struct memory_notify *arg)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	unsigned long present_pages = 0;
	enum zone_type zt, zone_last = ZONE_NORMAL;

	/*
	 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_NORMAL,
	 * set zone_last to ZONE_NORMAL.
	 *
	 * If we don't have HIGHMEM nor movable node,
	 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of
	 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE.
	 */
	if (N_MEMORY == N_NORMAL_MEMORY)
		zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_NORMAL_MEMORY] will be changed.
	 * If the memory to be offlined is in a zone of 0...zone_last,
	 * and it is the last present memory, 0...zone_last will
	 * become empty after the offlining, thus we can determine that we
	 * will need to clear the node from node_states[N_NORMAL_MEMORY].
	 */
	for (zt = 0; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_normal = zone_to_nid(zone);
	else
		arg->status_change_nid_normal = -1;

#ifdef CONFIG_HIGHMEM
	/*
	 * If we have movable node, node_states[N_HIGH_MEMORY]
	 * contains nodes which have zones of 0...ZONE_HIGHMEM,
	 * set zone_last to ZONE_HIGHMEM.
	 *
	 * If we don't have movable node, node_states[N_NORMAL_MEMORY]
	 * contains nodes which have zones of 0...ZONE_MOVABLE,
	 * set zone_last to ZONE_MOVABLE.
	 */
	zone_last = ZONE_HIGHMEM;
	if (N_MEMORY == N_HIGH_MEMORY)
		zone_last = ZONE_MOVABLE;

	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (zone_idx(zone) <= zone_last && nr_pages >= present_pages)
		arg->status_change_nid_high = zone_to_nid(zone);
	else
		arg->status_change_nid_high = -1;
#else
	arg->status_change_nid_high = arg->status_change_nid_normal;
#endif

	/*
	 * node_states[N_HIGH_MEMORY] contains nodes which have
	 * 0...ZONE_MOVABLE
	 */
	zone_last = ZONE_MOVABLE;

	/*
	 * Check whether node_states[N_HIGH_MEMORY] will be changed.
	 * If we try to offline the last present @nr_pages from the node,
	 * we can determine that we will need to clear the node from
	 * node_states[N_HIGH_MEMORY].
	 */
	for (; zt <= zone_last; zt++)
		present_pages += pgdat->node_zones[zt].present_pages;
	if (nr_pages >= present_pages)
		arg->status_change_nid = zone_to_nid(zone);
	else
		arg->status_change_nid = -1;
}

static void node_states_clear_node(int node, struct memory_notify *arg)
{
	if (arg->status_change_nid_normal >= 0)
		node_clear_state(node, N_NORMAL_MEMORY);

	if ((N_MEMORY != N_NORMAL_MEMORY) &&
	    (arg->status_change_nid_high >= 0))
		node_clear_state(node, N_HIGH_MEMORY);

	if ((N_MEMORY != N_HIGH_MEMORY) &&
	    (arg->status_change_nid >= 0))
		node_clear_state(node, N_MEMORY);
}

static int __ref __offline_pages(unsigned long start_pfn,
		unsigned long end_pfn)
{
	unsigned long pfn, nr_pages;
	long offlined_pages;
	int ret, node;
	unsigned long flags;
	unsigned long valid_start, valid_end;
	struct zone *zone;
	struct memory_notify arg;

	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/*
	 * This makes hotplug much easier and more readable. We assume this
	 * for now.
	 */
	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
		return -EINVAL;

	zone = page_zone(pfn_to_page(valid_start));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn,
				       MIGRATE_MOVABLE, true);
	if (ret)
		return ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	node_states_check_changes_offline(nr_pages, zone, &arg);

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
repeat:
	/* start memory hot removal */
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;

	cond_resched();
	lru_add_drain_all();
	drain_all_pages(zone);

	pfn = scan_movable_pages(start_pfn, end_pfn);
	if (pfn) { /* We have movable pages */
		ret = do_migrate_range(pfn, end_pfn);
		goto repeat;
	}

	/*
	 * Dissolve free hugepages in the memory block before doing the
	 * actual offlining, in order to keep hugetlbfs's object counting
	 * consistent.
	 */
	ret = dissolve_free_huge_pages(start_pfn, end_pfn);
	if (ret)
		goto failed_removal;
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0)
		goto repeat;
	pr_info("Offlined Pages %ld\n", offlined_pages);
	/*
	 * Ok, all of our target is isolated.
	 * We cannot do rollback at this point.
	 */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migrate type MOVABLE again */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	/* removal success */
	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
	zone->present_pages -= offlined_pages;

	pgdat_resize_lock(zone->zone_pgdat, &flags);
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	init_per_zone_wmark_min();

	if (!populated_zone(zone)) {
		zone_pcp_reset(zone);
		build_all_zonelists(NULL);
	} else
		zone_pcp_update(zone);

	node_states_clear_node(node, &arg);
	if (arg.status_change_nid >= 0) {
		kswapd_stop(node);
		kcompactd_stop(node);
	}

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	return 0;

failed_removal:
	pr_debug("memory offlining [mem %#010llx-%#010llx] failed\n",
		 (unsigned long long) start_pfn << PAGE_SHIFT,
		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* pushback to free area */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
	return ret;
}

/* Must be protected by mem_hotplug_begin() or a device_lock */
int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
{
	return __offline_pages(start_pfn, start_pfn + nr_pages);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
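
/*
 * Note on usage: offline_pages() is normally driven through the memory
 * block device, either from user space, e.g.:
 *
 *	echo offline > /sys/devices/system/memory/memoryN/state
 *
 * or via device_offline() from the kernel; both paths hold the device lock
 * that the locking comment above requires.
 */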

/**
 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn)
 * @start_pfn: start pfn of the memory range
 * @end_pfn: end pfn of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory section walked
 *
 * This function walks through all present mem sections in range
 * [start_pfn, end_pfn) and calls func on each mem section.
 *
 * Returns the return value of func.
 */
int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
		void *arg, int (*func)(struct memory_block *, void *))
{
	struct memory_block *mem = NULL;
	struct mem_section *section;
	unsigned long pfn, section_nr;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		section_nr = pfn_to_section_nr(pfn);
		if (!present_section_nr(section_nr))
			continue;

		section = __nr_to_section(section_nr);
		/* same memblock? */
		if (mem)
			if ((section_nr >= mem->start_section_nr) &&
			    (section_nr <= mem->end_section_nr))
				continue;

		mem = find_memory_block_hinted(section, mem);
		if (!mem)
			continue;

		ret = func(mem, arg);
		if (ret) {
			kobject_put(&mem->dev.kobj);
			return ret;
		}
	}

	if (mem)
		kobject_put(&mem->dev.kobj);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
{
	int ret = !is_memblock_offlined(mem);

	if (unlikely(ret)) {
		phys_addr_t beginpa, endpa;

		beginpa = PFN_PHYS(section_nr_to_pfn(mem->start_section_nr));
		endpa = PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1;
		pr_warn("removing memory fails, because memory [%pa-%pa] is onlined\n",
			&beginpa, &endpa);
	}

	return ret;
}

static int check_cpu_on_node(pg_data_t *pgdat)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (cpu_to_node(cpu) == pgdat->node_id)
			/*
			 * the cpu on this node isn't removed, and we can't
			 * offline this node.
			 */
			return -EBUSY;
	}

	return 0;
}

static void unmap_cpu_on_node(pg_data_t *pgdat)
{
#ifdef CONFIG_ACPI_NUMA
	int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_to_node(cpu) == pgdat->node_id)
			numa_clear_node(cpu);
#endif
}

static int check_and_unmap_cpu_on_node(pg_data_t *pgdat)
{
	int ret;

	ret = check_cpu_on_node(pgdat);
	if (ret)
		return ret;

	/*
	 * the node will be offlined when we come here, so we can clear
	 * the cpu_to_node() now.
	 */

	unmap_cpu_on_node(pgdat);
	return 0;
}

/**
 * try_offline_node - try to offline a node
 * @nid: the node ID
 *
 * Offline a node if all memory sections and cpus of the node are removed.
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call.
 */
void try_offline_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = pgdat->node_start_pfn;
	unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		if (!present_section_nr(section_nr))
			continue;

		if (pfn_to_nid(pfn) != nid)
			continue;

		/*
		 * some memory sections of this node are not removed, and we
		 * can't offline the node now.
		 */
		return;
	}

	if (check_and_unmap_cpu_on_node(pgdat))
		return;

	/*
	 * all memory/cpu of this node are removed, we can offline this
	 * node now.
	 */
	node_set_offline(nid);
	unregister_one_node(nid);
}
EXPORT_SYMBOL(try_offline_node);
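
/*
 * Illustrative sketch (not compiled): remove_memory() below expects the
 * caller to hold the device hotplug lock and to have offlined the range
 * first, so a driver-side removal looks roughly like this; nid/start/size
 * are placeholders.
 */
#if 0
	lock_device_hotplug();
	/* offline the blocks first, e.g. via device_offline() per block */
	remove_memory(nid, start, size);
	unlock_device_hotplug();
#endif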

/**
 * remove_memory - remove a previously hot-added memory range
 * @nid: the node ID
 * @start: physical address of the region to remove
 * @size: size of the region to remove
 *
 * NOTE: The caller must call lock_device_hotplug() to serialize hotplug
 * and online/offline operations before this call, as required by
 * try_offline_node().
 */
void __ref remove_memory(int nid, u64 start, u64 size)
{
	int ret;

	BUG_ON(check_hotplug_memory_range(start, size));

	mem_hotplug_begin();

	/*
	 * All memory blocks must be offlined before removing memory. Check
	 * whether all memory blocks in question are offline and trigger a
	 * BUG() if this is not the case.
	 */
	ret = walk_memory_range(PFN_DOWN(start), PFN_UP(start + size - 1), NULL,
				check_memblock_offlined_cb);
	if (ret)
		BUG();

	/* remove memmap entry */
	firmware_map_remove(start, start + size, "System RAM");
	memblock_free(start, size);
	memblock_remove(start, size);

	arch_remove_memory(start, size, NULL);

	try_offline_node(nid);

	mem_hotplug_done();
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */