1 /* 2 * linux/mm/memory_hotplug.c 3 * 4 * Copyright (C) 5 */ 6 7 #include <linux/stddef.h> 8 #include <linux/mm.h> 9 #include <linux/swap.h> 10 #include <linux/interrupt.h> 11 #include <linux/pagemap.h> 12 #include <linux/bootmem.h> 13 #include <linux/compiler.h> 14 #include <linux/export.h> 15 #include <linux/pagevec.h> 16 #include <linux/writeback.h> 17 #include <linux/slab.h> 18 #include <linux/sysctl.h> 19 #include <linux/cpu.h> 20 #include <linux/memory.h> 21 #include <linux/memory_hotplug.h> 22 #include <linux/highmem.h> 23 #include <linux/vmalloc.h> 24 #include <linux/ioport.h> 25 #include <linux/delay.h> 26 #include <linux/migrate.h> 27 #include <linux/page-isolation.h> 28 #include <linux/pfn.h> 29 #include <linux/suspend.h> 30 #include <linux/mm_inline.h> 31 #include <linux/firmware-map.h> 32 #include <linux/stop_machine.h> 33 34 #include <asm/tlbflush.h> 35 36 #include "internal.h" 37 38 /* 39 * online_page_callback contains pointer to current page onlining function. 40 * Initially it is generic_online_page(). If it is required it could be 41 * changed by calling set_online_page_callback() for callback registration 42 * and restore_online_page_callback() for generic callback restore. 43 */ 44 45 static void generic_online_page(struct page *page); 46 47 static online_page_callback_t online_page_callback = generic_online_page; 48 49 DEFINE_MUTEX(mem_hotplug_mutex); 50 51 void lock_memory_hotplug(void) 52 { 53 mutex_lock(&mem_hotplug_mutex); 54 55 /* for exclusive hibernation if CONFIG_HIBERNATION=y */ 56 lock_system_sleep(); 57 } 58 59 void unlock_memory_hotplug(void) 60 { 61 unlock_system_sleep(); 62 mutex_unlock(&mem_hotplug_mutex); 63 } 64 65 66 /* add this memory to iomem resource */ 67 static struct resource *register_memory_resource(u64 start, u64 size) 68 { 69 struct resource *res; 70 res = kzalloc(sizeof(struct resource), GFP_KERNEL); 71 BUG_ON(!res); 72 73 res->name = "System RAM"; 74 res->start = start; 75 res->end = start + size - 1; 76 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 77 if (request_resource(&iomem_resource, res) < 0) { 78 printk("System RAM resource %pR cannot be added\n", res); 79 kfree(res); 80 res = NULL; 81 } 82 return res; 83 } 84 85 static void release_memory_resource(struct resource *res) 86 { 87 if (!res) 88 return; 89 release_resource(res); 90 kfree(res); 91 return; 92 } 93 94 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE 95 void get_page_bootmem(unsigned long info, struct page *page, 96 unsigned long type) 97 { 98 page->lru.next = (struct list_head *) type; 99 SetPagePrivate(page); 100 set_page_private(page, info); 101 atomic_inc(&page->_count); 102 } 103 104 /* reference to __meminit __free_pages_bootmem is valid 105 * so use __ref to tell modpost not to generate a warning */ 106 void __ref put_page_bootmem(struct page *page) 107 { 108 unsigned long type; 109 static DEFINE_MUTEX(ppb_lock); 110 111 type = (unsigned long) page->lru.next; 112 BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE || 113 type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE); 114 115 if (atomic_dec_return(&page->_count) == 1) { 116 ClearPagePrivate(page); 117 set_page_private(page, 0); 118 INIT_LIST_HEAD(&page->lru); 119 120 /* 121 * Please refer to comment for __free_pages_bootmem() 122 * for why we serialize here. 123 */ 124 mutex_lock(&ppb_lock); 125 __free_pages_bootmem(page, 0); 126 mutex_unlock(&ppb_lock); 127 } 128 129 } 130 131 #ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE 132 #ifndef CONFIG_SPARSEMEM_VMEMMAP 133 static void register_page_bootmem_info_section(unsigned long start_pfn) 134 { 135 unsigned long *usemap, mapsize, section_nr, i; 136 struct mem_section *ms; 137 struct page *page, *memmap; 138 139 section_nr = pfn_to_section_nr(start_pfn); 140 ms = __nr_to_section(section_nr); 141 142 /* Get section's memmap address */ 143 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); 144 145 /* 146 * Get page for the memmap's phys address 147 * XXX: need more consideration for sparse_vmemmap... 148 */ 149 page = virt_to_page(memmap); 150 mapsize = sizeof(struct page) * PAGES_PER_SECTION; 151 mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT; 152 153 /* remember memmap's page */ 154 for (i = 0; i < mapsize; i++, page++) 155 get_page_bootmem(section_nr, page, SECTION_INFO); 156 157 usemap = __nr_to_section(section_nr)->pageblock_flags; 158 page = virt_to_page(usemap); 159 160 mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; 161 162 for (i = 0; i < mapsize; i++, page++) 163 get_page_bootmem(section_nr, page, MIX_SECTION_INFO); 164 165 } 166 #else /* CONFIG_SPARSEMEM_VMEMMAP */ 167 static void register_page_bootmem_info_section(unsigned long start_pfn) 168 { 169 unsigned long *usemap, mapsize, section_nr, i; 170 struct mem_section *ms; 171 struct page *page, *memmap; 172 173 if (!pfn_valid(start_pfn)) 174 return; 175 176 section_nr = pfn_to_section_nr(start_pfn); 177 ms = __nr_to_section(section_nr); 178 179 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr); 180 181 register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION); 182 183 usemap = __nr_to_section(section_nr)->pageblock_flags; 184 page = virt_to_page(usemap); 185 186 mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT; 187 188 for (i = 0; i < mapsize; i++, page++) 189 get_page_bootmem(section_nr, page, MIX_SECTION_INFO); 190 } 191 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ 192 193 void register_page_bootmem_info_node(struct pglist_data *pgdat) 194 { 195 unsigned long i, pfn, end_pfn, nr_pages; 196 int node = pgdat->node_id; 197 struct page *page; 198 struct zone *zone; 199 200 nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT; 201 page = virt_to_page(pgdat); 202 203 for (i = 0; i < nr_pages; i++, page++) 204 get_page_bootmem(node, page, NODE_INFO); 205 206 zone = &pgdat->node_zones[0]; 207 for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) { 208 if (zone->wait_table) { 209 nr_pages = zone->wait_table_hash_nr_entries 210 * sizeof(wait_queue_head_t); 211 nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT; 212 page = virt_to_page(zone->wait_table); 213 214 for (i = 0; i < nr_pages; i++, page++) 215 get_page_bootmem(node, page, NODE_INFO); 216 } 217 } 218 219 pfn = pgdat->node_start_pfn; 220 end_pfn = pfn + pgdat->node_spanned_pages; 221 222 /* register_section info */ 223 for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 224 /* 225 * Some platforms can assign the same pfn to multiple nodes - on 226 * node0 as well as nodeN. To avoid registering a pfn against 227 * multiple nodes we check that this pfn does not already 228 * reside in some other node. 229 */ 230 if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) 231 register_page_bootmem_info_section(pfn); 232 } 233 } 234 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */ 235 236 static void grow_zone_span(struct zone *zone, unsigned long start_pfn, 237 unsigned long end_pfn) 238 { 239 unsigned long old_zone_end_pfn; 240 241 zone_span_writelock(zone); 242 243 old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; 244 if (!zone->spanned_pages || start_pfn < zone->zone_start_pfn) 245 zone->zone_start_pfn = start_pfn; 246 247 zone->spanned_pages = max(old_zone_end_pfn, end_pfn) - 248 zone->zone_start_pfn; 249 250 zone_span_writeunlock(zone); 251 } 252 253 static void resize_zone(struct zone *zone, unsigned long start_pfn, 254 unsigned long end_pfn) 255 { 256 zone_span_writelock(zone); 257 258 if (end_pfn - start_pfn) { 259 zone->zone_start_pfn = start_pfn; 260 zone->spanned_pages = end_pfn - start_pfn; 261 } else { 262 /* 263 * make it consist as free_area_init_core(), 264 * if spanned_pages = 0, then keep start_pfn = 0 265 */ 266 zone->zone_start_pfn = 0; 267 zone->spanned_pages = 0; 268 } 269 270 zone_span_writeunlock(zone); 271 } 272 273 static void fix_zone_id(struct zone *zone, unsigned long start_pfn, 274 unsigned long end_pfn) 275 { 276 enum zone_type zid = zone_idx(zone); 277 int nid = zone->zone_pgdat->node_id; 278 unsigned long pfn; 279 280 for (pfn = start_pfn; pfn < end_pfn; pfn++) 281 set_page_links(pfn_to_page(pfn), zid, nid, pfn); 282 } 283 284 static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2, 285 unsigned long start_pfn, unsigned long end_pfn) 286 { 287 int ret; 288 unsigned long flags; 289 unsigned long z1_start_pfn; 290 291 if (!z1->wait_table) { 292 ret = init_currently_empty_zone(z1, start_pfn, 293 end_pfn - start_pfn, MEMMAP_HOTPLUG); 294 if (ret) 295 return ret; 296 } 297 298 pgdat_resize_lock(z1->zone_pgdat, &flags); 299 300 /* can't move pfns which are higher than @z2 */ 301 if (end_pfn > z2->zone_start_pfn + z2->spanned_pages) 302 goto out_fail; 303 /* the move out part mast at the left most of @z2 */ 304 if (start_pfn > z2->zone_start_pfn) 305 goto out_fail; 306 /* must included/overlap */ 307 if (end_pfn <= z2->zone_start_pfn) 308 goto out_fail; 309 310 /* use start_pfn for z1's start_pfn if z1 is empty */ 311 if (z1->spanned_pages) 312 z1_start_pfn = z1->zone_start_pfn; 313 else 314 z1_start_pfn = start_pfn; 315 316 resize_zone(z1, z1_start_pfn, end_pfn); 317 resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages); 318 319 pgdat_resize_unlock(z1->zone_pgdat, &flags); 320 321 fix_zone_id(z1, start_pfn, end_pfn); 322 323 return 0; 324 out_fail: 325 pgdat_resize_unlock(z1->zone_pgdat, &flags); 326 return -1; 327 } 328 329 static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2, 330 unsigned long start_pfn, unsigned long end_pfn) 331 { 332 int ret; 333 unsigned long flags; 334 unsigned long z2_end_pfn; 335 336 if (!z2->wait_table) { 337 ret = init_currently_empty_zone(z2, start_pfn, 338 end_pfn - start_pfn, MEMMAP_HOTPLUG); 339 if (ret) 340 return ret; 341 } 342 343 pgdat_resize_lock(z1->zone_pgdat, &flags); 344 345 /* can't move pfns which are lower than @z1 */ 346 if (z1->zone_start_pfn > start_pfn) 347 goto out_fail; 348 /* the move out part mast at the right most of @z1 */ 349 if (z1->zone_start_pfn + z1->spanned_pages > end_pfn) 350 goto out_fail; 351 /* must included/overlap */ 352 if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages) 353 goto out_fail; 354 355 /* use end_pfn for z2's end_pfn if z2 is empty */ 356 if (z2->spanned_pages) 357 z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages; 358 else 359 z2_end_pfn = end_pfn; 360 361 resize_zone(z1, z1->zone_start_pfn, start_pfn); 362 resize_zone(z2, start_pfn, z2_end_pfn); 363 364 pgdat_resize_unlock(z1->zone_pgdat, &flags); 365 366 fix_zone_id(z2, start_pfn, end_pfn); 367 368 return 0; 369 out_fail: 370 pgdat_resize_unlock(z1->zone_pgdat, &flags); 371 return -1; 372 } 373 374 static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn, 375 unsigned long end_pfn) 376 { 377 unsigned long old_pgdat_end_pfn = 378 pgdat->node_start_pfn + pgdat->node_spanned_pages; 379 380 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn) 381 pgdat->node_start_pfn = start_pfn; 382 383 pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) - 384 pgdat->node_start_pfn; 385 } 386 387 static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn) 388 { 389 struct pglist_data *pgdat = zone->zone_pgdat; 390 int nr_pages = PAGES_PER_SECTION; 391 int nid = pgdat->node_id; 392 int zone_type; 393 unsigned long flags; 394 395 zone_type = zone - pgdat->node_zones; 396 if (!zone->wait_table) { 397 int ret; 398 399 ret = init_currently_empty_zone(zone, phys_start_pfn, 400 nr_pages, MEMMAP_HOTPLUG); 401 if (ret) 402 return ret; 403 } 404 pgdat_resize_lock(zone->zone_pgdat, &flags); 405 grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages); 406 grow_pgdat_span(zone->zone_pgdat, phys_start_pfn, 407 phys_start_pfn + nr_pages); 408 pgdat_resize_unlock(zone->zone_pgdat, &flags); 409 memmap_init_zone(nr_pages, nid, zone_type, 410 phys_start_pfn, MEMMAP_HOTPLUG); 411 return 0; 412 } 413 414 static int __meminit __add_section(int nid, struct zone *zone, 415 unsigned long phys_start_pfn) 416 { 417 int nr_pages = PAGES_PER_SECTION; 418 int ret; 419 420 if (pfn_valid(phys_start_pfn)) 421 return -EEXIST; 422 423 ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages); 424 425 if (ret < 0) 426 return ret; 427 428 ret = __add_zone(zone, phys_start_pfn); 429 430 if (ret < 0) 431 return ret; 432 433 return register_new_memory(nid, __pfn_to_section(phys_start_pfn)); 434 } 435 436 /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ 437 static int find_smallest_section_pfn(int nid, struct zone *zone, 438 unsigned long start_pfn, 439 unsigned long end_pfn) 440 { 441 struct mem_section *ms; 442 443 for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SECTION) { 444 ms = __pfn_to_section(start_pfn); 445 446 if (unlikely(!valid_section(ms))) 447 continue; 448 449 if (unlikely(pfn_to_nid(start_pfn) != nid)) 450 continue; 451 452 if (zone && zone != page_zone(pfn_to_page(start_pfn))) 453 continue; 454 455 return start_pfn; 456 } 457 458 return 0; 459 } 460 461 /* find the biggest valid pfn in the range [start_pfn, end_pfn). */ 462 static int find_biggest_section_pfn(int nid, struct zone *zone, 463 unsigned long start_pfn, 464 unsigned long end_pfn) 465 { 466 struct mem_section *ms; 467 unsigned long pfn; 468 469 /* pfn is the end pfn of a memory section. */ 470 pfn = end_pfn - 1; 471 for (; pfn >= start_pfn; pfn -= PAGES_PER_SECTION) { 472 ms = __pfn_to_section(pfn); 473 474 if (unlikely(!valid_section(ms))) 475 continue; 476 477 if (unlikely(pfn_to_nid(pfn) != nid)) 478 continue; 479 480 if (zone && zone != page_zone(pfn_to_page(pfn))) 481 continue; 482 483 return pfn; 484 } 485 486 return 0; 487 } 488 489 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, 490 unsigned long end_pfn) 491 { 492 unsigned long zone_start_pfn = zone->zone_start_pfn; 493 unsigned long zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; 494 unsigned long pfn; 495 struct mem_section *ms; 496 int nid = zone_to_nid(zone); 497 498 zone_span_writelock(zone); 499 if (zone_start_pfn == start_pfn) { 500 /* 501 * If the section is smallest section in the zone, it need 502 * shrink zone->zone_start_pfn and zone->zone_spanned_pages. 503 * In this case, we find second smallest valid mem_section 504 * for shrinking zone. 505 */ 506 pfn = find_smallest_section_pfn(nid, zone, end_pfn, 507 zone_end_pfn); 508 if (pfn) { 509 zone->zone_start_pfn = pfn; 510 zone->spanned_pages = zone_end_pfn - pfn; 511 } 512 } else if (zone_end_pfn == end_pfn) { 513 /* 514 * If the section is biggest section in the zone, it need 515 * shrink zone->spanned_pages. 516 * In this case, we find second biggest valid mem_section for 517 * shrinking zone. 518 */ 519 pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn, 520 start_pfn); 521 if (pfn) 522 zone->spanned_pages = pfn - zone_start_pfn + 1; 523 } 524 525 /* 526 * The section is not biggest or smallest mem_section in the zone, it 527 * only creates a hole in the zone. So in this case, we need not 528 * change the zone. But perhaps, the zone has only hole data. Thus 529 * it check the zone has only hole or not. 530 */ 531 pfn = zone_start_pfn; 532 for (; pfn < zone_end_pfn; pfn += PAGES_PER_SECTION) { 533 ms = __pfn_to_section(pfn); 534 535 if (unlikely(!valid_section(ms))) 536 continue; 537 538 if (page_zone(pfn_to_page(pfn)) != zone) 539 continue; 540 541 /* If the section is current section, it continues the loop */ 542 if (start_pfn == pfn) 543 continue; 544 545 /* If we find valid section, we have nothing to do */ 546 zone_span_writeunlock(zone); 547 return; 548 } 549 550 /* The zone has no valid section */ 551 zone->zone_start_pfn = 0; 552 zone->spanned_pages = 0; 553 zone_span_writeunlock(zone); 554 } 555 556 static void shrink_pgdat_span(struct pglist_data *pgdat, 557 unsigned long start_pfn, unsigned long end_pfn) 558 { 559 unsigned long pgdat_start_pfn = pgdat->node_start_pfn; 560 unsigned long pgdat_end_pfn = 561 pgdat->node_start_pfn + pgdat->node_spanned_pages; 562 unsigned long pfn; 563 struct mem_section *ms; 564 int nid = pgdat->node_id; 565 566 if (pgdat_start_pfn == start_pfn) { 567 /* 568 * If the section is smallest section in the pgdat, it need 569 * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages. 570 * In this case, we find second smallest valid mem_section 571 * for shrinking zone. 572 */ 573 pfn = find_smallest_section_pfn(nid, NULL, end_pfn, 574 pgdat_end_pfn); 575 if (pfn) { 576 pgdat->node_start_pfn = pfn; 577 pgdat->node_spanned_pages = pgdat_end_pfn - pfn; 578 } 579 } else if (pgdat_end_pfn == end_pfn) { 580 /* 581 * If the section is biggest section in the pgdat, it need 582 * shrink pgdat->node_spanned_pages. 583 * In this case, we find second biggest valid mem_section for 584 * shrinking zone. 585 */ 586 pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn, 587 start_pfn); 588 if (pfn) 589 pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1; 590 } 591 592 /* 593 * If the section is not biggest or smallest mem_section in the pgdat, 594 * it only creates a hole in the pgdat. So in this case, we need not 595 * change the pgdat. 596 * But perhaps, the pgdat has only hole data. Thus it check the pgdat 597 * has only hole or not. 598 */ 599 pfn = pgdat_start_pfn; 600 for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SECTION) { 601 ms = __pfn_to_section(pfn); 602 603 if (unlikely(!valid_section(ms))) 604 continue; 605 606 if (pfn_to_nid(pfn) != nid) 607 continue; 608 609 /* If the section is current section, it continues the loop */ 610 if (start_pfn == pfn) 611 continue; 612 613 /* If we find valid section, we have nothing to do */ 614 return; 615 } 616 617 /* The pgdat has no valid section */ 618 pgdat->node_start_pfn = 0; 619 pgdat->node_spanned_pages = 0; 620 } 621 622 static void __remove_zone(struct zone *zone, unsigned long start_pfn) 623 { 624 struct pglist_data *pgdat = zone->zone_pgdat; 625 int nr_pages = PAGES_PER_SECTION; 626 int zone_type; 627 unsigned long flags; 628 629 zone_type = zone - pgdat->node_zones; 630 631 pgdat_resize_lock(zone->zone_pgdat, &flags); 632 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); 633 shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages); 634 pgdat_resize_unlock(zone->zone_pgdat, &flags); 635 } 636 637 static int __remove_section(struct zone *zone, struct mem_section *ms) 638 { 639 unsigned long start_pfn; 640 int scn_nr; 641 int ret = -EINVAL; 642 643 if (!valid_section(ms)) 644 return ret; 645 646 ret = unregister_memory_section(ms); 647 if (ret) 648 return ret; 649 650 scn_nr = __section_nr(ms); 651 start_pfn = section_nr_to_pfn(scn_nr); 652 __remove_zone(zone, start_pfn); 653 654 sparse_remove_one_section(zone, ms); 655 return 0; 656 } 657 658 /* 659 * Reasonably generic function for adding memory. It is 660 * expected that archs that support memory hotplug will 661 * call this function after deciding the zone to which to 662 * add the new pages. 663 */ 664 int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn, 665 unsigned long nr_pages) 666 { 667 unsigned long i; 668 int err = 0; 669 int start_sec, end_sec; 670 /* during initialize mem_map, align hot-added range to section */ 671 start_sec = pfn_to_section_nr(phys_start_pfn); 672 end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1); 673 674 for (i = start_sec; i <= end_sec; i++) { 675 err = __add_section(nid, zone, i << PFN_SECTION_SHIFT); 676 677 /* 678 * EEXIST is finally dealt with by ioresource collision 679 * check. see add_memory() => register_memory_resource() 680 * Warning will be printed if there is collision. 681 */ 682 if (err && (err != -EEXIST)) 683 break; 684 err = 0; 685 } 686 687 return err; 688 } 689 EXPORT_SYMBOL_GPL(__add_pages); 690 691 /** 692 * __remove_pages() - remove sections of pages from a zone 693 * @zone: zone from which pages need to be removed 694 * @phys_start_pfn: starting pageframe (must be aligned to start of a section) 695 * @nr_pages: number of pages to remove (must be multiple of section size) 696 * 697 * Generic helper function to remove section mappings and sysfs entries 698 * for the section of the memory we are removing. Caller needs to make 699 * sure that pages are marked reserved and zones are adjust properly by 700 * calling offline_pages(). 701 */ 702 int __remove_pages(struct zone *zone, unsigned long phys_start_pfn, 703 unsigned long nr_pages) 704 { 705 unsigned long i, ret = 0; 706 int sections_to_remove; 707 708 /* 709 * We can only remove entire sections 710 */ 711 BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK); 712 BUG_ON(nr_pages % PAGES_PER_SECTION); 713 714 release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE); 715 716 sections_to_remove = nr_pages / PAGES_PER_SECTION; 717 for (i = 0; i < sections_to_remove; i++) { 718 unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION; 719 ret = __remove_section(zone, __pfn_to_section(pfn)); 720 if (ret) 721 break; 722 } 723 return ret; 724 } 725 EXPORT_SYMBOL_GPL(__remove_pages); 726 727 int set_online_page_callback(online_page_callback_t callback) 728 { 729 int rc = -EINVAL; 730 731 lock_memory_hotplug(); 732 733 if (online_page_callback == generic_online_page) { 734 online_page_callback = callback; 735 rc = 0; 736 } 737 738 unlock_memory_hotplug(); 739 740 return rc; 741 } 742 EXPORT_SYMBOL_GPL(set_online_page_callback); 743 744 int restore_online_page_callback(online_page_callback_t callback) 745 { 746 int rc = -EINVAL; 747 748 lock_memory_hotplug(); 749 750 if (online_page_callback == callback) { 751 online_page_callback = generic_online_page; 752 rc = 0; 753 } 754 755 unlock_memory_hotplug(); 756 757 return rc; 758 } 759 EXPORT_SYMBOL_GPL(restore_online_page_callback); 760 761 void __online_page_set_limits(struct page *page) 762 { 763 unsigned long pfn = page_to_pfn(page); 764 765 if (pfn >= num_physpages) 766 num_physpages = pfn + 1; 767 } 768 EXPORT_SYMBOL_GPL(__online_page_set_limits); 769 770 void __online_page_increment_counters(struct page *page) 771 { 772 totalram_pages++; 773 774 #ifdef CONFIG_HIGHMEM 775 if (PageHighMem(page)) 776 totalhigh_pages++; 777 #endif 778 } 779 EXPORT_SYMBOL_GPL(__online_page_increment_counters); 780 781 void __online_page_free(struct page *page) 782 { 783 ClearPageReserved(page); 784 init_page_count(page); 785 __free_page(page); 786 } 787 EXPORT_SYMBOL_GPL(__online_page_free); 788 789 static void generic_online_page(struct page *page) 790 { 791 __online_page_set_limits(page); 792 __online_page_increment_counters(page); 793 __online_page_free(page); 794 } 795 796 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages, 797 void *arg) 798 { 799 unsigned long i; 800 unsigned long onlined_pages = *(unsigned long *)arg; 801 struct page *page; 802 if (PageReserved(pfn_to_page(start_pfn))) 803 for (i = 0; i < nr_pages; i++) { 804 page = pfn_to_page(start_pfn + i); 805 (*online_page_callback)(page); 806 onlined_pages++; 807 } 808 *(unsigned long *)arg = onlined_pages; 809 return 0; 810 } 811 812 #ifdef CONFIG_MOVABLE_NODE 813 /* 814 * When CONFIG_MOVABLE_NODE, we permit onlining of a node which doesn't have 815 * normal memory. 816 */ 817 static bool can_online_high_movable(struct zone *zone) 818 { 819 return true; 820 } 821 #else /* CONFIG_MOVABLE_NODE */ 822 /* ensure every online node has NORMAL memory */ 823 static bool can_online_high_movable(struct zone *zone) 824 { 825 return node_state(zone_to_nid(zone), N_NORMAL_MEMORY); 826 } 827 #endif /* CONFIG_MOVABLE_NODE */ 828 829 /* check which state of node_states will be changed when online memory */ 830 static void node_states_check_changes_online(unsigned long nr_pages, 831 struct zone *zone, struct memory_notify *arg) 832 { 833 int nid = zone_to_nid(zone); 834 enum zone_type zone_last = ZONE_NORMAL; 835 836 /* 837 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY] 838 * contains nodes which have zones of 0...ZONE_NORMAL, 839 * set zone_last to ZONE_NORMAL. 840 * 841 * If we don't have HIGHMEM nor movable node, 842 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of 843 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE. 844 */ 845 if (N_MEMORY == N_NORMAL_MEMORY) 846 zone_last = ZONE_MOVABLE; 847 848 /* 849 * if the memory to be online is in a zone of 0...zone_last, and 850 * the zones of 0...zone_last don't have memory before online, we will 851 * need to set the node to node_states[N_NORMAL_MEMORY] after 852 * the memory is online. 853 */ 854 if (zone_idx(zone) <= zone_last && !node_state(nid, N_NORMAL_MEMORY)) 855 arg->status_change_nid_normal = nid; 856 else 857 arg->status_change_nid_normal = -1; 858 859 #ifdef CONFIG_HIGHMEM 860 /* 861 * If we have movable node, node_states[N_HIGH_MEMORY] 862 * contains nodes which have zones of 0...ZONE_HIGHMEM, 863 * set zone_last to ZONE_HIGHMEM. 864 * 865 * If we don't have movable node, node_states[N_NORMAL_MEMORY] 866 * contains nodes which have zones of 0...ZONE_MOVABLE, 867 * set zone_last to ZONE_MOVABLE. 868 */ 869 zone_last = ZONE_HIGHMEM; 870 if (N_MEMORY == N_HIGH_MEMORY) 871 zone_last = ZONE_MOVABLE; 872 873 if (zone_idx(zone) <= zone_last && !node_state(nid, N_HIGH_MEMORY)) 874 arg->status_change_nid_high = nid; 875 else 876 arg->status_change_nid_high = -1; 877 #else 878 arg->status_change_nid_high = arg->status_change_nid_normal; 879 #endif 880 881 /* 882 * if the node don't have memory befor online, we will need to 883 * set the node to node_states[N_MEMORY] after the memory 884 * is online. 885 */ 886 if (!node_state(nid, N_MEMORY)) 887 arg->status_change_nid = nid; 888 else 889 arg->status_change_nid = -1; 890 } 891 892 static void node_states_set_node(int node, struct memory_notify *arg) 893 { 894 if (arg->status_change_nid_normal >= 0) 895 node_set_state(node, N_NORMAL_MEMORY); 896 897 if (arg->status_change_nid_high >= 0) 898 node_set_state(node, N_HIGH_MEMORY); 899 900 node_set_state(node, N_MEMORY); 901 } 902 903 904 int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type) 905 { 906 unsigned long onlined_pages = 0; 907 struct zone *zone; 908 int need_zonelists_rebuild = 0; 909 int nid; 910 int ret; 911 struct memory_notify arg; 912 913 lock_memory_hotplug(); 914 /* 915 * This doesn't need a lock to do pfn_to_page(). 916 * The section can't be removed here because of the 917 * memory_block->state_mutex. 918 */ 919 zone = page_zone(pfn_to_page(pfn)); 920 921 if ((zone_idx(zone) > ZONE_NORMAL || online_type == ONLINE_MOVABLE) && 922 !can_online_high_movable(zone)) { 923 unlock_memory_hotplug(); 924 return -1; 925 } 926 927 if (online_type == ONLINE_KERNEL && zone_idx(zone) == ZONE_MOVABLE) { 928 if (move_pfn_range_left(zone - 1, zone, pfn, pfn + nr_pages)) { 929 unlock_memory_hotplug(); 930 return -1; 931 } 932 } 933 if (online_type == ONLINE_MOVABLE && zone_idx(zone) == ZONE_MOVABLE - 1) { 934 if (move_pfn_range_right(zone, zone + 1, pfn, pfn + nr_pages)) { 935 unlock_memory_hotplug(); 936 return -1; 937 } 938 } 939 940 /* Previous code may changed the zone of the pfn range */ 941 zone = page_zone(pfn_to_page(pfn)); 942 943 arg.start_pfn = pfn; 944 arg.nr_pages = nr_pages; 945 node_states_check_changes_online(nr_pages, zone, &arg); 946 947 nid = page_to_nid(pfn_to_page(pfn)); 948 949 ret = memory_notify(MEM_GOING_ONLINE, &arg); 950 ret = notifier_to_errno(ret); 951 if (ret) { 952 memory_notify(MEM_CANCEL_ONLINE, &arg); 953 unlock_memory_hotplug(); 954 return ret; 955 } 956 /* 957 * If this zone is not populated, then it is not in zonelist. 958 * This means the page allocator ignores this zone. 959 * So, zonelist must be updated after online. 960 */ 961 mutex_lock(&zonelists_mutex); 962 if (!populated_zone(zone)) { 963 need_zonelists_rebuild = 1; 964 build_all_zonelists(NULL, zone); 965 } 966 967 ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages, 968 online_pages_range); 969 if (ret) { 970 if (need_zonelists_rebuild) 971 zone_pcp_reset(zone); 972 mutex_unlock(&zonelists_mutex); 973 printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n", 974 (unsigned long long) pfn << PAGE_SHIFT, 975 (((unsigned long long) pfn + nr_pages) 976 << PAGE_SHIFT) - 1); 977 memory_notify(MEM_CANCEL_ONLINE, &arg); 978 unlock_memory_hotplug(); 979 return ret; 980 } 981 982 zone->managed_pages += onlined_pages; 983 zone->present_pages += onlined_pages; 984 zone->zone_pgdat->node_present_pages += onlined_pages; 985 if (onlined_pages) { 986 node_states_set_node(zone_to_nid(zone), &arg); 987 if (need_zonelists_rebuild) 988 build_all_zonelists(NULL, NULL); 989 else 990 zone_pcp_update(zone); 991 } 992 993 mutex_unlock(&zonelists_mutex); 994 995 init_per_zone_wmark_min(); 996 997 if (onlined_pages) 998 kswapd_run(zone_to_nid(zone)); 999 1000 vm_total_pages = nr_free_pagecache_pages(); 1001 1002 writeback_set_ratelimit(); 1003 1004 if (onlined_pages) 1005 memory_notify(MEM_ONLINE, &arg); 1006 unlock_memory_hotplug(); 1007 1008 return 0; 1009 } 1010 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 1011 1012 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ 1013 static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) 1014 { 1015 struct pglist_data *pgdat; 1016 unsigned long zones_size[MAX_NR_ZONES] = {0}; 1017 unsigned long zholes_size[MAX_NR_ZONES] = {0}; 1018 unsigned long start_pfn = start >> PAGE_SHIFT; 1019 1020 pgdat = arch_alloc_nodedata(nid); 1021 if (!pgdat) 1022 return NULL; 1023 1024 arch_refresh_nodedata(nid, pgdat); 1025 1026 /* we can use NODE_DATA(nid) from here */ 1027 1028 /* init node's zones as empty zones, we don't have any present pages.*/ 1029 free_area_init_node(nid, zones_size, start_pfn, zholes_size); 1030 1031 /* 1032 * The node we allocated has no zone fallback lists. For avoiding 1033 * to access not-initialized zonelist, build here. 1034 */ 1035 mutex_lock(&zonelists_mutex); 1036 build_all_zonelists(pgdat, NULL); 1037 mutex_unlock(&zonelists_mutex); 1038 1039 return pgdat; 1040 } 1041 1042 static void rollback_node_hotadd(int nid, pg_data_t *pgdat) 1043 { 1044 arch_refresh_nodedata(nid, NULL); 1045 arch_free_nodedata(pgdat); 1046 return; 1047 } 1048 1049 1050 /* 1051 * called by cpu_up() to online a node without onlined memory. 1052 */ 1053 int mem_online_node(int nid) 1054 { 1055 pg_data_t *pgdat; 1056 int ret; 1057 1058 lock_memory_hotplug(); 1059 pgdat = hotadd_new_pgdat(nid, 0); 1060 if (!pgdat) { 1061 ret = -ENOMEM; 1062 goto out; 1063 } 1064 node_set_online(nid); 1065 ret = register_one_node(nid); 1066 BUG_ON(ret); 1067 1068 out: 1069 unlock_memory_hotplug(); 1070 return ret; 1071 } 1072 1073 /* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ 1074 int __ref add_memory(int nid, u64 start, u64 size) 1075 { 1076 pg_data_t *pgdat = NULL; 1077 int new_pgdat = 0; 1078 struct resource *res; 1079 int ret; 1080 1081 lock_memory_hotplug(); 1082 1083 res = register_memory_resource(start, size); 1084 ret = -EEXIST; 1085 if (!res) 1086 goto out; 1087 1088 if (!node_online(nid)) { 1089 pgdat = hotadd_new_pgdat(nid, start); 1090 ret = -ENOMEM; 1091 if (!pgdat) 1092 goto error; 1093 new_pgdat = 1; 1094 } 1095 1096 /* call arch's memory hotadd */ 1097 ret = arch_add_memory(nid, start, size); 1098 1099 if (ret < 0) 1100 goto error; 1101 1102 /* we online node here. we can't roll back from here. */ 1103 node_set_online(nid); 1104 1105 if (new_pgdat) { 1106 ret = register_one_node(nid); 1107 /* 1108 * If sysfs file of new node can't create, cpu on the node 1109 * can't be hot-added. There is no rollback way now. 1110 * So, check by BUG_ON() to catch it reluctantly.. 1111 */ 1112 BUG_ON(ret); 1113 } 1114 1115 /* create new memmap entry */ 1116 firmware_map_add_hotplug(start, start + size, "System RAM"); 1117 1118 goto out; 1119 1120 error: 1121 /* rollback pgdat allocation and others */ 1122 if (new_pgdat) 1123 rollback_node_hotadd(nid, pgdat); 1124 release_memory_resource(res); 1125 1126 out: 1127 unlock_memory_hotplug(); 1128 return ret; 1129 } 1130 EXPORT_SYMBOL_GPL(add_memory); 1131 1132 #ifdef CONFIG_MEMORY_HOTREMOVE 1133 /* 1134 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy 1135 * set and the size of the free page is given by page_order(). Using this, 1136 * the function determines if the pageblock contains only free pages. 1137 * Due to buddy contraints, a free page at least the size of a pageblock will 1138 * be located at the start of the pageblock 1139 */ 1140 static inline int pageblock_free(struct page *page) 1141 { 1142 return PageBuddy(page) && page_order(page) >= pageblock_order; 1143 } 1144 1145 /* Return the start of the next active pageblock after a given page */ 1146 static struct page *next_active_pageblock(struct page *page) 1147 { 1148 /* Ensure the starting page is pageblock-aligned */ 1149 BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); 1150 1151 /* If the entire pageblock is free, move to the end of free page */ 1152 if (pageblock_free(page)) { 1153 int order; 1154 /* be careful. we don't have locks, page_order can be changed.*/ 1155 order = page_order(page); 1156 if ((order < MAX_ORDER) && (order >= pageblock_order)) 1157 return page + (1 << order); 1158 } 1159 1160 return page + pageblock_nr_pages; 1161 } 1162 1163 /* Checks if this range of memory is likely to be hot-removable. */ 1164 int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) 1165 { 1166 struct page *page = pfn_to_page(start_pfn); 1167 struct page *end_page = page + nr_pages; 1168 1169 /* Check the starting page of each pageblock within the range */ 1170 for (; page < end_page; page = next_active_pageblock(page)) { 1171 if (!is_pageblock_removable_nolock(page)) 1172 return 0; 1173 cond_resched(); 1174 } 1175 1176 /* All pageblocks in the memory block are likely to be hot-removable */ 1177 return 1; 1178 } 1179 1180 /* 1181 * Confirm all pages in a range [start, end) is belongs to the same zone. 1182 */ 1183 static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) 1184 { 1185 unsigned long pfn; 1186 struct zone *zone = NULL; 1187 struct page *page; 1188 int i; 1189 for (pfn = start_pfn; 1190 pfn < end_pfn; 1191 pfn += MAX_ORDER_NR_PAGES) { 1192 i = 0; 1193 /* This is just a CONFIG_HOLES_IN_ZONE check.*/ 1194 while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i)) 1195 i++; 1196 if (i == MAX_ORDER_NR_PAGES) 1197 continue; 1198 page = pfn_to_page(pfn + i); 1199 if (zone && page_zone(page) != zone) 1200 return 0; 1201 zone = page_zone(page); 1202 } 1203 return 1; 1204 } 1205 1206 /* 1207 * Scanning pfn is much easier than scanning lru list. 1208 * Scan pfn from start to end and Find LRU page. 1209 */ 1210 static unsigned long scan_lru_pages(unsigned long start, unsigned long end) 1211 { 1212 unsigned long pfn; 1213 struct page *page; 1214 for (pfn = start; pfn < end; pfn++) { 1215 if (pfn_valid(pfn)) { 1216 page = pfn_to_page(pfn); 1217 if (PageLRU(page)) 1218 return pfn; 1219 } 1220 } 1221 return 0; 1222 } 1223 1224 #define NR_OFFLINE_AT_ONCE_PAGES (256) 1225 static int 1226 do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) 1227 { 1228 unsigned long pfn; 1229 struct page *page; 1230 int move_pages = NR_OFFLINE_AT_ONCE_PAGES; 1231 int not_managed = 0; 1232 int ret = 0; 1233 LIST_HEAD(source); 1234 1235 for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) { 1236 if (!pfn_valid(pfn)) 1237 continue; 1238 page = pfn_to_page(pfn); 1239 if (!get_page_unless_zero(page)) 1240 continue; 1241 /* 1242 * We can skip free pages. And we can only deal with pages on 1243 * LRU. 1244 */ 1245 ret = isolate_lru_page(page); 1246 if (!ret) { /* Success */ 1247 put_page(page); 1248 list_add_tail(&page->lru, &source); 1249 move_pages--; 1250 inc_zone_page_state(page, NR_ISOLATED_ANON + 1251 page_is_file_cache(page)); 1252 1253 } else { 1254 #ifdef CONFIG_DEBUG_VM 1255 printk(KERN_ALERT "removing pfn %lx from LRU failed\n", 1256 pfn); 1257 dump_page(page); 1258 #endif 1259 put_page(page); 1260 /* Because we don't have big zone->lock. we should 1261 check this again here. */ 1262 if (page_count(page)) { 1263 not_managed++; 1264 ret = -EBUSY; 1265 break; 1266 } 1267 } 1268 } 1269 if (!list_empty(&source)) { 1270 if (not_managed) { 1271 putback_lru_pages(&source); 1272 goto out; 1273 } 1274 1275 /* 1276 * alloc_migrate_target should be improooooved!! 1277 * migrate_pages returns # of failed pages. 1278 */ 1279 ret = migrate_pages(&source, alloc_migrate_target, 0, 1280 true, MIGRATE_SYNC, 1281 MR_MEMORY_HOTPLUG); 1282 if (ret) 1283 putback_lru_pages(&source); 1284 } 1285 out: 1286 return ret; 1287 } 1288 1289 /* 1290 * remove from free_area[] and mark all as Reserved. 1291 */ 1292 static int 1293 offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages, 1294 void *data) 1295 { 1296 __offline_isolated_pages(start, start + nr_pages); 1297 return 0; 1298 } 1299 1300 static void 1301 offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) 1302 { 1303 walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL, 1304 offline_isolated_pages_cb); 1305 } 1306 1307 /* 1308 * Check all pages in range, recoreded as memory resource, are isolated. 1309 */ 1310 static int 1311 check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages, 1312 void *data) 1313 { 1314 int ret; 1315 long offlined = *(long *)data; 1316 ret = test_pages_isolated(start_pfn, start_pfn + nr_pages, true); 1317 offlined = nr_pages; 1318 if (!ret) 1319 *(long *)data += offlined; 1320 return ret; 1321 } 1322 1323 static long 1324 check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn) 1325 { 1326 long offlined = 0; 1327 int ret; 1328 1329 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined, 1330 check_pages_isolated_cb); 1331 if (ret < 0) 1332 offlined = (long)ret; 1333 return offlined; 1334 } 1335 1336 #ifdef CONFIG_MOVABLE_NODE 1337 /* 1338 * When CONFIG_MOVABLE_NODE, we permit offlining of a node which doesn't have 1339 * normal memory. 1340 */ 1341 static bool can_offline_normal(struct zone *zone, unsigned long nr_pages) 1342 { 1343 return true; 1344 } 1345 #else /* CONFIG_MOVABLE_NODE */ 1346 /* ensure the node has NORMAL memory if it is still online */ 1347 static bool can_offline_normal(struct zone *zone, unsigned long nr_pages) 1348 { 1349 struct pglist_data *pgdat = zone->zone_pgdat; 1350 unsigned long present_pages = 0; 1351 enum zone_type zt; 1352 1353 for (zt = 0; zt <= ZONE_NORMAL; zt++) 1354 present_pages += pgdat->node_zones[zt].present_pages; 1355 1356 if (present_pages > nr_pages) 1357 return true; 1358 1359 present_pages = 0; 1360 for (; zt <= ZONE_MOVABLE; zt++) 1361 present_pages += pgdat->node_zones[zt].present_pages; 1362 1363 /* 1364 * we can't offline the last normal memory until all 1365 * higher memory is offlined. 1366 */ 1367 return present_pages == 0; 1368 } 1369 #endif /* CONFIG_MOVABLE_NODE */ 1370 1371 /* check which state of node_states will be changed when offline memory */ 1372 static void node_states_check_changes_offline(unsigned long nr_pages, 1373 struct zone *zone, struct memory_notify *arg) 1374 { 1375 struct pglist_data *pgdat = zone->zone_pgdat; 1376 unsigned long present_pages = 0; 1377 enum zone_type zt, zone_last = ZONE_NORMAL; 1378 1379 /* 1380 * If we have HIGHMEM or movable node, node_states[N_NORMAL_MEMORY] 1381 * contains nodes which have zones of 0...ZONE_NORMAL, 1382 * set zone_last to ZONE_NORMAL. 1383 * 1384 * If we don't have HIGHMEM nor movable node, 1385 * node_states[N_NORMAL_MEMORY] contains nodes which have zones of 1386 * 0...ZONE_MOVABLE, set zone_last to ZONE_MOVABLE. 1387 */ 1388 if (N_MEMORY == N_NORMAL_MEMORY) 1389 zone_last = ZONE_MOVABLE; 1390 1391 /* 1392 * check whether node_states[N_NORMAL_MEMORY] will be changed. 1393 * If the memory to be offline is in a zone of 0...zone_last, 1394 * and it is the last present memory, 0...zone_last will 1395 * become empty after offline , thus we can determind we will 1396 * need to clear the node from node_states[N_NORMAL_MEMORY]. 1397 */ 1398 for (zt = 0; zt <= zone_last; zt++) 1399 present_pages += pgdat->node_zones[zt].present_pages; 1400 if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) 1401 arg->status_change_nid_normal = zone_to_nid(zone); 1402 else 1403 arg->status_change_nid_normal = -1; 1404 1405 #ifdef CONFIG_HIGHMEM 1406 /* 1407 * If we have movable node, node_states[N_HIGH_MEMORY] 1408 * contains nodes which have zones of 0...ZONE_HIGHMEM, 1409 * set zone_last to ZONE_HIGHMEM. 1410 * 1411 * If we don't have movable node, node_states[N_NORMAL_MEMORY] 1412 * contains nodes which have zones of 0...ZONE_MOVABLE, 1413 * set zone_last to ZONE_MOVABLE. 1414 */ 1415 zone_last = ZONE_HIGHMEM; 1416 if (N_MEMORY == N_HIGH_MEMORY) 1417 zone_last = ZONE_MOVABLE; 1418 1419 for (; zt <= zone_last; zt++) 1420 present_pages += pgdat->node_zones[zt].present_pages; 1421 if (zone_idx(zone) <= zone_last && nr_pages >= present_pages) 1422 arg->status_change_nid_high = zone_to_nid(zone); 1423 else 1424 arg->status_change_nid_high = -1; 1425 #else 1426 arg->status_change_nid_high = arg->status_change_nid_normal; 1427 #endif 1428 1429 /* 1430 * node_states[N_HIGH_MEMORY] contains nodes which have 0...ZONE_MOVABLE 1431 */ 1432 zone_last = ZONE_MOVABLE; 1433 1434 /* 1435 * check whether node_states[N_HIGH_MEMORY] will be changed 1436 * If we try to offline the last present @nr_pages from the node, 1437 * we can determind we will need to clear the node from 1438 * node_states[N_HIGH_MEMORY]. 1439 */ 1440 for (; zt <= zone_last; zt++) 1441 present_pages += pgdat->node_zones[zt].present_pages; 1442 if (nr_pages >= present_pages) 1443 arg->status_change_nid = zone_to_nid(zone); 1444 else 1445 arg->status_change_nid = -1; 1446 } 1447 1448 static void node_states_clear_node(int node, struct memory_notify *arg) 1449 { 1450 if (arg->status_change_nid_normal >= 0) 1451 node_clear_state(node, N_NORMAL_MEMORY); 1452 1453 if ((N_MEMORY != N_NORMAL_MEMORY) && 1454 (arg->status_change_nid_high >= 0)) 1455 node_clear_state(node, N_HIGH_MEMORY); 1456 1457 if ((N_MEMORY != N_HIGH_MEMORY) && 1458 (arg->status_change_nid >= 0)) 1459 node_clear_state(node, N_MEMORY); 1460 } 1461 1462 static int __ref __offline_pages(unsigned long start_pfn, 1463 unsigned long end_pfn, unsigned long timeout) 1464 { 1465 unsigned long pfn, nr_pages, expire; 1466 long offlined_pages; 1467 int ret, drain, retry_max, node; 1468 struct zone *zone; 1469 struct memory_notify arg; 1470 1471 BUG_ON(start_pfn >= end_pfn); 1472 /* at least, alignment against pageblock is necessary */ 1473 if (!IS_ALIGNED(start_pfn, pageblock_nr_pages)) 1474 return -EINVAL; 1475 if (!IS_ALIGNED(end_pfn, pageblock_nr_pages)) 1476 return -EINVAL; 1477 /* This makes hotplug much easier...and readable. 1478 we assume this for now. .*/ 1479 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 1480 return -EINVAL; 1481 1482 lock_memory_hotplug(); 1483 1484 zone = page_zone(pfn_to_page(start_pfn)); 1485 node = zone_to_nid(zone); 1486 nr_pages = end_pfn - start_pfn; 1487 1488 ret = -EINVAL; 1489 if (zone_idx(zone) <= ZONE_NORMAL && !can_offline_normal(zone, nr_pages)) 1490 goto out; 1491 1492 /* set above range as isolated */ 1493 ret = start_isolate_page_range(start_pfn, end_pfn, 1494 MIGRATE_MOVABLE, true); 1495 if (ret) 1496 goto out; 1497 1498 arg.start_pfn = start_pfn; 1499 arg.nr_pages = nr_pages; 1500 node_states_check_changes_offline(nr_pages, zone, &arg); 1501 1502 ret = memory_notify(MEM_GOING_OFFLINE, &arg); 1503 ret = notifier_to_errno(ret); 1504 if (ret) 1505 goto failed_removal; 1506 1507 pfn = start_pfn; 1508 expire = jiffies + timeout; 1509 drain = 0; 1510 retry_max = 5; 1511 repeat: 1512 /* start memory hot removal */ 1513 ret = -EAGAIN; 1514 if (time_after(jiffies, expire)) 1515 goto failed_removal; 1516 ret = -EINTR; 1517 if (signal_pending(current)) 1518 goto failed_removal; 1519 ret = 0; 1520 if (drain) { 1521 lru_add_drain_all(); 1522 cond_resched(); 1523 drain_all_pages(); 1524 } 1525 1526 pfn = scan_lru_pages(start_pfn, end_pfn); 1527 if (pfn) { /* We have page on LRU */ 1528 ret = do_migrate_range(pfn, end_pfn); 1529 if (!ret) { 1530 drain = 1; 1531 goto repeat; 1532 } else { 1533 if (ret < 0) 1534 if (--retry_max == 0) 1535 goto failed_removal; 1536 yield(); 1537 drain = 1; 1538 goto repeat; 1539 } 1540 } 1541 /* drain all zone's lru pagevec, this is asynchronous... */ 1542 lru_add_drain_all(); 1543 yield(); 1544 /* drain pcp pages, this is synchronous. */ 1545 drain_all_pages(); 1546 /* check again */ 1547 offlined_pages = check_pages_isolated(start_pfn, end_pfn); 1548 if (offlined_pages < 0) { 1549 ret = -EBUSY; 1550 goto failed_removal; 1551 } 1552 printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages); 1553 /* Ok, all of our target is isolated. 1554 We cannot do rollback at this point. */ 1555 offline_isolated_pages(start_pfn, end_pfn); 1556 /* reset pagetype flags and makes migrate type to be MOVABLE */ 1557 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 1558 /* removal success */ 1559 zone->managed_pages -= offlined_pages; 1560 zone->present_pages -= offlined_pages; 1561 zone->zone_pgdat->node_present_pages -= offlined_pages; 1562 totalram_pages -= offlined_pages; 1563 1564 init_per_zone_wmark_min(); 1565 1566 if (!populated_zone(zone)) { 1567 zone_pcp_reset(zone); 1568 mutex_lock(&zonelists_mutex); 1569 build_all_zonelists(NULL, NULL); 1570 mutex_unlock(&zonelists_mutex); 1571 } else 1572 zone_pcp_update(zone); 1573 1574 node_states_clear_node(node, &arg); 1575 if (arg.status_change_nid >= 0) 1576 kswapd_stop(node); 1577 1578 vm_total_pages = nr_free_pagecache_pages(); 1579 writeback_set_ratelimit(); 1580 1581 memory_notify(MEM_OFFLINE, &arg); 1582 unlock_memory_hotplug(); 1583 return 0; 1584 1585 failed_removal: 1586 printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n", 1587 (unsigned long long) start_pfn << PAGE_SHIFT, 1588 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1); 1589 memory_notify(MEM_CANCEL_OFFLINE, &arg); 1590 /* pushback to free area */ 1591 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE); 1592 1593 out: 1594 unlock_memory_hotplug(); 1595 return ret; 1596 } 1597 1598 int offline_pages(unsigned long start_pfn, unsigned long nr_pages) 1599 { 1600 return __offline_pages(start_pfn, start_pfn + nr_pages, 120 * HZ); 1601 } 1602 1603 /** 1604 * walk_memory_range - walks through all mem sections in [start_pfn, end_pfn) 1605 * @start_pfn: start pfn of the memory range 1606 * @end_pfn: end pft of the memory range 1607 * @arg: argument passed to func 1608 * @func: callback for each memory section walked 1609 * 1610 * This function walks through all present mem sections in range 1611 * [start_pfn, end_pfn) and call func on each mem section. 1612 * 1613 * Returns the return value of func. 1614 */ 1615 static int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, 1616 void *arg, int (*func)(struct memory_block *, void *)) 1617 { 1618 struct memory_block *mem = NULL; 1619 struct mem_section *section; 1620 unsigned long pfn, section_nr; 1621 int ret; 1622 1623 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 1624 section_nr = pfn_to_section_nr(pfn); 1625 if (!present_section_nr(section_nr)) 1626 continue; 1627 1628 section = __nr_to_section(section_nr); 1629 /* same memblock? */ 1630 if (mem) 1631 if ((section_nr >= mem->start_section_nr) && 1632 (section_nr <= mem->end_section_nr)) 1633 continue; 1634 1635 mem = find_memory_block_hinted(section, mem); 1636 if (!mem) 1637 continue; 1638 1639 ret = func(mem, arg); 1640 if (ret) { 1641 kobject_put(&mem->dev.kobj); 1642 return ret; 1643 } 1644 } 1645 1646 if (mem) 1647 kobject_put(&mem->dev.kobj); 1648 1649 return 0; 1650 } 1651 1652 /** 1653 * offline_memory_block_cb - callback function for offlining memory block 1654 * @mem: the memory block to be offlined 1655 * @arg: buffer to hold error msg 1656 * 1657 * Always return 0, and put the error msg in arg if any. 1658 */ 1659 static int offline_memory_block_cb(struct memory_block *mem, void *arg) 1660 { 1661 int *ret = arg; 1662 int error = offline_memory_block(mem); 1663 1664 if (error != 0 && *ret == 0) 1665 *ret = error; 1666 1667 return 0; 1668 } 1669 1670 static int is_memblock_offlined_cb(struct memory_block *mem, void *arg) 1671 { 1672 int ret = !is_memblock_offlined(mem); 1673 1674 if (unlikely(ret)) 1675 pr_warn("removing memory fails, because memory " 1676 "[%#010llx-%#010llx] is onlined\n", 1677 PFN_PHYS(section_nr_to_pfn(mem->start_section_nr)), 1678 PFN_PHYS(section_nr_to_pfn(mem->end_section_nr + 1))-1); 1679 1680 return ret; 1681 } 1682 1683 static int check_cpu_on_node(void *data) 1684 { 1685 struct pglist_data *pgdat = data; 1686 int cpu; 1687 1688 for_each_present_cpu(cpu) { 1689 if (cpu_to_node(cpu) == pgdat->node_id) 1690 /* 1691 * the cpu on this node isn't removed, and we can't 1692 * offline this node. 1693 */ 1694 return -EBUSY; 1695 } 1696 1697 return 0; 1698 } 1699 1700 /* offline the node if all memory sections of this node are removed */ 1701 static void try_offline_node(int nid) 1702 { 1703 pg_data_t *pgdat = NODE_DATA(nid); 1704 unsigned long start_pfn = pgdat->node_start_pfn; 1705 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; 1706 unsigned long pfn; 1707 struct page *pgdat_page = virt_to_page(pgdat); 1708 int i; 1709 1710 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 1711 unsigned long section_nr = pfn_to_section_nr(pfn); 1712 1713 if (!present_section_nr(section_nr)) 1714 continue; 1715 1716 if (pfn_to_nid(pfn) != nid) 1717 continue; 1718 1719 /* 1720 * some memory sections of this node are not removed, and we 1721 * can't offline node now. 1722 */ 1723 return; 1724 } 1725 1726 if (stop_machine(check_cpu_on_node, pgdat, NULL)) 1727 return; 1728 1729 /* 1730 * all memory/cpu of this node are removed, we can offline this 1731 * node now. 1732 */ 1733 node_set_offline(nid); 1734 unregister_one_node(nid); 1735 1736 if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page)) 1737 /* node data is allocated from boot memory */ 1738 return; 1739 1740 /* free waittable in each zone */ 1741 for (i = 0; i < MAX_NR_ZONES; i++) { 1742 struct zone *zone = pgdat->node_zones + i; 1743 1744 if (zone->wait_table) 1745 vfree(zone->wait_table); 1746 } 1747 1748 /* 1749 * Since there is no way to guarentee the address of pgdat/zone is not 1750 * on stack of any kernel threads or used by other kernel objects 1751 * without reference counting or other symchronizing method, do not 1752 * reset node_data and free pgdat here. Just reset it to 0 and reuse 1753 * the memory when the node is online again. 1754 */ 1755 memset(pgdat, 0, sizeof(*pgdat)); 1756 } 1757 1758 int __ref remove_memory(int nid, u64 start, u64 size) 1759 { 1760 unsigned long start_pfn, end_pfn; 1761 int ret = 0; 1762 int retry = 1; 1763 1764 start_pfn = PFN_DOWN(start); 1765 end_pfn = start_pfn + PFN_DOWN(size); 1766 1767 /* 1768 * When CONFIG_MEMCG is on, one memory block may be used by other 1769 * blocks to store page cgroup when onlining pages. But we don't know 1770 * in what order pages are onlined. So we iterate twice to offline 1771 * memory: 1772 * 1st iterate: offline every non primary memory block. 1773 * 2nd iterate: offline primary (i.e. first added) memory block. 1774 */ 1775 repeat: 1776 walk_memory_range(start_pfn, end_pfn, &ret, 1777 offline_memory_block_cb); 1778 if (ret) { 1779 if (!retry) 1780 return ret; 1781 1782 retry = 0; 1783 ret = 0; 1784 goto repeat; 1785 } 1786 1787 lock_memory_hotplug(); 1788 1789 /* 1790 * we have offlined all memory blocks like this: 1791 * 1. lock memory hotplug 1792 * 2. offline a memory block 1793 * 3. unlock memory hotplug 1794 * 1795 * repeat step1-3 to offline the memory block. All memory blocks 1796 * must be offlined before removing memory. But we don't hold the 1797 * lock in the whole operation. So we should check whether all 1798 * memory blocks are offlined. 1799 */ 1800 1801 ret = walk_memory_range(start_pfn, end_pfn, NULL, 1802 is_memblock_offlined_cb); 1803 if (ret) { 1804 unlock_memory_hotplug(); 1805 return ret; 1806 } 1807 1808 /* remove memmap entry */ 1809 firmware_map_remove(start, start + size, "System RAM"); 1810 1811 arch_remove_memory(start, size); 1812 1813 try_offline_node(nid); 1814 1815 unlock_memory_hotplug(); 1816 1817 return 0; 1818 } 1819 #else 1820 int offline_pages(unsigned long start_pfn, unsigned long nr_pages) 1821 { 1822 return -EINVAL; 1823 } 1824 int remove_memory(int nid, u64 start, u64 size) 1825 { 1826 return -EINVAL; 1827 } 1828 #endif /* CONFIG_MEMORY_HOTREMOVE */ 1829 EXPORT_SYMBOL_GPL(remove_memory); 1830