// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/stat.h>
#include <linux/slab.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(mem_sysfs_mutex);

#define MEMORY_CLASS_NAME	"memory"

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline int base_memory_block_id(int section_nr)
{
	return section_nr / sections_per_block;
}

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
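
/*
 * A minimal usage sketch (illustrative only, not part of this file): a
 * subsystem that wants to react to hotplug events registers a callback
 * and switches on the action delivered through memory_notify().  The
 * foo_* names below are hypothetical.
 *
 *	static int foo_memory_callback(struct notifier_block *self,
 *				       unsigned long action, void *arg)
 *	{
 *		struct memory_notify *mn = arg;
 *
 *		switch (action) {
 *		case MEM_ONLINE:
 *			pr_info("onlined %lu pages at pfn %#lx\n",
 *				mn->nr_pages, mn->start_pfn);
 *			break;
 *		case MEM_OFFLINE:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_memory_nb = {
 *		.notifier_call = foo_memory_callback,
 *	};
 *
 *	register_memory_notifier(&foo_memory_nb);
 */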

static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);

int register_memory_isolate_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(register_memory_isolate_notifier);

void unregister_memory_isolate_notifier(struct notifier_block *nb)
{
	atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_isolate_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}

static unsigned long get_memory_block_size(void)
{
	unsigned long block_sz;

	block_sz = memory_block_size_bytes();

	/* Validate blk_sz is a power of 2 and not less than section size */
	if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) {
		WARN_ON(1);
		block_sz = MIN_MEMORY_BLOCK_SIZE;
	}

	return block_sz;
}

/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t show_mem_start_phys_index(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;
	return sprintf(buf, "%08lx\n", phys_index);
}

/*
 * Show whether the section of memory is likely to be hot-removable
 */
static ssize_t show_mem_removable(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	unsigned long i, pfn;
	int ret = 1;
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state != MEM_ONLINE)
		goto out;

	for (i = 0; i < sections_per_block; i++) {
		if (!present_section_nr(mem->start_section_nr + i))
			continue;
		pfn = section_nr_to_pfn(mem->start_section_nr + i);
		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
	}

out:
	return sprintf(buf, "%d\n", ret);
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t show_mem_state(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	ssize_t len = 0;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		len = sprintf(buf, "online\n");
		break;
	case MEM_OFFLINE:
		len = sprintf(buf, "offline\n");
		break;
	case MEM_GOING_OFFLINE:
		len = sprintf(buf, "going-offline\n");
		break;
	default:
		len = sprintf(buf, "ERROR-UNKNOWN-%ld\n",
				mem->state);
		WARN_ON(1);
		break;
	}

	return len;
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

int memory_isolate_notify(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
}

/*
 * The probe routines leave the pages uninitialized, just as the bootmem code
 * does. Make sure we do not access them, but instead use only information from
 * within sections.
 */
static bool pages_correctly_probed(unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	unsigned long section_nr_end = section_nr + sections_per_block;
	unsigned long pfn = start_pfn;

	/*
	 * memmap between sections is not contiguous except with
	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
	 * and assume memmap is contiguous within each section
	 */
	for (; section_nr < section_nr_end; section_nr++) {
		if (WARN_ON_ONCE(!pfn_valid(pfn)))
			return false;

		if (!present_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) not present",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (!valid_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) no valid memmap",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		} else if (online_section_nr(section_nr)) {
			pr_warn("section %ld pfn[%lx, %lx) is already online",
				section_nr, pfn, pfn + PAGES_PER_SECTION);
			return false;
		}
		pfn += PAGES_PER_SECTION;
	}

	return true;
}
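
/*
 * A worked example (illustrative only), assuming an x86_64-like
 * configuration with 128MiB sections, 4KiB pages and a memory block size
 * equal to one section: then sections_per_block = 1 and
 * PAGES_PER_SECTION * sections_per_block = 32768, so a single block
 * action below onlines or offlines 32768 pages starting at the block's
 * first pfn.
 */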

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 * Must already be protected by mem_hotplug_begin().
 */
static int
memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(phys_index);

	switch (action) {
	case MEM_ONLINE:
		if (!pages_correctly_probed(start_pfn))
			return -EBUSY;

		ret = online_pages(start_pfn, nr_pages, online_type);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, phys_index, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				mem->online_type);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * If we are called from store_mem_state(), online_type will be
	 * set >= 0.  Otherwise we were called from the device online
	 * attribute and need to set the online_type.
	 */
	if (mem->online_type < 0)
		mem->online_type = MMOP_ONLINE_KEEP;

	/* Already under protection of mem_hotplug_begin() */
	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);

	/* clear online_type */
	mem->online_type = -1;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	/* Can't offline block with non-present sections */
	if (mem->section_count != sections_per_block)
		return -EINVAL;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t
store_mem_state(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret, online_type;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (sysfs_streq(buf, "online_kernel"))
		online_type = MMOP_ONLINE_KERNEL;
	else if (sysfs_streq(buf, "online_movable"))
		online_type = MMOP_ONLINE_MOVABLE;
	else if (sysfs_streq(buf, "online"))
		online_type = MMOP_ONLINE_KEEP;
	else if (sysfs_streq(buf, "offline"))
		online_type = MMOP_OFFLINE;
	else {
		ret = -EINVAL;
		goto err;
	}

	/*
	 * Memory hotplug needs to hold mem_hotplug_begin() for probe to find
	 * the correct memory block to online before doing device_online(dev),
	 * which will take dev->mutex.  Take the lock early to prevent an
	 * inversion; the memory_subsys_online() callback is implemented
	 * assuming it is already protected.
	 */
	mem_hotplug_begin();

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE_KEEP:
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	mem_hotplug_done();
err:
	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
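
/*
 * Illustrative usage only (the block number is hypothetical): a block is
 * onlined or offlined from userspace by writing one of the strings parsed
 * above into its "state" attribute:
 *
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *	# echo offline > /sys/devices/system/memory/memory32/state
 *
 * "online_kernel" requests a kernel zone, "online_movable" requests
 * ZONE_MOVABLE, and plain "online" keeps the default policy, matching the
 * MMOP_* values above.
 */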

/*
 * phys_device is a bad name for this.  What I really want
 * is a way to differentiate between memory ranges that
 * are part of physical devices that constitute
 * a complete removable unit or fru.
 * i.e. do these ranges belong to the same physical device,
 * s.t. if I offline all of these sections I can then
 * remove the physical device?
 */
static ssize_t show_phys_device(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	return sprintf(buf, "%d\n", mem->phys_device);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
			       unsigned long nr_pages, int online_type,
			       struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone != default_zone) {
		strcat(buf, " ");
		strcat(buf, zone->name);
	}
}

static ssize_t show_valid_zones(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long valid_start_pfn, valid_end_pfn;
	struct zone *default_zone;
	int nid;

	/*
	 * A block that contains more than one zone can not be offlined.
	 * This can happen e.g. for ZONE_DMA and ZONE_DMA32.
	 */
	if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
		return sprintf(buf, "none\n");

	start_pfn = valid_start_pfn;
	nr_pages = valid_end_pfn - start_pfn;

	/*
	 * Check the existing zone.  Make sure that we do that only on the
	 * online nodes, otherwise the page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
		goto out;
	}

	nid = pfn_to_nid(start_pfn);
	default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
	strcat(buf, default_zone->name);

	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
			default_zone);
	print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
			default_zone);
out:
	strcat(buf, "\n");

	return strlen(buf);
}
static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
#endif

static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);

/*
 * Block size attribute stuff
 */
static ssize_t
print_block_size(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	return sprintf(buf, "%lx\n", get_memory_block_size());
}

static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);

/*
 * Memory auto online policy.
 */

static ssize_t
show_auto_online_blocks(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	if (memhp_auto_online)
		return sprintf(buf, "online\n");
	else
		return sprintf(buf, "offline\n");
}

static ssize_t
store_auto_online_blocks(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	if (sysfs_streq(buf, "online"))
		memhp_auto_online = true;
	else if (sysfs_streq(buf, "offline"))
		memhp_auto_online = false;
	else
		return -EINVAL;

	return count;
}

static DEVICE_ATTR(auto_online_blocks, 0644, show_auto_online_blocks,
		   store_auto_online_blocks);

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace.  The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
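/*
 * Illustrative usage only (the address is hypothetical and must be aligned
 * to the memory block size):
 *
 *	# echo 0x100000000 > /sys/devices/system/memory/probe
 */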
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
memory_probe_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = add_memory(nid, phys_addr,
			 MIN_MEMORY_BLOCK_SIZE * sections_per_block);

	if (ret)
		goto out;

	ret = count;
out:
	return ret;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t
store_soft_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t
store_hard_offline_page(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
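
/*
 * Illustrative usage only (the address is hypothetical): both attributes
 * take a physical address, which is converted to a pfn above:
 *
 *	# echo 0x200000000 > /sys/devices/system/memory/soft_offline_page
 *	# echo 0x200000000 > /sys/devices/system/memory/hard_offline_page
 */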
#endif

/*
 * Note that phys_device is optional.  It is here to allow for
 * differentiation between which *physical* devices each
 * section belongs to...
 */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned object is held and the reference for the
 * hinted object is released.
 */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
					      struct memory_block *hint)
{
	int block_id = base_memory_block_id(__section_nr(section));
	struct device *hintdev = hint ? &hint->dev : NULL;
	struct device *dev;

	dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
	if (hint)
		put_device(&hint->dev);
	if (!dev)
		return NULL;
	return to_memory_block(dev);
}

/*
 * For now, we have a linear search to go find the appropriate
 * memory_block corresponding to a particular phys_index. If
 * this gets to be a real problem, we can always use a radix
 * tree or something here.
 *
 * This could be made generic for all device subsystems.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	return find_memory_block_hinted(section, NULL);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret)
		put_device(&memory->dev);

	return ret;
}
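
/*
 * A worked example (the numbers are illustrative): with 8 sections per
 * block, the block whose first section is 256 registers with
 * dev.id = 256 / 8 = 32 and is exposed as
 * /sys/devices/system/memory/memory32.
 */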

static int init_memory_block(struct memory_block **memory,
			     struct mem_section *section, unsigned long state)
{
	struct memory_block *mem;
	unsigned long start_pfn;
	int scn_nr;
	int ret = 0;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	scn_nr = __section_nr(section);
	mem->start_section_nr =
			base_memory_block_id(scn_nr) * sections_per_block;
	mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
	mem->state = state;
	start_pfn = section_nr_to_pfn(mem->start_section_nr);
	mem->phys_device = arch_get_memory_phys_device(start_pfn);

	ret = register_memory(mem);

	*memory = mem;
	return ret;
}

static int add_memory_block(int base_section_nr)
{
	struct memory_block *mem;
	int i, ret, section_count = 0, section_nr;

	for (i = base_section_nr;
	     (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
	     i++) {
		if (!present_section_nr(i))
			continue;
		if (section_count == 0)
			section_nr = i;
		section_count++;
	}

	if (section_count == 0)
		return 0;
	ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
	if (ret)
		return ret;
	mem->section_count = section_count;
	return 0;
}

/*
 * need an interface for the VM to add new memory regions,
 * but without onlining it.
 */
int hotplug_memory_register(int nid, struct mem_section *section)
{
	int ret = 0;
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);

	mem = find_memory_block(section);
	if (mem) {
		mem->section_count++;
		put_device(&mem->dev);
	} else {
		ret = init_memory_block(&mem, section, MEM_OFFLINE);
		if (ret)
			goto out;
		mem->section_count++;
	}

	if (mem->section_count == sections_per_block)
		ret = register_mem_sect_under_node(mem, nid, false);
out:
	mutex_unlock(&mem_sysfs_mutex);
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void
unregister_memory(struct memory_block *memory)
{
	BUG_ON(memory->dev.bus != &memory_subsys);

	/* drop the ref. we got in remove_memory_section() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

static int remove_memory_section(unsigned long node_id,
			       struct mem_section *section, int phys_device)
{
	struct memory_block *mem;

	mutex_lock(&mem_sysfs_mutex);

	/*
	 * Some users of the memory hotplug do not want/need memblock to
	 * track all sections. Skip over those.
	 */
	mem = find_memory_block(section);
	if (!mem)
		goto out_unlock;

	unregister_mem_sect_under_nodes(mem, __section_nr(section));

	mem->section_count--;
	if (mem->section_count == 0)
		unregister_memory(mem);
	else
		put_device(&mem->dev);

out_unlock:
	mutex_unlock(&mem_sysfs_mutex);
	return 0;
}

int unregister_memory_section(struct mem_section *section)
{
	if (!present_section(section))
		return -EINVAL;

	return remove_memory_section(0, section, 0);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

/* return true if the memory block is offlined, otherwise, return false */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices...
 */
int __init memory_dev_init(void)
{
	unsigned int i;
	int ret;
	int err;
	unsigned long block_sz;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		goto out;

	block_sz = get_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	mutex_lock(&mem_sysfs_mutex);
	for (i = 0; i <= __highest_present_section_nr;
		i += sections_per_block) {
		err = add_memory_block(i);
		if (!ret)
			ret = err;
	}
	mutex_unlock(&mem_sysfs_mutex);

out:
	if (ret)
		printk(KERN_ERR "%s() failed: %d\n", __func__, ret);
	return ret;
}