// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local xarray to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;

	return sysfs_emit(buf, "%08lx\n", phys_index);
}
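
/*
 * Note: each memory block is exposed to userspace as a device directory,
 * /sys/devices/system/memory/memoryN, where N is the phys_index value shown
 * above (start_section_nr / sections_per_block). The per-block attributes
 * defined below (removable, state, phys_device, valid_zones) live in that
 * directory.
 */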

/*
 * Legacy interface that we cannot remove: always indicate "removable"
 * when CONFIG_MEMORY_HOTREMOVE is enabled - a bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(unsigned long start_section_nr, unsigned long action,
		    int online_type, int nid)
{
	unsigned long start_pfn;
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	int ret;

	start_pfn = section_nr_to_pfn(start_section_nr);

	switch (action) {
	case MEM_ONLINE:
		ret = online_pages(start_pfn, nr_pages, online_type, nid);
		break;
	case MEM_OFFLINE:
		ret = offline_pages(start_pfn, nr_pages);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}
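
/*
 * Transition a memory block between MEM_ONLINE and MEM_OFFLINE. While pages
 * are being offlined, the intermediate MEM_GOING_OFFLINE state is visible
 * through the "state" attribute; if memory_block_action() fails, the previous
 * state is restored.
 */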
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem->start_section_nr, to_state,
				  mem->online_type, mem->nid);

	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}

/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * architectures never exposed anything other than 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *default_zone;
	int len = 0;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on the
	 * online nodes, otherwise the page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone cannot be offlined.
		 * This can happen, e.g., for ZONE_DMA and ZONE_DMA32.
		 */
		default_zone = test_pages_in_a_zone(start_pfn,
						    start_pfn + nr_pages);
		if (!default_zone)
			return sysfs_emit(buf, "%s\n", "none");
		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
					  nr_pages);

	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
out:
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}
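
/*
 * Note: like soft_offline_page above, the hard_offline_page attribute below
 * takes a physical byte address rather than a PFN, hence the PAGE_SHIFT
 * conversion before calling into the memory-failure code.
 */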

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif

/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	unsigned long block_id = memory_block_id(__section_nr(section));

	return find_memory_block_by_id(block_id);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Set up a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret) {
		put_device(&memory->dev);
		device_unregister(&memory->dev);
	}
	return ret;
}

static int init_memory_block(unsigned long block_id, unsigned long state)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;

	ret = register_memory(mem);

	return ret;
}

static int add_memory_block(unsigned long base_section_nr)
{
	int section_count = 0;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	return init_memory_block(memory_block_id(base_section_nr),
				 MEM_ONLINE);
}
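
/*
 * Tear down the sysfs device and drop the xarray entry for a memory block.
 * The caller is expected to hold a reference obtained via
 * find_memory_block_by_id(); it is dropped here, and the block is finally
 * freed by memory_block_release() once the last device reference goes away.
 */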
static void unregister_memory(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = init_memory_block(block_id, MEM_OFFLINE);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			unregister_memory(mem);
		}
	}
	return ret;
}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
}

/* Return true if the memory block is offline; otherwise, return false. */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};
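
/*
 * memory_dev_init() below registers the "memory" subsystem together with the
 * root attribute groups above (probe, soft/hard_offline_page,
 * block_size_bytes, auto_online_blocks); the per-block attribute groups are
 * attached to each block device in register_memory().
 */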

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}

struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}