1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Procedures for maintaining information about logical memory blocks. 4 * 5 * Peter Bergner, IBM Corp. June 2001. 6 * Copyright (C) 2001 Peter Bergner. 7 */ 8 9 #include <linux/kernel.h> 10 #include <linux/slab.h> 11 #include <linux/init.h> 12 #include <linux/bitops.h> 13 #include <linux/poison.h> 14 #include <linux/pfn.h> 15 #include <linux/debugfs.h> 16 #include <linux/kmemleak.h> 17 #include <linux/seq_file.h> 18 #include <linux/memblock.h> 19 20 #include <asm/sections.h> 21 #include <linux/io.h> 22 23 #include "internal.h" 24 25 #define INIT_MEMBLOCK_REGIONS 128 26 #define INIT_PHYSMEM_REGIONS 4 27 28 #ifndef INIT_MEMBLOCK_RESERVED_REGIONS 29 # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS 30 #endif 31 32 /** 33 * DOC: memblock overview 34 * 35 * Memblock is a method of managing memory regions during the early 36 * boot period when the usual kernel memory allocators are not up and 37 * running. 38 * 39 * Memblock views the system memory as collections of contiguous 40 * regions. There are several types of these collections: 41 * 42 * * ``memory`` - describes the physical memory available to the 43 * kernel; this may differ from the actual physical memory installed 44 * in the system, for instance when the memory is restricted with 45 * ``mem=`` command line parameter 46 * * ``reserved`` - describes the regions that were allocated 47 * * ``physmem`` - describes the actual physical memory available during 48 * boot regardless of the possible restrictions and memory hot(un)plug; 49 * the ``physmem`` type is only available on some architectures. 50 * 51 * Each region is represented by :c:type:`struct memblock_region` that 52 * defines the region extents, its attributes and NUMA node id on NUMA 53 * systems. Every memory type is described by the :c:type:`struct 54 * memblock_type` which contains an array of memory regions along with 55 * the allocator metadata. The "memory" and "reserved" types are nicely 56 * wrapped with :c:type:`struct memblock`. This structure is statically 57 * initialized at build time. The region arrays are initially sized to 58 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS 59 * for "reserved". The region array for "physmem" is initially sized to 60 * %INIT_PHYSMEM_REGIONS. 61 * The memblock_allow_resize() enables automatic resizing of the region 62 * arrays during addition of new regions. This feature should be used 63 * with care so that memory allocated for the region array will not 64 * overlap with areas that should be reserved, for example initrd. 65 * 66 * The early architecture setup should tell memblock what the physical 67 * memory layout is by using memblock_add() or memblock_add_node() 68 * functions. The first function does not assign the region to a NUMA 69 * node and it is appropriate for UMA systems. Yet, it is possible to 70 * use it on NUMA systems as well and assign the region to a NUMA node 71 * later in the setup process using memblock_set_node(). The 72 * memblock_add_node() performs such an assignment directly. 73 * 74 * Once memblock is setup the memory can be allocated using one of the 75 * API variants: 76 * 77 * * memblock_phys_alloc*() - these functions return the **physical** 78 * address of the allocated memory 79 * * memblock_alloc*() - these functions return the **virtual** address 80 * of the allocated memory. 81 * 82 * Note, that both API variants use implicit assumptions about allowed 83 * memory ranges and the fallback methods. Consult the documentation 84 * of memblock_alloc_internal() and memblock_alloc_range_nid() 85 * functions for more elaborate description. 86 * 87 * As the system boot progresses, the architecture specific mem_init() 88 * function frees all the memory to the buddy page allocator. 89 * 90 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the 91 * memblock data structures (except "physmem") will be discarded after the 92 * system initialization completes. 93 */ 94 95 #ifndef CONFIG_NEED_MULTIPLE_NODES 96 struct pglist_data __refdata contig_page_data; 97 EXPORT_SYMBOL(contig_page_data); 98 #endif 99 100 unsigned long max_low_pfn; 101 unsigned long min_low_pfn; 102 unsigned long max_pfn; 103 unsigned long long max_possible_pfn; 104 105 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; 106 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; 107 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 108 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS]; 109 #endif 110 111 struct memblock memblock __initdata_memblock = { 112 .memory.regions = memblock_memory_init_regions, 113 .memory.cnt = 1, /* empty dummy entry */ 114 .memory.max = INIT_MEMBLOCK_REGIONS, 115 .memory.name = "memory", 116 117 .reserved.regions = memblock_reserved_init_regions, 118 .reserved.cnt = 1, /* empty dummy entry */ 119 .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, 120 .reserved.name = "reserved", 121 122 .bottom_up = false, 123 .current_limit = MEMBLOCK_ALLOC_ANYWHERE, 124 }; 125 126 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 127 struct memblock_type physmem = { 128 .regions = memblock_physmem_init_regions, 129 .cnt = 1, /* empty dummy entry */ 130 .max = INIT_PHYSMEM_REGIONS, 131 .name = "physmem", 132 }; 133 #endif 134 135 #define for_each_memblock_type(i, memblock_type, rgn) \ 136 for (i = 0, rgn = &memblock_type->regions[0]; \ 137 i < memblock_type->cnt; \ 138 i++, rgn = &memblock_type->regions[i]) 139 140 #define memblock_dbg(fmt, ...) \ 141 do { \ 142 if (memblock_debug) \ 143 pr_info(fmt, ##__VA_ARGS__); \ 144 } while (0) 145 146 static int memblock_debug __initdata_memblock; 147 static bool system_has_some_mirror __initdata_memblock = false; 148 static int memblock_can_resize __initdata_memblock; 149 static int memblock_memory_in_slab __initdata_memblock = 0; 150 static int memblock_reserved_in_slab __initdata_memblock = 0; 151 152 static enum memblock_flags __init_memblock choose_memblock_flags(void) 153 { 154 return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE; 155 } 156 157 /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ 158 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) 159 { 160 return *size = min(*size, PHYS_ADDR_MAX - base); 161 } 162 163 /* 164 * Address comparison utilities 165 */ 166 static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, 167 phys_addr_t base2, phys_addr_t size2) 168 { 169 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 170 } 171 172 bool __init_memblock memblock_overlaps_region(struct memblock_type *type, 173 phys_addr_t base, phys_addr_t size) 174 { 175 unsigned long i; 176 177 for (i = 0; i < type->cnt; i++) 178 if (memblock_addrs_overlap(base, size, type->regions[i].base, 179 type->regions[i].size)) 180 break; 181 return i < type->cnt; 182 } 183 184 /** 185 * __memblock_find_range_bottom_up - find free area utility in bottom-up 186 * @start: start of candidate range 187 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 188 * %MEMBLOCK_ALLOC_ACCESSIBLE 189 * @size: size of free area to find 190 * @align: alignment of free area to find 191 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 192 * @flags: pick from blocks based on memory attributes 193 * 194 * Utility called from memblock_find_in_range_node(), find free area bottom-up. 195 * 196 * Return: 197 * Found address on success, 0 on failure. 198 */ 199 static phys_addr_t __init_memblock 200 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, 201 phys_addr_t size, phys_addr_t align, int nid, 202 enum memblock_flags flags) 203 { 204 phys_addr_t this_start, this_end, cand; 205 u64 i; 206 207 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { 208 this_start = clamp(this_start, start, end); 209 this_end = clamp(this_end, start, end); 210 211 cand = round_up(this_start, align); 212 if (cand < this_end && this_end - cand >= size) 213 return cand; 214 } 215 216 return 0; 217 } 218 219 /** 220 * __memblock_find_range_top_down - find free area utility, in top-down 221 * @start: start of candidate range 222 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 223 * %MEMBLOCK_ALLOC_ACCESSIBLE 224 * @size: size of free area to find 225 * @align: alignment of free area to find 226 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 227 * @flags: pick from blocks based on memory attributes 228 * 229 * Utility called from memblock_find_in_range_node(), find free area top-down. 230 * 231 * Return: 232 * Found address on success, 0 on failure. 233 */ 234 static phys_addr_t __init_memblock 235 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, 236 phys_addr_t size, phys_addr_t align, int nid, 237 enum memblock_flags flags) 238 { 239 phys_addr_t this_start, this_end, cand; 240 u64 i; 241 242 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, 243 NULL) { 244 this_start = clamp(this_start, start, end); 245 this_end = clamp(this_end, start, end); 246 247 if (this_end < size) 248 continue; 249 250 cand = round_down(this_end - size, align); 251 if (cand >= this_start) 252 return cand; 253 } 254 255 return 0; 256 } 257 258 /** 259 * memblock_find_in_range_node - find free area in given range and node 260 * @size: size of free area to find 261 * @align: alignment of free area to find 262 * @start: start of candidate range 263 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 264 * %MEMBLOCK_ALLOC_ACCESSIBLE 265 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 266 * @flags: pick from blocks based on memory attributes 267 * 268 * Find @size free area aligned to @align in the specified range and node. 269 * 270 * When allocation direction is bottom-up, the @start should be greater 271 * than the end of the kernel image. Otherwise, it will be trimmed. The 272 * reason is that we want the bottom-up allocation just near the kernel 273 * image so it is highly likely that the allocated memory and the kernel 274 * will reside in the same node. 275 * 276 * If bottom-up allocation failed, will try to allocate memory top-down. 277 * 278 * Return: 279 * Found address on success, 0 on failure. 280 */ 281 static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, 282 phys_addr_t align, phys_addr_t start, 283 phys_addr_t end, int nid, 284 enum memblock_flags flags) 285 { 286 phys_addr_t kernel_end, ret; 287 288 /* pump up @end */ 289 if (end == MEMBLOCK_ALLOC_ACCESSIBLE || 290 end == MEMBLOCK_ALLOC_KASAN) 291 end = memblock.current_limit; 292 293 /* avoid allocating the first page */ 294 start = max_t(phys_addr_t, start, PAGE_SIZE); 295 end = max(start, end); 296 kernel_end = __pa_symbol(_end); 297 298 /* 299 * try bottom-up allocation only when bottom-up mode 300 * is set and @end is above the kernel image. 301 */ 302 if (memblock_bottom_up() && end > kernel_end) { 303 phys_addr_t bottom_up_start; 304 305 /* make sure we will allocate above the kernel */ 306 bottom_up_start = max(start, kernel_end); 307 308 /* ok, try bottom-up allocation first */ 309 ret = __memblock_find_range_bottom_up(bottom_up_start, end, 310 size, align, nid, flags); 311 if (ret) 312 return ret; 313 314 /* 315 * we always limit bottom-up allocation above the kernel, 316 * but top-down allocation doesn't have the limit, so 317 * retrying top-down allocation may succeed when bottom-up 318 * allocation failed. 319 * 320 * bottom-up allocation is expected to be fail very rarely, 321 * so we use WARN_ONCE() here to see the stack trace if 322 * fail happens. 323 */ 324 WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE), 325 "memblock: bottom-up allocation failed, memory hotremove may be affected\n"); 326 } 327 328 return __memblock_find_range_top_down(start, end, size, align, nid, 329 flags); 330 } 331 332 /** 333 * memblock_find_in_range - find free area in given range 334 * @start: start of candidate range 335 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 336 * %MEMBLOCK_ALLOC_ACCESSIBLE 337 * @size: size of free area to find 338 * @align: alignment of free area to find 339 * 340 * Find @size free area aligned to @align in the specified range. 341 * 342 * Return: 343 * Found address on success, 0 on failure. 344 */ 345 phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, 346 phys_addr_t end, phys_addr_t size, 347 phys_addr_t align) 348 { 349 phys_addr_t ret; 350 enum memblock_flags flags = choose_memblock_flags(); 351 352 again: 353 ret = memblock_find_in_range_node(size, align, start, end, 354 NUMA_NO_NODE, flags); 355 356 if (!ret && (flags & MEMBLOCK_MIRROR)) { 357 pr_warn("Could not allocate %pap bytes of mirrored memory\n", 358 &size); 359 flags &= ~MEMBLOCK_MIRROR; 360 goto again; 361 } 362 363 return ret; 364 } 365 366 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) 367 { 368 type->total_size -= type->regions[r].size; 369 memmove(&type->regions[r], &type->regions[r + 1], 370 (type->cnt - (r + 1)) * sizeof(type->regions[r])); 371 type->cnt--; 372 373 /* Special case for empty arrays */ 374 if (type->cnt == 0) { 375 WARN_ON(type->total_size != 0); 376 type->cnt = 1; 377 type->regions[0].base = 0; 378 type->regions[0].size = 0; 379 type->regions[0].flags = 0; 380 memblock_set_region_node(&type->regions[0], MAX_NUMNODES); 381 } 382 } 383 384 #ifndef CONFIG_ARCH_KEEP_MEMBLOCK 385 /** 386 * memblock_discard - discard memory and reserved arrays if they were allocated 387 */ 388 void __init memblock_discard(void) 389 { 390 phys_addr_t addr, size; 391 392 if (memblock.reserved.regions != memblock_reserved_init_regions) { 393 addr = __pa(memblock.reserved.regions); 394 size = PAGE_ALIGN(sizeof(struct memblock_region) * 395 memblock.reserved.max); 396 __memblock_free_late(addr, size); 397 } 398 399 if (memblock.memory.regions != memblock_memory_init_regions) { 400 addr = __pa(memblock.memory.regions); 401 size = PAGE_ALIGN(sizeof(struct memblock_region) * 402 memblock.memory.max); 403 __memblock_free_late(addr, size); 404 } 405 } 406 #endif 407 408 /** 409 * memblock_double_array - double the size of the memblock regions array 410 * @type: memblock type of the regions array being doubled 411 * @new_area_start: starting address of memory range to avoid overlap with 412 * @new_area_size: size of memory range to avoid overlap with 413 * 414 * Double the size of the @type regions array. If memblock is being used to 415 * allocate memory for a new reserved regions array and there is a previously 416 * allocated memory range [@new_area_start, @new_area_start + @new_area_size] 417 * waiting to be reserved, ensure the memory used by the new array does 418 * not overlap. 419 * 420 * Return: 421 * 0 on success, -1 on failure. 422 */ 423 static int __init_memblock memblock_double_array(struct memblock_type *type, 424 phys_addr_t new_area_start, 425 phys_addr_t new_area_size) 426 { 427 struct memblock_region *new_array, *old_array; 428 phys_addr_t old_alloc_size, new_alloc_size; 429 phys_addr_t old_size, new_size, addr, new_end; 430 int use_slab = slab_is_available(); 431 int *in_slab; 432 433 /* We don't allow resizing until we know about the reserved regions 434 * of memory that aren't suitable for allocation 435 */ 436 if (!memblock_can_resize) 437 return -1; 438 439 /* Calculate new doubled size */ 440 old_size = type->max * sizeof(struct memblock_region); 441 new_size = old_size << 1; 442 /* 443 * We need to allocated new one align to PAGE_SIZE, 444 * so we can free them completely later. 445 */ 446 old_alloc_size = PAGE_ALIGN(old_size); 447 new_alloc_size = PAGE_ALIGN(new_size); 448 449 /* Retrieve the slab flag */ 450 if (type == &memblock.memory) 451 in_slab = &memblock_memory_in_slab; 452 else 453 in_slab = &memblock_reserved_in_slab; 454 455 /* Try to find some space for it */ 456 if (use_slab) { 457 new_array = kmalloc(new_size, GFP_KERNEL); 458 addr = new_array ? __pa(new_array) : 0; 459 } else { 460 /* only exclude range when trying to double reserved.regions */ 461 if (type != &memblock.reserved) 462 new_area_start = new_area_size = 0; 463 464 addr = memblock_find_in_range(new_area_start + new_area_size, 465 memblock.current_limit, 466 new_alloc_size, PAGE_SIZE); 467 if (!addr && new_area_size) 468 addr = memblock_find_in_range(0, 469 min(new_area_start, memblock.current_limit), 470 new_alloc_size, PAGE_SIZE); 471 472 new_array = addr ? __va(addr) : NULL; 473 } 474 if (!addr) { 475 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", 476 type->name, type->max, type->max * 2); 477 return -1; 478 } 479 480 new_end = addr + new_size - 1; 481 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]", 482 type->name, type->max * 2, &addr, &new_end); 483 484 /* 485 * Found space, we now need to move the array over before we add the 486 * reserved region since it may be our reserved array itself that is 487 * full. 488 */ 489 memcpy(new_array, type->regions, old_size); 490 memset(new_array + type->max, 0, old_size); 491 old_array = type->regions; 492 type->regions = new_array; 493 type->max <<= 1; 494 495 /* Free old array. We needn't free it if the array is the static one */ 496 if (*in_slab) 497 kfree(old_array); 498 else if (old_array != memblock_memory_init_regions && 499 old_array != memblock_reserved_init_regions) 500 memblock_free(__pa(old_array), old_alloc_size); 501 502 /* 503 * Reserve the new array if that comes from the memblock. Otherwise, we 504 * needn't do it 505 */ 506 if (!use_slab) 507 BUG_ON(memblock_reserve(addr, new_alloc_size)); 508 509 /* Update slab flag */ 510 *in_slab = use_slab; 511 512 return 0; 513 } 514 515 /** 516 * memblock_merge_regions - merge neighboring compatible regions 517 * @type: memblock type to scan 518 * 519 * Scan @type and merge neighboring compatible regions. 520 */ 521 static void __init_memblock memblock_merge_regions(struct memblock_type *type) 522 { 523 int i = 0; 524 525 /* cnt never goes below 1 */ 526 while (i < type->cnt - 1) { 527 struct memblock_region *this = &type->regions[i]; 528 struct memblock_region *next = &type->regions[i + 1]; 529 530 if (this->base + this->size != next->base || 531 memblock_get_region_node(this) != 532 memblock_get_region_node(next) || 533 this->flags != next->flags) { 534 BUG_ON(this->base + this->size > next->base); 535 i++; 536 continue; 537 } 538 539 this->size += next->size; 540 /* move forward from next + 1, index of which is i + 2 */ 541 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); 542 type->cnt--; 543 } 544 } 545 546 /** 547 * memblock_insert_region - insert new memblock region 548 * @type: memblock type to insert into 549 * @idx: index for the insertion point 550 * @base: base address of the new region 551 * @size: size of the new region 552 * @nid: node id of the new region 553 * @flags: flags of the new region 554 * 555 * Insert new memblock region [@base, @base + @size) into @type at @idx. 556 * @type must already have extra room to accommodate the new region. 557 */ 558 static void __init_memblock memblock_insert_region(struct memblock_type *type, 559 int idx, phys_addr_t base, 560 phys_addr_t size, 561 int nid, 562 enum memblock_flags flags) 563 { 564 struct memblock_region *rgn = &type->regions[idx]; 565 566 BUG_ON(type->cnt >= type->max); 567 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); 568 rgn->base = base; 569 rgn->size = size; 570 rgn->flags = flags; 571 memblock_set_region_node(rgn, nid); 572 type->cnt++; 573 type->total_size += size; 574 } 575 576 /** 577 * memblock_add_range - add new memblock region 578 * @type: memblock type to add new region into 579 * @base: base address of the new region 580 * @size: size of the new region 581 * @nid: nid of the new region 582 * @flags: flags of the new region 583 * 584 * Add new memblock region [@base, @base + @size) into @type. The new region 585 * is allowed to overlap with existing ones - overlaps don't affect already 586 * existing regions. @type is guaranteed to be minimal (all neighbouring 587 * compatible regions are merged) after the addition. 588 * 589 * Return: 590 * 0 on success, -errno on failure. 591 */ 592 static int __init_memblock memblock_add_range(struct memblock_type *type, 593 phys_addr_t base, phys_addr_t size, 594 int nid, enum memblock_flags flags) 595 { 596 bool insert = false; 597 phys_addr_t obase = base; 598 phys_addr_t end = base + memblock_cap_size(base, &size); 599 int idx, nr_new; 600 struct memblock_region *rgn; 601 602 if (!size) 603 return 0; 604 605 /* special case for empty array */ 606 if (type->regions[0].size == 0) { 607 WARN_ON(type->cnt != 1 || type->total_size); 608 type->regions[0].base = base; 609 type->regions[0].size = size; 610 type->regions[0].flags = flags; 611 memblock_set_region_node(&type->regions[0], nid); 612 type->total_size = size; 613 return 0; 614 } 615 repeat: 616 /* 617 * The following is executed twice. Once with %false @insert and 618 * then with %true. The first counts the number of regions needed 619 * to accommodate the new area. The second actually inserts them. 620 */ 621 base = obase; 622 nr_new = 0; 623 624 for_each_memblock_type(idx, type, rgn) { 625 phys_addr_t rbase = rgn->base; 626 phys_addr_t rend = rbase + rgn->size; 627 628 if (rbase >= end) 629 break; 630 if (rend <= base) 631 continue; 632 /* 633 * @rgn overlaps. If it separates the lower part of new 634 * area, insert that portion. 635 */ 636 if (rbase > base) { 637 #ifdef CONFIG_NEED_MULTIPLE_NODES 638 WARN_ON(nid != memblock_get_region_node(rgn)); 639 #endif 640 WARN_ON(flags != rgn->flags); 641 nr_new++; 642 if (insert) 643 memblock_insert_region(type, idx++, base, 644 rbase - base, nid, 645 flags); 646 } 647 /* area below @rend is dealt with, forget about it */ 648 base = min(rend, end); 649 } 650 651 /* insert the remaining portion */ 652 if (base < end) { 653 nr_new++; 654 if (insert) 655 memblock_insert_region(type, idx, base, end - base, 656 nid, flags); 657 } 658 659 if (!nr_new) 660 return 0; 661 662 /* 663 * If this was the first round, resize array and repeat for actual 664 * insertions; otherwise, merge and return. 665 */ 666 if (!insert) { 667 while (type->cnt + nr_new > type->max) 668 if (memblock_double_array(type, obase, size) < 0) 669 return -ENOMEM; 670 insert = true; 671 goto repeat; 672 } else { 673 memblock_merge_regions(type); 674 return 0; 675 } 676 } 677 678 /** 679 * memblock_add_node - add new memblock region within a NUMA node 680 * @base: base address of the new region 681 * @size: size of the new region 682 * @nid: nid of the new region 683 * 684 * Add new memblock region [@base, @base + @size) to the "memory" 685 * type. See memblock_add_range() description for mode details 686 * 687 * Return: 688 * 0 on success, -errno on failure. 689 */ 690 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, 691 int nid) 692 { 693 return memblock_add_range(&memblock.memory, base, size, nid, 0); 694 } 695 696 /** 697 * memblock_add - add new memblock region 698 * @base: base address of the new region 699 * @size: size of the new region 700 * 701 * Add new memblock region [@base, @base + @size) to the "memory" 702 * type. See memblock_add_range() description for mode details 703 * 704 * Return: 705 * 0 on success, -errno on failure. 706 */ 707 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) 708 { 709 phys_addr_t end = base + size - 1; 710 711 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 712 &base, &end, (void *)_RET_IP_); 713 714 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); 715 } 716 717 /** 718 * memblock_isolate_range - isolate given range into disjoint memblocks 719 * @type: memblock type to isolate range for 720 * @base: base of range to isolate 721 * @size: size of range to isolate 722 * @start_rgn: out parameter for the start of isolated region 723 * @end_rgn: out parameter for the end of isolated region 724 * 725 * Walk @type and ensure that regions don't cross the boundaries defined by 726 * [@base, @base + @size). Crossing regions are split at the boundaries, 727 * which may create at most two more regions. The index of the first 728 * region inside the range is returned in *@start_rgn and end in *@end_rgn. 729 * 730 * Return: 731 * 0 on success, -errno on failure. 732 */ 733 static int __init_memblock memblock_isolate_range(struct memblock_type *type, 734 phys_addr_t base, phys_addr_t size, 735 int *start_rgn, int *end_rgn) 736 { 737 phys_addr_t end = base + memblock_cap_size(base, &size); 738 int idx; 739 struct memblock_region *rgn; 740 741 *start_rgn = *end_rgn = 0; 742 743 if (!size) 744 return 0; 745 746 /* we'll create at most two more regions */ 747 while (type->cnt + 2 > type->max) 748 if (memblock_double_array(type, base, size) < 0) 749 return -ENOMEM; 750 751 for_each_memblock_type(idx, type, rgn) { 752 phys_addr_t rbase = rgn->base; 753 phys_addr_t rend = rbase + rgn->size; 754 755 if (rbase >= end) 756 break; 757 if (rend <= base) 758 continue; 759 760 if (rbase < base) { 761 /* 762 * @rgn intersects from below. Split and continue 763 * to process the next region - the new top half. 764 */ 765 rgn->base = base; 766 rgn->size -= base - rbase; 767 type->total_size -= base - rbase; 768 memblock_insert_region(type, idx, rbase, base - rbase, 769 memblock_get_region_node(rgn), 770 rgn->flags); 771 } else if (rend > end) { 772 /* 773 * @rgn intersects from above. Split and redo the 774 * current region - the new bottom half. 775 */ 776 rgn->base = end; 777 rgn->size -= end - rbase; 778 type->total_size -= end - rbase; 779 memblock_insert_region(type, idx--, rbase, end - rbase, 780 memblock_get_region_node(rgn), 781 rgn->flags); 782 } else { 783 /* @rgn is fully contained, record it */ 784 if (!*end_rgn) 785 *start_rgn = idx; 786 *end_rgn = idx + 1; 787 } 788 } 789 790 return 0; 791 } 792 793 static int __init_memblock memblock_remove_range(struct memblock_type *type, 794 phys_addr_t base, phys_addr_t size) 795 { 796 int start_rgn, end_rgn; 797 int i, ret; 798 799 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 800 if (ret) 801 return ret; 802 803 for (i = end_rgn - 1; i >= start_rgn; i--) 804 memblock_remove_region(type, i); 805 return 0; 806 } 807 808 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) 809 { 810 phys_addr_t end = base + size - 1; 811 812 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 813 &base, &end, (void *)_RET_IP_); 814 815 return memblock_remove_range(&memblock.memory, base, size); 816 } 817 818 /** 819 * memblock_free - free boot memory block 820 * @base: phys starting address of the boot memory block 821 * @size: size of the boot memory block in bytes 822 * 823 * Free boot memory block previously allocated by memblock_alloc_xx() API. 824 * The freeing memory will not be released to the buddy allocator. 825 */ 826 int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) 827 { 828 phys_addr_t end = base + size - 1; 829 830 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 831 &base, &end, (void *)_RET_IP_); 832 833 kmemleak_free_part_phys(base, size); 834 return memblock_remove_range(&memblock.reserved, base, size); 835 } 836 837 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) 838 { 839 phys_addr_t end = base + size - 1; 840 841 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 842 &base, &end, (void *)_RET_IP_); 843 844 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0); 845 } 846 847 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 848 int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size) 849 { 850 phys_addr_t end = base + size - 1; 851 852 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 853 &base, &end, (void *)_RET_IP_); 854 855 return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0); 856 } 857 #endif 858 859 /** 860 * memblock_setclr_flag - set or clear flag for a memory region 861 * @base: base address of the region 862 * @size: size of the region 863 * @set: set or clear the flag 864 * @flag: the flag to udpate 865 * 866 * This function isolates region [@base, @base + @size), and sets/clears flag 867 * 868 * Return: 0 on success, -errno on failure. 869 */ 870 static int __init_memblock memblock_setclr_flag(phys_addr_t base, 871 phys_addr_t size, int set, int flag) 872 { 873 struct memblock_type *type = &memblock.memory; 874 int i, ret, start_rgn, end_rgn; 875 876 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 877 if (ret) 878 return ret; 879 880 for (i = start_rgn; i < end_rgn; i++) { 881 struct memblock_region *r = &type->regions[i]; 882 883 if (set) 884 r->flags |= flag; 885 else 886 r->flags &= ~flag; 887 } 888 889 memblock_merge_regions(type); 890 return 0; 891 } 892 893 /** 894 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. 895 * @base: the base phys addr of the region 896 * @size: the size of the region 897 * 898 * Return: 0 on success, -errno on failure. 899 */ 900 int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) 901 { 902 return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG); 903 } 904 905 /** 906 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. 907 * @base: the base phys addr of the region 908 * @size: the size of the region 909 * 910 * Return: 0 on success, -errno on failure. 911 */ 912 int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) 913 { 914 return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG); 915 } 916 917 /** 918 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR. 919 * @base: the base phys addr of the region 920 * @size: the size of the region 921 * 922 * Return: 0 on success, -errno on failure. 923 */ 924 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) 925 { 926 system_has_some_mirror = true; 927 928 return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR); 929 } 930 931 /** 932 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP. 933 * @base: the base phys addr of the region 934 * @size: the size of the region 935 * 936 * Return: 0 on success, -errno on failure. 937 */ 938 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) 939 { 940 return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP); 941 } 942 943 /** 944 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region. 945 * @base: the base phys addr of the region 946 * @size: the size of the region 947 * 948 * Return: 0 on success, -errno on failure. 949 */ 950 int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size) 951 { 952 return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP); 953 } 954 955 /** 956 * __next_reserved_mem_region - next function for for_each_reserved_region() 957 * @idx: pointer to u64 loop variable 958 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL 959 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL 960 * 961 * Iterate over all reserved memory regions. 962 */ 963 void __init_memblock __next_reserved_mem_region(u64 *idx, 964 phys_addr_t *out_start, 965 phys_addr_t *out_end) 966 { 967 struct memblock_type *type = &memblock.reserved; 968 969 if (*idx < type->cnt) { 970 struct memblock_region *r = &type->regions[*idx]; 971 phys_addr_t base = r->base; 972 phys_addr_t size = r->size; 973 974 if (out_start) 975 *out_start = base; 976 if (out_end) 977 *out_end = base + size - 1; 978 979 *idx += 1; 980 return; 981 } 982 983 /* signal end of iteration */ 984 *idx = ULLONG_MAX; 985 } 986 987 static bool should_skip_region(struct memblock_region *m, int nid, int flags) 988 { 989 int m_nid = memblock_get_region_node(m); 990 991 /* only memory regions are associated with nodes, check it */ 992 if (nid != NUMA_NO_NODE && nid != m_nid) 993 return true; 994 995 /* skip hotpluggable memory regions if needed */ 996 if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) 997 return true; 998 999 /* if we want mirror memory skip non-mirror memory regions */ 1000 if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) 1001 return true; 1002 1003 /* skip nomap memory unless we were asked for it explicitly */ 1004 if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) 1005 return true; 1006 1007 return false; 1008 } 1009 1010 /** 1011 * __next_mem_range - next function for for_each_free_mem_range() etc. 1012 * @idx: pointer to u64 loop variable 1013 * @nid: node selector, %NUMA_NO_NODE for all nodes 1014 * @flags: pick from blocks based on memory attributes 1015 * @type_a: pointer to memblock_type from where the range is taken 1016 * @type_b: pointer to memblock_type which excludes memory from being taken 1017 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 1018 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 1019 * @out_nid: ptr to int for nid of the range, can be %NULL 1020 * 1021 * Find the first area from *@idx which matches @nid, fill the out 1022 * parameters, and update *@idx for the next iteration. The lower 32bit of 1023 * *@idx contains index into type_a and the upper 32bit indexes the 1024 * areas before each region in type_b. For example, if type_b regions 1025 * look like the following, 1026 * 1027 * 0:[0-16), 1:[32-48), 2:[128-130) 1028 * 1029 * The upper 32bit indexes the following regions. 1030 * 1031 * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) 1032 * 1033 * As both region arrays are sorted, the function advances the two indices 1034 * in lockstep and returns each intersection. 1035 */ 1036 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, 1037 struct memblock_type *type_a, 1038 struct memblock_type *type_b, phys_addr_t *out_start, 1039 phys_addr_t *out_end, int *out_nid) 1040 { 1041 int idx_a = *idx & 0xffffffff; 1042 int idx_b = *idx >> 32; 1043 1044 if (WARN_ONCE(nid == MAX_NUMNODES, 1045 "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 1046 nid = NUMA_NO_NODE; 1047 1048 for (; idx_a < type_a->cnt; idx_a++) { 1049 struct memblock_region *m = &type_a->regions[idx_a]; 1050 1051 phys_addr_t m_start = m->base; 1052 phys_addr_t m_end = m->base + m->size; 1053 int m_nid = memblock_get_region_node(m); 1054 1055 if (should_skip_region(m, nid, flags)) 1056 continue; 1057 1058 if (!type_b) { 1059 if (out_start) 1060 *out_start = m_start; 1061 if (out_end) 1062 *out_end = m_end; 1063 if (out_nid) 1064 *out_nid = m_nid; 1065 idx_a++; 1066 *idx = (u32)idx_a | (u64)idx_b << 32; 1067 return; 1068 } 1069 1070 /* scan areas before each reservation */ 1071 for (; idx_b < type_b->cnt + 1; idx_b++) { 1072 struct memblock_region *r; 1073 phys_addr_t r_start; 1074 phys_addr_t r_end; 1075 1076 r = &type_b->regions[idx_b]; 1077 r_start = idx_b ? r[-1].base + r[-1].size : 0; 1078 r_end = idx_b < type_b->cnt ? 1079 r->base : PHYS_ADDR_MAX; 1080 1081 /* 1082 * if idx_b advanced past idx_a, 1083 * break out to advance idx_a 1084 */ 1085 if (r_start >= m_end) 1086 break; 1087 /* if the two regions intersect, we're done */ 1088 if (m_start < r_end) { 1089 if (out_start) 1090 *out_start = 1091 max(m_start, r_start); 1092 if (out_end) 1093 *out_end = min(m_end, r_end); 1094 if (out_nid) 1095 *out_nid = m_nid; 1096 /* 1097 * The region which ends first is 1098 * advanced for the next iteration. 1099 */ 1100 if (m_end <= r_end) 1101 idx_a++; 1102 else 1103 idx_b++; 1104 *idx = (u32)idx_a | (u64)idx_b << 32; 1105 return; 1106 } 1107 } 1108 } 1109 1110 /* signal end of iteration */ 1111 *idx = ULLONG_MAX; 1112 } 1113 1114 /** 1115 * __next_mem_range_rev - generic next function for for_each_*_range_rev() 1116 * 1117 * @idx: pointer to u64 loop variable 1118 * @nid: node selector, %NUMA_NO_NODE for all nodes 1119 * @flags: pick from blocks based on memory attributes 1120 * @type_a: pointer to memblock_type from where the range is taken 1121 * @type_b: pointer to memblock_type which excludes memory from being taken 1122 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 1123 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 1124 * @out_nid: ptr to int for nid of the range, can be %NULL 1125 * 1126 * Finds the next range from type_a which is not marked as unsuitable 1127 * in type_b. 1128 * 1129 * Reverse of __next_mem_range(). 1130 */ 1131 void __init_memblock __next_mem_range_rev(u64 *idx, int nid, 1132 enum memblock_flags flags, 1133 struct memblock_type *type_a, 1134 struct memblock_type *type_b, 1135 phys_addr_t *out_start, 1136 phys_addr_t *out_end, int *out_nid) 1137 { 1138 int idx_a = *idx & 0xffffffff; 1139 int idx_b = *idx >> 32; 1140 1141 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 1142 nid = NUMA_NO_NODE; 1143 1144 if (*idx == (u64)ULLONG_MAX) { 1145 idx_a = type_a->cnt - 1; 1146 if (type_b != NULL) 1147 idx_b = type_b->cnt; 1148 else 1149 idx_b = 0; 1150 } 1151 1152 for (; idx_a >= 0; idx_a--) { 1153 struct memblock_region *m = &type_a->regions[idx_a]; 1154 1155 phys_addr_t m_start = m->base; 1156 phys_addr_t m_end = m->base + m->size; 1157 int m_nid = memblock_get_region_node(m); 1158 1159 if (should_skip_region(m, nid, flags)) 1160 continue; 1161 1162 if (!type_b) { 1163 if (out_start) 1164 *out_start = m_start; 1165 if (out_end) 1166 *out_end = m_end; 1167 if (out_nid) 1168 *out_nid = m_nid; 1169 idx_a--; 1170 *idx = (u32)idx_a | (u64)idx_b << 32; 1171 return; 1172 } 1173 1174 /* scan areas before each reservation */ 1175 for (; idx_b >= 0; idx_b--) { 1176 struct memblock_region *r; 1177 phys_addr_t r_start; 1178 phys_addr_t r_end; 1179 1180 r = &type_b->regions[idx_b]; 1181 r_start = idx_b ? r[-1].base + r[-1].size : 0; 1182 r_end = idx_b < type_b->cnt ? 1183 r->base : PHYS_ADDR_MAX; 1184 /* 1185 * if idx_b advanced past idx_a, 1186 * break out to advance idx_a 1187 */ 1188 1189 if (r_end <= m_start) 1190 break; 1191 /* if the two regions intersect, we're done */ 1192 if (m_end > r_start) { 1193 if (out_start) 1194 *out_start = max(m_start, r_start); 1195 if (out_end) 1196 *out_end = min(m_end, r_end); 1197 if (out_nid) 1198 *out_nid = m_nid; 1199 if (m_start >= r_start) 1200 idx_a--; 1201 else 1202 idx_b--; 1203 *idx = (u32)idx_a | (u64)idx_b << 32; 1204 return; 1205 } 1206 } 1207 } 1208 /* signal end of iteration */ 1209 *idx = ULLONG_MAX; 1210 } 1211 1212 /* 1213 * Common iterator interface used to define for_each_mem_pfn_range(). 1214 */ 1215 void __init_memblock __next_mem_pfn_range(int *idx, int nid, 1216 unsigned long *out_start_pfn, 1217 unsigned long *out_end_pfn, int *out_nid) 1218 { 1219 struct memblock_type *type = &memblock.memory; 1220 struct memblock_region *r; 1221 int r_nid; 1222 1223 while (++*idx < type->cnt) { 1224 r = &type->regions[*idx]; 1225 r_nid = memblock_get_region_node(r); 1226 1227 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) 1228 continue; 1229 if (nid == MAX_NUMNODES || nid == r_nid) 1230 break; 1231 } 1232 if (*idx >= type->cnt) { 1233 *idx = -1; 1234 return; 1235 } 1236 1237 if (out_start_pfn) 1238 *out_start_pfn = PFN_UP(r->base); 1239 if (out_end_pfn) 1240 *out_end_pfn = PFN_DOWN(r->base + r->size); 1241 if (out_nid) 1242 *out_nid = r_nid; 1243 } 1244 1245 /** 1246 * memblock_set_node - set node ID on memblock regions 1247 * @base: base of area to set node ID for 1248 * @size: size of area to set node ID for 1249 * @type: memblock type to set node ID for 1250 * @nid: node ID to set 1251 * 1252 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid. 1253 * Regions which cross the area boundaries are split as necessary. 1254 * 1255 * Return: 1256 * 0 on success, -errno on failure. 1257 */ 1258 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, 1259 struct memblock_type *type, int nid) 1260 { 1261 #ifdef CONFIG_NEED_MULTIPLE_NODES 1262 int start_rgn, end_rgn; 1263 int i, ret; 1264 1265 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 1266 if (ret) 1267 return ret; 1268 1269 for (i = start_rgn; i < end_rgn; i++) 1270 memblock_set_region_node(&type->regions[i], nid); 1271 1272 memblock_merge_regions(type); 1273 #endif 1274 return 0; 1275 } 1276 1277 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1278 /** 1279 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone() 1280 * 1281 * @idx: pointer to u64 loop variable 1282 * @zone: zone in which all of the memory blocks reside 1283 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL 1284 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL 1285 * 1286 * This function is meant to be a zone/pfn specific wrapper for the 1287 * for_each_mem_range type iterators. Specifically they are used in the 1288 * deferred memory init routines and as such we were duplicating much of 1289 * this logic throughout the code. So instead of having it in multiple 1290 * locations it seemed like it would make more sense to centralize this to 1291 * one new iterator that does everything they need. 1292 */ 1293 void __init_memblock 1294 __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, 1295 unsigned long *out_spfn, unsigned long *out_epfn) 1296 { 1297 int zone_nid = zone_to_nid(zone); 1298 phys_addr_t spa, epa; 1299 int nid; 1300 1301 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, 1302 &memblock.memory, &memblock.reserved, 1303 &spa, &epa, &nid); 1304 1305 while (*idx != U64_MAX) { 1306 unsigned long epfn = PFN_DOWN(epa); 1307 unsigned long spfn = PFN_UP(spa); 1308 1309 /* 1310 * Verify the end is at least past the start of the zone and 1311 * that we have at least one PFN to initialize. 1312 */ 1313 if (zone->zone_start_pfn < epfn && spfn < epfn) { 1314 /* if we went too far just stop searching */ 1315 if (zone_end_pfn(zone) <= spfn) { 1316 *idx = U64_MAX; 1317 break; 1318 } 1319 1320 if (out_spfn) 1321 *out_spfn = max(zone->zone_start_pfn, spfn); 1322 if (out_epfn) 1323 *out_epfn = min(zone_end_pfn(zone), epfn); 1324 1325 return; 1326 } 1327 1328 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, 1329 &memblock.memory, &memblock.reserved, 1330 &spa, &epa, &nid); 1331 } 1332 1333 /* signal end of iteration */ 1334 if (out_spfn) 1335 *out_spfn = ULONG_MAX; 1336 if (out_epfn) 1337 *out_epfn = 0; 1338 } 1339 1340 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1341 1342 /** 1343 * memblock_alloc_range_nid - allocate boot memory block 1344 * @size: size of memory block to be allocated in bytes 1345 * @align: alignment of the region and block's size 1346 * @start: the lower bound of the memory region to allocate (phys address) 1347 * @end: the upper bound of the memory region to allocate (phys address) 1348 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1349 * @exact_nid: control the allocation fall back to other nodes 1350 * 1351 * The allocation is performed from memory region limited by 1352 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE. 1353 * 1354 * If the specified node can not hold the requested memory and @exact_nid 1355 * is false, the allocation falls back to any node in the system. 1356 * 1357 * For systems with memory mirroring, the allocation is attempted first 1358 * from the regions with mirroring enabled and then retried from any 1359 * memory region. 1360 * 1361 * In addition, function sets the min_count to 0 using kmemleak_alloc_phys for 1362 * allocated boot memory block, so that it is never reported as leaks. 1363 * 1364 * Return: 1365 * Physical address of allocated memory block on success, %0 on failure. 1366 */ 1367 phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, 1368 phys_addr_t align, phys_addr_t start, 1369 phys_addr_t end, int nid, 1370 bool exact_nid) 1371 { 1372 enum memblock_flags flags = choose_memblock_flags(); 1373 phys_addr_t found; 1374 1375 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 1376 nid = NUMA_NO_NODE; 1377 1378 if (!align) { 1379 /* Can't use WARNs this early in boot on powerpc */ 1380 dump_stack(); 1381 align = SMP_CACHE_BYTES; 1382 } 1383 1384 again: 1385 found = memblock_find_in_range_node(size, align, start, end, nid, 1386 flags); 1387 if (found && !memblock_reserve(found, size)) 1388 goto done; 1389 1390 if (nid != NUMA_NO_NODE && !exact_nid) { 1391 found = memblock_find_in_range_node(size, align, start, 1392 end, NUMA_NO_NODE, 1393 flags); 1394 if (found && !memblock_reserve(found, size)) 1395 goto done; 1396 } 1397 1398 if (flags & MEMBLOCK_MIRROR) { 1399 flags &= ~MEMBLOCK_MIRROR; 1400 pr_warn("Could not allocate %pap bytes of mirrored memory\n", 1401 &size); 1402 goto again; 1403 } 1404 1405 return 0; 1406 1407 done: 1408 /* Skip kmemleak for kasan_init() due to high volume. */ 1409 if (end != MEMBLOCK_ALLOC_KASAN) 1410 /* 1411 * The min_count is set to 0 so that memblock allocated 1412 * blocks are never reported as leaks. This is because many 1413 * of these blocks are only referred via the physical 1414 * address which is not looked up by kmemleak. 1415 */ 1416 kmemleak_alloc_phys(found, size, 0, 0); 1417 1418 return found; 1419 } 1420 1421 /** 1422 * memblock_phys_alloc_range - allocate a memory block inside specified range 1423 * @size: size of memory block to be allocated in bytes 1424 * @align: alignment of the region and block's size 1425 * @start: the lower bound of the memory region to allocate (physical address) 1426 * @end: the upper bound of the memory region to allocate (physical address) 1427 * 1428 * Allocate @size bytes in the between @start and @end. 1429 * 1430 * Return: physical address of the allocated memory block on success, 1431 * %0 on failure. 1432 */ 1433 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size, 1434 phys_addr_t align, 1435 phys_addr_t start, 1436 phys_addr_t end) 1437 { 1438 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, 1439 false); 1440 } 1441 1442 /** 1443 * memblock_phys_alloc_try_nid - allocate a memory block from specified MUMA node 1444 * @size: size of memory block to be allocated in bytes 1445 * @align: alignment of the region and block's size 1446 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1447 * 1448 * Allocates memory block from the specified NUMA node. If the node 1449 * has no available memory, attempts to allocated from any node in the 1450 * system. 1451 * 1452 * Return: physical address of the allocated memory block on success, 1453 * %0 on failure. 1454 */ 1455 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) 1456 { 1457 return memblock_alloc_range_nid(size, align, 0, 1458 MEMBLOCK_ALLOC_ACCESSIBLE, nid, false); 1459 } 1460 1461 /** 1462 * memblock_alloc_internal - allocate boot memory block 1463 * @size: size of memory block to be allocated in bytes 1464 * @align: alignment of the region and block's size 1465 * @min_addr: the lower bound of the memory region to allocate (phys address) 1466 * @max_addr: the upper bound of the memory region to allocate (phys address) 1467 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1468 * @exact_nid: control the allocation fall back to other nodes 1469 * 1470 * Allocates memory block using memblock_alloc_range_nid() and 1471 * converts the returned physical address to virtual. 1472 * 1473 * The @min_addr limit is dropped if it can not be satisfied and the allocation 1474 * will fall back to memory below @min_addr. Other constraints, such 1475 * as node and mirrored memory will be handled again in 1476 * memblock_alloc_range_nid(). 1477 * 1478 * Return: 1479 * Virtual address of allocated memory block on success, NULL on failure. 1480 */ 1481 static void * __init memblock_alloc_internal( 1482 phys_addr_t size, phys_addr_t align, 1483 phys_addr_t min_addr, phys_addr_t max_addr, 1484 int nid, bool exact_nid) 1485 { 1486 phys_addr_t alloc; 1487 1488 /* 1489 * Detect any accidental use of these APIs after slab is ready, as at 1490 * this moment memblock may be deinitialized already and its 1491 * internal data may be destroyed (after execution of memblock_free_all) 1492 */ 1493 if (WARN_ON_ONCE(slab_is_available())) 1494 return kzalloc_node(size, GFP_NOWAIT, nid); 1495 1496 if (max_addr > memblock.current_limit) 1497 max_addr = memblock.current_limit; 1498 1499 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid, 1500 exact_nid); 1501 1502 /* retry allocation without lower limit */ 1503 if (!alloc && min_addr) 1504 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid, 1505 exact_nid); 1506 1507 if (!alloc) 1508 return NULL; 1509 1510 return phys_to_virt(alloc); 1511 } 1512 1513 /** 1514 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node 1515 * without zeroing memory 1516 * @size: size of memory block to be allocated in bytes 1517 * @align: alignment of the region and block's size 1518 * @min_addr: the lower bound of the memory region from where the allocation 1519 * is preferred (phys address) 1520 * @max_addr: the upper bound of the memory region from where the allocation 1521 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 1522 * allocate only from memory limited by memblock.current_limit value 1523 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1524 * 1525 * Public function, provides additional debug information (including caller 1526 * info), if enabled. Does not zero allocated memory. 1527 * 1528 * Return: 1529 * Virtual address of allocated memory block on success, NULL on failure. 1530 */ 1531 void * __init memblock_alloc_exact_nid_raw( 1532 phys_addr_t size, phys_addr_t align, 1533 phys_addr_t min_addr, phys_addr_t max_addr, 1534 int nid) 1535 { 1536 void *ptr; 1537 1538 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1539 __func__, (u64)size, (u64)align, nid, &min_addr, 1540 &max_addr, (void *)_RET_IP_); 1541 1542 ptr = memblock_alloc_internal(size, align, 1543 min_addr, max_addr, nid, true); 1544 if (ptr && size > 0) 1545 page_init_poison(ptr, size); 1546 1547 return ptr; 1548 } 1549 1550 /** 1551 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing 1552 * memory and without panicking 1553 * @size: size of memory block to be allocated in bytes 1554 * @align: alignment of the region and block's size 1555 * @min_addr: the lower bound of the memory region from where the allocation 1556 * is preferred (phys address) 1557 * @max_addr: the upper bound of the memory region from where the allocation 1558 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 1559 * allocate only from memory limited by memblock.current_limit value 1560 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1561 * 1562 * Public function, provides additional debug information (including caller 1563 * info), if enabled. Does not zero allocated memory, does not panic if request 1564 * cannot be satisfied. 1565 * 1566 * Return: 1567 * Virtual address of allocated memory block on success, NULL on failure. 1568 */ 1569 void * __init memblock_alloc_try_nid_raw( 1570 phys_addr_t size, phys_addr_t align, 1571 phys_addr_t min_addr, phys_addr_t max_addr, 1572 int nid) 1573 { 1574 void *ptr; 1575 1576 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1577 __func__, (u64)size, (u64)align, nid, &min_addr, 1578 &max_addr, (void *)_RET_IP_); 1579 1580 ptr = memblock_alloc_internal(size, align, 1581 min_addr, max_addr, nid, false); 1582 if (ptr && size > 0) 1583 page_init_poison(ptr, size); 1584 1585 return ptr; 1586 } 1587 1588 /** 1589 * memblock_alloc_try_nid - allocate boot memory block 1590 * @size: size of memory block to be allocated in bytes 1591 * @align: alignment of the region and block's size 1592 * @min_addr: the lower bound of the memory region from where the allocation 1593 * is preferred (phys address) 1594 * @max_addr: the upper bound of the memory region from where the allocation 1595 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 1596 * allocate only from memory limited by memblock.current_limit value 1597 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1598 * 1599 * Public function, provides additional debug information (including caller 1600 * info), if enabled. This function zeroes the allocated memory. 1601 * 1602 * Return: 1603 * Virtual address of allocated memory block on success, NULL on failure. 1604 */ 1605 void * __init memblock_alloc_try_nid( 1606 phys_addr_t size, phys_addr_t align, 1607 phys_addr_t min_addr, phys_addr_t max_addr, 1608 int nid) 1609 { 1610 void *ptr; 1611 1612 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1613 __func__, (u64)size, (u64)align, nid, &min_addr, 1614 &max_addr, (void *)_RET_IP_); 1615 ptr = memblock_alloc_internal(size, align, 1616 min_addr, max_addr, nid, false); 1617 if (ptr) 1618 memset(ptr, 0, size); 1619 1620 return ptr; 1621 } 1622 1623 /** 1624 * __memblock_free_late - free pages directly to buddy allocator 1625 * @base: phys starting address of the boot memory block 1626 * @size: size of the boot memory block in bytes 1627 * 1628 * This is only useful when the memblock allocator has already been torn 1629 * down, but we are still initializing the system. Pages are released directly 1630 * to the buddy allocator. 1631 */ 1632 void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) 1633 { 1634 phys_addr_t cursor, end; 1635 1636 end = base + size - 1; 1637 memblock_dbg("%s: [%pa-%pa] %pS\n", 1638 __func__, &base, &end, (void *)_RET_IP_); 1639 kmemleak_free_part_phys(base, size); 1640 cursor = PFN_UP(base); 1641 end = PFN_DOWN(base + size); 1642 1643 for (; cursor < end; cursor++) { 1644 memblock_free_pages(pfn_to_page(cursor), cursor, 0); 1645 totalram_pages_inc(); 1646 } 1647 } 1648 1649 /* 1650 * Remaining API functions 1651 */ 1652 1653 phys_addr_t __init_memblock memblock_phys_mem_size(void) 1654 { 1655 return memblock.memory.total_size; 1656 } 1657 1658 phys_addr_t __init_memblock memblock_reserved_size(void) 1659 { 1660 return memblock.reserved.total_size; 1661 } 1662 1663 phys_addr_t __init memblock_mem_size(unsigned long limit_pfn) 1664 { 1665 unsigned long pages = 0; 1666 unsigned long start_pfn, end_pfn; 1667 int i; 1668 1669 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) { 1670 start_pfn = min_t(unsigned long, start_pfn, limit_pfn); 1671 end_pfn = min_t(unsigned long, end_pfn, limit_pfn); 1672 pages += end_pfn - start_pfn; 1673 } 1674 1675 return PFN_PHYS(pages); 1676 } 1677 1678 /* lowest address */ 1679 phys_addr_t __init_memblock memblock_start_of_DRAM(void) 1680 { 1681 return memblock.memory.regions[0].base; 1682 } 1683 1684 phys_addr_t __init_memblock memblock_end_of_DRAM(void) 1685 { 1686 int idx = memblock.memory.cnt - 1; 1687 1688 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); 1689 } 1690 1691 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) 1692 { 1693 phys_addr_t max_addr = PHYS_ADDR_MAX; 1694 struct memblock_region *r; 1695 1696 /* 1697 * translate the memory @limit size into the max address within one of 1698 * the memory memblock regions, if the @limit exceeds the total size 1699 * of those regions, max_addr will keep original value PHYS_ADDR_MAX 1700 */ 1701 for_each_memblock(memory, r) { 1702 if (limit <= r->size) { 1703 max_addr = r->base + limit; 1704 break; 1705 } 1706 limit -= r->size; 1707 } 1708 1709 return max_addr; 1710 } 1711 1712 void __init memblock_enforce_memory_limit(phys_addr_t limit) 1713 { 1714 phys_addr_t max_addr; 1715 1716 if (!limit) 1717 return; 1718 1719 max_addr = __find_max_addr(limit); 1720 1721 /* @limit exceeds the total size of the memory, do nothing */ 1722 if (max_addr == PHYS_ADDR_MAX) 1723 return; 1724 1725 /* truncate both memory and reserved regions */ 1726 memblock_remove_range(&memblock.memory, max_addr, 1727 PHYS_ADDR_MAX); 1728 memblock_remove_range(&memblock.reserved, max_addr, 1729 PHYS_ADDR_MAX); 1730 } 1731 1732 void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) 1733 { 1734 int start_rgn, end_rgn; 1735 int i, ret; 1736 1737 if (!size) 1738 return; 1739 1740 ret = memblock_isolate_range(&memblock.memory, base, size, 1741 &start_rgn, &end_rgn); 1742 if (ret) 1743 return; 1744 1745 /* remove all the MAP regions */ 1746 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) 1747 if (!memblock_is_nomap(&memblock.memory.regions[i])) 1748 memblock_remove_region(&memblock.memory, i); 1749 1750 for (i = start_rgn - 1; i >= 0; i--) 1751 if (!memblock_is_nomap(&memblock.memory.regions[i])) 1752 memblock_remove_region(&memblock.memory, i); 1753 1754 /* truncate the reserved regions */ 1755 memblock_remove_range(&memblock.reserved, 0, base); 1756 memblock_remove_range(&memblock.reserved, 1757 base + size, PHYS_ADDR_MAX); 1758 } 1759 1760 void __init memblock_mem_limit_remove_map(phys_addr_t limit) 1761 { 1762 phys_addr_t max_addr; 1763 1764 if (!limit) 1765 return; 1766 1767 max_addr = __find_max_addr(limit); 1768 1769 /* @limit exceeds the total size of the memory, do nothing */ 1770 if (max_addr == PHYS_ADDR_MAX) 1771 return; 1772 1773 memblock_cap_memory_range(0, max_addr); 1774 } 1775 1776 static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) 1777 { 1778 unsigned int left = 0, right = type->cnt; 1779 1780 do { 1781 unsigned int mid = (right + left) / 2; 1782 1783 if (addr < type->regions[mid].base) 1784 right = mid; 1785 else if (addr >= (type->regions[mid].base + 1786 type->regions[mid].size)) 1787 left = mid + 1; 1788 else 1789 return mid; 1790 } while (left < right); 1791 return -1; 1792 } 1793 1794 bool __init_memblock memblock_is_reserved(phys_addr_t addr) 1795 { 1796 return memblock_search(&memblock.reserved, addr) != -1; 1797 } 1798 1799 bool __init_memblock memblock_is_memory(phys_addr_t addr) 1800 { 1801 return memblock_search(&memblock.memory, addr) != -1; 1802 } 1803 1804 bool __init_memblock memblock_is_map_memory(phys_addr_t addr) 1805 { 1806 int i = memblock_search(&memblock.memory, addr); 1807 1808 if (i == -1) 1809 return false; 1810 return !memblock_is_nomap(&memblock.memory.regions[i]); 1811 } 1812 1813 int __init_memblock memblock_search_pfn_nid(unsigned long pfn, 1814 unsigned long *start_pfn, unsigned long *end_pfn) 1815 { 1816 struct memblock_type *type = &memblock.memory; 1817 int mid = memblock_search(type, PFN_PHYS(pfn)); 1818 1819 if (mid == -1) 1820 return -1; 1821 1822 *start_pfn = PFN_DOWN(type->regions[mid].base); 1823 *end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size); 1824 1825 return memblock_get_region_node(&type->regions[mid]); 1826 } 1827 1828 /** 1829 * memblock_is_region_memory - check if a region is a subset of memory 1830 * @base: base of region to check 1831 * @size: size of region to check 1832 * 1833 * Check if the region [@base, @base + @size) is a subset of a memory block. 1834 * 1835 * Return: 1836 * 0 if false, non-zero if true 1837 */ 1838 bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 1839 { 1840 int idx = memblock_search(&memblock.memory, base); 1841 phys_addr_t end = base + memblock_cap_size(base, &size); 1842 1843 if (idx == -1) 1844 return false; 1845 return (memblock.memory.regions[idx].base + 1846 memblock.memory.regions[idx].size) >= end; 1847 } 1848 1849 /** 1850 * memblock_is_region_reserved - check if a region intersects reserved memory 1851 * @base: base of region to check 1852 * @size: size of region to check 1853 * 1854 * Check if the region [@base, @base + @size) intersects a reserved 1855 * memory block. 1856 * 1857 * Return: 1858 * True if they intersect, false if not. 1859 */ 1860 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 1861 { 1862 memblock_cap_size(base, &size); 1863 return memblock_overlaps_region(&memblock.reserved, base, size); 1864 } 1865 1866 void __init_memblock memblock_trim_memory(phys_addr_t align) 1867 { 1868 phys_addr_t start, end, orig_start, orig_end; 1869 struct memblock_region *r; 1870 1871 for_each_memblock(memory, r) { 1872 orig_start = r->base; 1873 orig_end = r->base + r->size; 1874 start = round_up(orig_start, align); 1875 end = round_down(orig_end, align); 1876 1877 if (start == orig_start && end == orig_end) 1878 continue; 1879 1880 if (start < end) { 1881 r->base = start; 1882 r->size = end - start; 1883 } else { 1884 memblock_remove_region(&memblock.memory, 1885 r - memblock.memory.regions); 1886 r--; 1887 } 1888 } 1889 } 1890 1891 void __init_memblock memblock_set_current_limit(phys_addr_t limit) 1892 { 1893 memblock.current_limit = limit; 1894 } 1895 1896 phys_addr_t __init_memblock memblock_get_current_limit(void) 1897 { 1898 return memblock.current_limit; 1899 } 1900 1901 static void __init_memblock memblock_dump(struct memblock_type *type) 1902 { 1903 phys_addr_t base, end, size; 1904 enum memblock_flags flags; 1905 int idx; 1906 struct memblock_region *rgn; 1907 1908 pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt); 1909 1910 for_each_memblock_type(idx, type, rgn) { 1911 char nid_buf[32] = ""; 1912 1913 base = rgn->base; 1914 size = rgn->size; 1915 end = base + size - 1; 1916 flags = rgn->flags; 1917 #ifdef CONFIG_NEED_MULTIPLE_NODES 1918 if (memblock_get_region_node(rgn) != MAX_NUMNODES) 1919 snprintf(nid_buf, sizeof(nid_buf), " on node %d", 1920 memblock_get_region_node(rgn)); 1921 #endif 1922 pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n", 1923 type->name, idx, &base, &end, &size, nid_buf, flags); 1924 } 1925 } 1926 1927 static void __init_memblock __memblock_dump_all(void) 1928 { 1929 pr_info("MEMBLOCK configuration:\n"); 1930 pr_info(" memory size = %pa reserved size = %pa\n", 1931 &memblock.memory.total_size, 1932 &memblock.reserved.total_size); 1933 1934 memblock_dump(&memblock.memory); 1935 memblock_dump(&memblock.reserved); 1936 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 1937 memblock_dump(&physmem); 1938 #endif 1939 } 1940 1941 void __init_memblock memblock_dump_all(void) 1942 { 1943 if (memblock_debug) 1944 __memblock_dump_all(); 1945 } 1946 1947 void __init memblock_allow_resize(void) 1948 { 1949 memblock_can_resize = 1; 1950 } 1951 1952 static int __init early_memblock(char *p) 1953 { 1954 if (p && strstr(p, "debug")) 1955 memblock_debug = 1; 1956 return 0; 1957 } 1958 early_param("memblock", early_memblock); 1959 1960 static void __init __free_pages_memory(unsigned long start, unsigned long end) 1961 { 1962 int order; 1963 1964 while (start < end) { 1965 order = min(MAX_ORDER - 1UL, __ffs(start)); 1966 1967 while (start + (1UL << order) > end) 1968 order--; 1969 1970 memblock_free_pages(pfn_to_page(start), start, order); 1971 1972 start += (1UL << order); 1973 } 1974 } 1975 1976 static unsigned long __init __free_memory_core(phys_addr_t start, 1977 phys_addr_t end) 1978 { 1979 unsigned long start_pfn = PFN_UP(start); 1980 unsigned long end_pfn = min_t(unsigned long, 1981 PFN_DOWN(end), max_low_pfn); 1982 1983 if (start_pfn >= end_pfn) 1984 return 0; 1985 1986 __free_pages_memory(start_pfn, end_pfn); 1987 1988 return end_pfn - start_pfn; 1989 } 1990 1991 static unsigned long __init free_low_memory_core_early(void) 1992 { 1993 unsigned long count = 0; 1994 phys_addr_t start, end; 1995 u64 i; 1996 1997 memblock_clear_hotplug(0, -1); 1998 1999 for_each_reserved_mem_region(i, &start, &end) 2000 reserve_bootmem_region(start, end); 2001 2002 /* 2003 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id 2004 * because in some case like Node0 doesn't have RAM installed 2005 * low ram will be on Node1 2006 */ 2007 for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, 2008 NULL) 2009 count += __free_memory_core(start, end); 2010 2011 return count; 2012 } 2013 2014 static int reset_managed_pages_done __initdata; 2015 2016 void reset_node_managed_pages(pg_data_t *pgdat) 2017 { 2018 struct zone *z; 2019 2020 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) 2021 atomic_long_set(&z->managed_pages, 0); 2022 } 2023 2024 void __init reset_all_zones_managed_pages(void) 2025 { 2026 struct pglist_data *pgdat; 2027 2028 if (reset_managed_pages_done) 2029 return; 2030 2031 for_each_online_pgdat(pgdat) 2032 reset_node_managed_pages(pgdat); 2033 2034 reset_managed_pages_done = 1; 2035 } 2036 2037 /** 2038 * memblock_free_all - release free pages to the buddy allocator 2039 * 2040 * Return: the number of pages actually released. 2041 */ 2042 unsigned long __init memblock_free_all(void) 2043 { 2044 unsigned long pages; 2045 2046 reset_all_zones_managed_pages(); 2047 2048 pages = free_low_memory_core_early(); 2049 totalram_pages_add(pages); 2050 2051 return pages; 2052 } 2053 2054 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK) 2055 2056 static int memblock_debug_show(struct seq_file *m, void *private) 2057 { 2058 struct memblock_type *type = m->private; 2059 struct memblock_region *reg; 2060 int i; 2061 phys_addr_t end; 2062 2063 for (i = 0; i < type->cnt; i++) { 2064 reg = &type->regions[i]; 2065 end = reg->base + reg->size - 1; 2066 2067 seq_printf(m, "%4d: ", i); 2068 seq_printf(m, "%pa..%pa\n", ®->base, &end); 2069 } 2070 return 0; 2071 } 2072 DEFINE_SHOW_ATTRIBUTE(memblock_debug); 2073 2074 static int __init memblock_init_debugfs(void) 2075 { 2076 struct dentry *root = debugfs_create_dir("memblock", NULL); 2077 2078 debugfs_create_file("memory", 0444, root, 2079 &memblock.memory, &memblock_debug_fops); 2080 debugfs_create_file("reserved", 0444, root, 2081 &memblock.reserved, &memblock_debug_fops); 2082 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 2083 debugfs_create_file("physmem", 0444, root, &physmem, 2084 &memblock_debug_fops); 2085 #endif 2086 2087 return 0; 2088 } 2089 __initcall(memblock_init_debugfs); 2090 2091 #endif /* CONFIG_DEBUG_FS */ 2092