// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with :c:type:`struct memblock`. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * Calling memblock_allow_resize() enables automatic resizing of the
 * region arrays during addition of new regions. This feature should be
 * used with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
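/*
 * Illustrative sketch, not part of this file: a hypothetical early
 * architecture setup might describe RAM, protect the initrd and grab a
 * scratch buffer roughly as below. The addresses, sizes and the
 * "scratch" variable are made-up placeholders.
 *
 *	memblock_add(0x80000000, SZ_512M);		// register RAM
 *	memblock_reserve(initrd_start, initrd_size);	// keep initrd intact
 *
 *	// zeroed allocation; returns a virtual address or NULL
 *	scratch = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
 *	if (!scratch)
 *		panic("%s: Failed to allocate scratch buffer\n", __func__);
 */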
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;
static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If the bottom-up allocation fails, the function will try to allocate
 * memory top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * failure happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it came from memblock. Otherwise, we
	 * don't need to.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
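/*
 * Illustrative sketch (hypothetical values): two regions
 * [0x1000-0x2000) and [0x2000-0x3000) with the same node id and flags
 * are compatible, so the merge pass above collapses them into a single
 * region [0x1000-0x3000); if their flags differed, both would be kept
 * and the index would simply advance.
 */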
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
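/*
 * Illustrative sketch (hypothetical values): with "memory" already
 * containing [0x2000-0x4000), memblock_add_range() of [0x1000-0x3000)
 * inserts only the non-overlapping lower part [0x1000-0x2000); the
 * subsequent merge pass then coalesces the result into the single
 * region [0x1000-0x4000).
 */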
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
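/*
 * Illustrative sketch (hypothetical values): isolating [0x2000-0x6000)
 * from a type holding the single region [0x0000-0x8000) splits it into
 * [0x0000-0x2000), [0x2000-0x6000) and [0x6000-0x8000); *start_rgn and
 * *end_rgn then delimit just the middle region.
 */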
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}
/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

static bool should_skip_region(struct memblock_region *m, int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
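/*
 * Illustrative sketch (hypothetical values): with type_b regions
 * 0:[0-16) and 1:[32-48), a resume value of *idx == (1ULL << 32 | 0)
 * means "type_a region 0, gap 1", i.e. the free area between 16 and 32.
 * A caller would typically start from zero:
 *
 *	u64 i = 0;
 *	phys_addr_t start, end;
 *
 *	__next_mem_range(&i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *			 &memblock.memory, &memblock.reserved,
 *			 &start, &end, NULL);	// first free intersection
 */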
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
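/*
 * Illustrative usage sketch (hypothetical loop; assumes the
 * for_each_mem_pfn_range() wrapper from memblock.h that the comment
 * above refers to): walking all memory as PFN ranges might look like:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("pfns [%lx-%lx) on node %d\n",
 *			start_pfn, end_pfn, nid);
 */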
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. It is used in the deferred memory
 * init routines, which used to duplicate much of this logic throughout
 * the code; centralizing it in one iterator avoids that duplication.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function sets the min_count for the allocated boot
 * memory block to 0 using kmemleak_alloc_phys(), so that it is never
 * reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}
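/*
 * Illustrative usage sketch (hypothetical constraint): allocating a
 * naturally aligned 1MiB block that must reside below 4GiB could be
 * done with:
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_phys_alloc_range(SZ_1M, SZ_1M, 0, SZ_4G);
 *	if (!pa)
 *		pr_warn("low allocation failed\n");
 */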
/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}

/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, true);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

/**
 * __memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep the original value PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of memory, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			/* re-examine the region that now occupies this slot */
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
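
/*
 * Example: booting with "memblock=debug" on the kernel command line sets
 * memblock_debug above, which makes memblock_dbg() trace every add, remove,
 * reserve and alloc together with the caller's address. Architecture setup
 * code can also dump the current region tables explicitly via the
 * memblock_dump_all() wrapper from the header, which is a no-op unless
 * memblock_debug is set:
 *
 *	memblock_dump_all();
 *
 * The output is one pr_info() line per region, in the format produced by
 * memblock_dump() above.
 */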
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, e.g. when node 0 has no RAM installed,
	 * low memory resides on node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Return: the number of pages actually released.
 */
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);

	return pages;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */
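
/*
 * Example: with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK enabled, the
 * region tables remain inspectable from userspace after boot, one line per
 * region in the format written by memblock_debug_show() above (the address
 * values here are illustrative only):
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000ffffffff
 *	# cat /sys/kernel/debug/memblock/reserved
 *	   0: 0x0000000080080000..0x0000000081e8ffff
 */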