// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with :c:type:`struct memblock`. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node().
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture-specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
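
/*
 * A minimal sketch of the early setup sequence described above, assuming
 * a single RAM bank at 0x80000000 and an initrd whose placement is known
 * from the bootloader (all names and values here are illustrative, not
 * taken from this file):
 *
 *	memblock_add(0x80000000, SZ_512M);
 *	memblock_reserve(initrd_start, initrd_size);
 *	memblock_allow_resize();	- all reservations are known now
 *	ptr = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);	- virtual
 *	pa = memblock_phys_alloc(PAGE_SIZE, SMP_CACHE_BYTES);	- physical
 */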

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, memory is allocated top-down instead.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * failure happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
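
/*
 * Illustrative use of the range search (the address bounds here are an
 * assumption made for the example, not taken from this file):
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range(0, SZ_4G, SZ_64K, SZ_64K);
 *	if (addr)
 *		memblock_reserve(addr, SZ_64K);
 *
 * Note that the search alone does not claim the range; a subsequent
 * memblock_reserve() is needed, which is what the allocation APIs
 * further down do internally.
 */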

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it came from memblock. Otherwise, we
	 * needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
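
/*
 * A worked example of the overlap handling above (addresses are
 * illustrative): after
 *
 *	memblock_add(0x1000, 0x2000);
 *	memblock_add(0x2000, 0x2000);
 *
 * the "memory" type holds a single region [0x1000, 0x4000), because the
 * second call inserts only the non-overlapping tail and the neighbouring
 * compatible regions are then merged.
 */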

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free a boot memory block previously allocated by the memblock_alloc_xx()
 * API. The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears
 * the flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}
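
/*
 * Sketch of how platform code might use the flag helpers once firmware
 * has described the special ranges (the variables below are hypothetical):
 *
 *	memblock_mark_mirror(mirror_base, mirror_size);
 *	memblock_mark_hotplug(hotplug_base, hotplug_size);
 *
 * Allocations made afterwards prefer mirrored regions (see
 * choose_memblock_flags()) and, when movable_node is enabled, skip
 * hotpluggable ones (see should_skip_region()).
 */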

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
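
/*
 * These low-level __next_* helpers back the iteration macros declared in
 * memblock.h; callers normally go through the macros, e.g. (sketch):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: [%pa-%pa]\n", &start, &end);
 *
 * The encoded u64 cursor @i carries two array positions at once, as
 * described for __next_mem_range() below.
 */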

static bool should_skip_region(struct memblock_region *m, int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}
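
/*
 * Sketch of the pfn-range iteration built on the helper above (the macro
 * is the real for_each_mem_pfn_range() from memblock.h, the loop body is
 * illustrative):
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns [%lx-%lx)\n", nid, start_pfn, end_pfn);
 */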

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it made more sense to centralize it in one new iterator that
 * does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;
	int nid;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, &nid);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, &nid);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control whether the allocation may fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node cannot hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function sets the min_count for the allocated boot
 * memory block to 0 using kmemleak_alloc_phys(), so that it is never
 * reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}
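
/*
 * Illustrative physical allocation constrained below 4GiB, e.g. for a
 * device that can only address 32 bits (the bounds are an assumption
 * made for the example):
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_phys_alloc_range(SZ_1M, SZ_1M, 0, SZ_4G);
 *	if (!pa)
 *		pr_warn("low allocation failed\n");
 */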

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}

/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control whether the allocation may fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, true);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}
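
/*
 * The _raw variants above skip the memset(); with init poisoning debug
 * enabled the returned block may be filled with a poison pattern rather
 * than zeroes, so they suit callers that initialize every byte anyway.
 * The common zeroed path goes through the function below (sketch, the
 * map variable is hypothetical):
 *
 *	map = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 0,
 *				     MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 */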

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
					   min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

/**
 * __memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions. If @limit exceeds the total size of
	 * those regions, max_addr keeps its original value, PHYS_ADDR_MAX.
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}
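
/*
 * Example: a "mem=4G" kernel parameter typically leads the architecture
 * setup code to call (sketch):
 *
 *	memblock_enforce_memory_limit(SZ_4G);
 *
 * which trims "memory" and "reserved" above the address where the first
 * 4GiB of usable RAM end; note that the limit counts usable bytes, not
 * an absolute physical address.
 */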

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
						&start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}
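
/*
 * Sketch of the single-address predicates above (pa is hypothetical):
 *
 *	if (memblock_is_memory(pa) && !memblock_is_reserved(pa))
 *		pa points into usable, unreserved RAM
 *
 * memblock_is_region_memory() below answers the subset question for a
 * whole range rather than a single address.
 */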
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * true if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NEED_MULTIPLE_NODES
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
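/*
 * Example: booting with "memblock=debug" sets memblock_debug above, after
 * which every call logged through memblock_dbg() prints one line. For a
 * call site using the "%s: [%pa-%pa] %pS\n" format (as in
 * __memblock_free_late() above) the output looks like the following; the
 * addresses and the caller shown here are purely illustrative:
 *
 *	__memblock_free_late: [0x0000000001000000-0x0000000001ffffff] setup_arch+0x1f4/0x5b0
 */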
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		order = min(MAX_ORDER - 1UL, __ffs(start));

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, such as when node 0 has no RAM installed,
	 * low memory will be on node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Return: the number of pages actually released.
 */
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);

	return pages;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */
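/*
 * Example: with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK enabled, the
 * files created above expose the region lists at runtime. The output
 * follows the memblock_debug_show() format; the values shown here are
 * illustrative:
 *
 *	$ cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000000001000..0x000000000009efff
 *	   1: 0x0000000000100000..0x00000000bffdffff
 */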