// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

#ifndef INIT_MEMBLOCK_MEMORY_REGIONS
#define INIT_MEMBLOCK_MEMORY_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
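 *
 * As a quick, purely illustrative sketch of a typical early-boot
 * sequence - the addresses, sizes and the ``initrd_*`` variables here
 * are made up for the example::
 *
 *	memblock_add(0x80000000, SZ_512M);		// register RAM
 *	memblock_reserve(initrd_start, initrd_size);	// protect initrd
 *	buf = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);	// zeroed buffer
 *	if (!buf)
 *		panic("Cannot allocate early buffer\n");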
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */

#ifndef CONFIG_NUMA
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_MEMORY_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock;
static int memblock_reserved_in_slab __initdata_memblock;

bool __init_memblock memblock_has_mirror(void)
{
	return system_has_some_mirror;
}

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
unsigned long __init_memblock
memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, phys_addr_t base2,
		       phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		if (memblock_reserved_in_slab)
			kfree(memblock.reserved.regions);
		else
			memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		if (memblock_memory_in_slab)
			kfree(memblock.memory.regions);
		else
			memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						 phys_addr_t new_area_start,
						 phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(old_array, old_alloc_size);

	/*
	 * Reserve the new array if it came from memblock.
	 * Otherwise, we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 * @start_rgn: start scanning from (@start_rgn - 1)
 * @end_rgn: end scanning at (@end_rgn - 1)
 *
 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn)
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type,
						   unsigned long start_rgn,
						   unsigned long end_rgn)
{
	int i = 0;

	if (start_rgn)
		i = start_rgn - 1;
	end_rgn = min(end_rgn, type->cnt - 1);
	while (i < end_rgn) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
		end_rgn--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
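 *
 * For example (an illustrative trace, not taken from a real boot), if
 * @type already holds [0x2000, 0x4000) with matching nid and flags,
 * then::
 *
 *	memblock_add_range(type, 0x1000, 0x2000, nid, flags);
 *
 * leaves a single merged region [0x1000, 0x4000).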
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new, start_rgn = -1, end_rgn;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}

	/*
	 * The worst case is when new range overlaps all existing regions,
	 * then we'll need type->cnt + 1 empty regions in @type. So if
	 * type->cnt * 2 + 1 is less than or equal to type->max, we know
	 * that there is enough empty regions in @type, and we can insert
	 * regions directly.
	 */
	if (type->cnt * 2 + 1 <= type->max)
		insert = true;

repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert) {
				if (start_rgn == -1)
					start_rgn = idx;
				end_rgn = idx + 1;
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
			}
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert) {
			if (start_rgn == -1)
				start_rgn = idx;
			end_rgn = idx + 1;
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
		}
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type, start_rgn, end_rgn);
		return 0;
	}
}

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
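 *
 * An illustrative NUMA-aware early setup - the *_base and *_size
 * variables here are hypothetical - might register one range per node::
 *
 *	memblock_add_node(node0_base, node0_size, 0, MEMBLOCK_NONE);
 *	memblock_add_node(node1_base, node1_size, 1, MEMBLOCK_NONE);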
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid, enum memblock_flags flags)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__,
		     &base, &end, nid, flags, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, nid, flags);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
						 phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
	if (ptr)
		memblock_phys_free(__pa(ptr), size);
}

/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
}
#endif

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears @flag.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type, start_rgn, end_rgn);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	if (!mirrored_kernelcore)
		return 0;

	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (nid != NUMA_NO_NODE && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	/* skip driver-managed memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
		return true;

	return false;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
		      "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
		      "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		if (should_skip_region(type_a, m, nid, flags))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];
		r_nid = memblock_get_region_node(r);

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r_nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
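 *
 * A typical (illustrative) use is assigning the node id after the
 * region was registered without one - base, size and nid here are
 * placeholders::
 *
 *	memblock_add(base, size);
 *	...
 *	memblock_set_node(base, size, &memblock.memory, nid);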
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type, start_rgn, end_rgn);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
	phys_addr_t spa, epa;

	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
			 &memblock.memory, &memblock.reserved,
			 &spa, &epa, NULL);

	while (*idx != U64_MAX) {
		unsigned long epfn = PFN_DOWN(epa);
		unsigned long spfn = PFN_UP(spa);

		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
			}

			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
				*out_epfn = min(zone_end_pfn(zone), epfn);

			return;
		}

		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
				 &memblock.memory, &memblock.reserved,
				 &spa, &epa, NULL);
	}

	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
		*out_epfn = 0;
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the allocated boot memory block is registered with
 * kmemleak_alloc_phys(), so it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE && !exact_nid) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		goto again;
	}

	return 0;

done:
	/*
	 * Skip kmemleak for those places like kasan_init() and
	 * early_pgtable_alloc() due to high volume.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		/*
		 * Memblock allocated blocks are never reported as
		 * leaks. This is because many of these blocks are
		 * only referred via the physical address which is
		 * not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0);

	/*
	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
	 * require memory to be accepted before it can be used by the
	 * guest.
	 *
	 * Accept the memory of the allocated buffer.
	 */
	accept_memory(found, found + size);

	return found;
}

/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
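 *
 * For example (the size, alignment and bounds are illustrative), a
 * 2M-aligned block below 4G could be requested with::
 *
 *	phys_addr_t pa = memblock_phys_alloc_range(SZ_2M, SZ_2M, 0, SZ_4G);
 *	if (!pa)
 *		pr_warn("allocation failed\n");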
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}

/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}

/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
					 exact_nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
						 exact_nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       true);
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	return memblock_alloc_internal(size, align, min_addr, max_addr, nid,
				       false);
}

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid, false);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

/**
 * memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
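 *
 * For example (illustrative; vaddr and size are placeholders), an early
 * allocation that is no longer needed after mem_init() could be released
 * with::
 *
 *	memblock_free_late(__pa(vaddr), size);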
 */
void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pS\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	if (!memblock_memory->total_size) {
		pr_warn("%s: No memory registered yet\n", __func__);
		return;
	}

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

/*
 * Binary search over the sorted, non-overlapping region array of @type for
 * the region that contains @addr. Returns the region index, or -1 if @addr
 * falls into a hole between regions.
 */
static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}
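
/*
 * Worked example (illustrative, made-up layout): with memory regions
 * [0x1000, 0x2000) and [0x4000, 0x8000), memblock_search() above starts
 * with left = 0, right = 2. Looking up 0x5000 probes mid = 1, finds
 * 0x4000 <= 0x5000 < 0x8000 and returns 1. Looking up 0x3000 probes
 * mid = 1 (0x3000 < 0x4000, so right = 1), then mid = 0 (0x3000 >= 0x2000,
 * so left = 1) and returns -1, which is why memblock_is_memory() reports
 * false for addresses in the hole between the two banks.
 */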
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NUMA
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
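
/*
 * Illustrative usage: booting with "memblock=debug" on the kernel command
 * line makes early_memblock() above set memblock_debug, so every
 * memblock_dbg() call in this file is printed to the kernel log and a
 * later call to memblock_dump_all() emits the full
 * "MEMBLOCK configuration:" region listing.
 */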
static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_phys_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = pageblock_start_pfn(start);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		prev_end = pageblock_align(end);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = pageblock_align(end);
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}
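
/*
 * Worked example (illustrative, made-up layout): assume 4K pages, a
 * pageblock of 512 (0x200) pfns and two banks covering pfns [0x0, 0x3750)
 * and [0x5000, 0x8000). The first iteration of free_unused_memmap() above
 * records prev_end = pageblock_align(0x3750) = 0x3800; the second rounds
 * its start down to pageblock_start_pfn(0x5000) = 0x5000 and calls
 * free_memmap(0x3800, 0x5000), releasing the struct page entries backing
 * the 0x1800-pfn hole. With a 64-byte struct page that hands
 * 0x1800 * 64 = 0x60000 bytes (384 KiB) of memmap back to memblock.
 */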
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		/*
		 * Free the pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_ORDER-aligned, set order to MAX_ORDER for the case.
		 */
		if (start)
			order = min_t(int, MAX_ORDER, __ffs(start));
		else
			order = MAX_ORDER;

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
					       phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	int nid;

	/*
	 * set nid on all reserved pages and also treat struct
	 * pages for the NOMAP regions as PageReserved
	 */
	for_each_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (memblock_is_nomap(region))
			reserve_bootmem_region(start, end, nid);

		memblock_set_node(start, end, &memblock.reserved, nid);
	}

	/* initialize struct pages for the reserved regions */
	for_each_reserved_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
			nid = early_pfn_to_nid(PFN_DOWN(start));

		reserve_bootmem_region(start, end, nid);
	}
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, such as when node 0 has no RAM installed,
	 * low memory will be on node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

static void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 */
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}
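
/*
 * Worked example (illustrative): for a free range of pfns [0x13, 0x40),
 * the chunking loop in __free_pages_memory() above frees one order-0 page
 * at 0x13 (__ffs(0x13) = 0), an order-2 block at 0x14, an order-3 block
 * at 0x18 and finally an order-5 block at 0x20, covering the whole range
 * with the largest buddy-sized chunks that alignment and the range end
 * allow.
 */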
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
static const char * const flagname[] = {
	[ilog2(MEMBLOCK_HOTPLUG)] = "HOTPLUG",
	[ilog2(MEMBLOCK_MIRROR)] = "MIRROR",
	[ilog2(MEMBLOCK_NOMAP)] = "NOMAP",
	[ilog2(MEMBLOCK_DRIVER_MANAGED)] = "DRV_MNG",
};

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i, j, nid;
	unsigned int count = ARRAY_SIZE(flagname);
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;
		nid = memblock_get_region_node(reg);

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa ", &reg->base, &end);
		if (nid != MAX_NUMNODES)
			seq_printf(m, "%4d ", nid);
		else
			seq_printf(m, "%4c ", 'x');
		if (reg->flags) {
			for (j = 0; j < count; j++) {
				if (reg->flags & (1U << j)) {
					seq_printf(m, "%s\n", flagname[j]);
					break;
				}
			}
			if (j == count)
				seq_printf(m, "%s\n", "UNKNOWN");
		} else {
			seq_printf(m, "%s\n", "NONE");
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */
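
/*
 * Illustrative sketch of the interface created above (addresses and node
 * ids are made up): reading /sys/kernel/debug/memblock/memory on a 64-bit
 * NUMA machine could print
 *
 *	   0: 0x0000000080000000..0x00000000bfffffff    0 NONE
 *	   1: 0x0000000100000000..0x000000017fffffff    1 NOMAP
 *
 * with one line per region in the format produced by memblock_debug_show():
 * index, physical range, NUMA node (or 'x' when unset), and the first
 * matching flag name from flagname[], or NONE when no flags are set.
 */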