1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Procedures for maintaining information about logical memory blocks. 4 * 5 * Peter Bergner, IBM Corp. June 2001. 6 * Copyright (C) 2001 Peter Bergner. 7 */ 8 9 #include <linux/kernel.h> 10 #include <linux/slab.h> 11 #include <linux/init.h> 12 #include <linux/bitops.h> 13 #include <linux/poison.h> 14 #include <linux/pfn.h> 15 #include <linux/debugfs.h> 16 #include <linux/kmemleak.h> 17 #include <linux/seq_file.h> 18 #include <linux/memblock.h> 19 20 #include <asm/sections.h> 21 #include <linux/io.h> 22 23 #include "internal.h" 24 25 #define INIT_MEMBLOCK_REGIONS 128 26 #define INIT_PHYSMEM_REGIONS 4 27 28 #ifndef INIT_MEMBLOCK_RESERVED_REGIONS 29 # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS 30 #endif 31 32 #ifndef INIT_MEMBLOCK_MEMORY_REGIONS 33 #define INIT_MEMBLOCK_MEMORY_REGIONS INIT_MEMBLOCK_REGIONS 34 #endif 35 36 /** 37 * DOC: memblock overview 38 * 39 * Memblock is a method of managing memory regions during the early 40 * boot period when the usual kernel memory allocators are not up and 41 * running. 42 * 43 * Memblock views the system memory as collections of contiguous 44 * regions. There are several types of these collections: 45 * 46 * * ``memory`` - describes the physical memory available to the 47 * kernel; this may differ from the actual physical memory installed 48 * in the system, for instance when the memory is restricted with 49 * ``mem=`` command line parameter 50 * * ``reserved`` - describes the regions that were allocated 51 * * ``physmem`` - describes the actual physical memory available during 52 * boot regardless of the possible restrictions and memory hot(un)plug; 53 * the ``physmem`` type is only available on some architectures. 54 * 55 * Each region is represented by struct memblock_region that 56 * defines the region extents, its attributes and NUMA node id on NUMA 57 * systems. Every memory type is described by the struct memblock_type 58 * which contains an array of memory regions along with 59 * the allocator metadata. The "memory" and "reserved" types are nicely 60 * wrapped with struct memblock. This structure is statically 61 * initialized at build time. The region arrays are initially sized to 62 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and 63 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array 64 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS. 65 * The memblock_allow_resize() enables automatic resizing of the region 66 * arrays during addition of new regions. This feature should be used 67 * with care so that memory allocated for the region array will not 68 * overlap with areas that should be reserved, for example initrd. 69 * 70 * The early architecture setup should tell memblock what the physical 71 * memory layout is by using memblock_add() or memblock_add_node() 72 * functions. The first function does not assign the region to a NUMA 73 * node and it is appropriate for UMA systems. Yet, it is possible to 74 * use it on NUMA systems as well and assign the region to a NUMA node 75 * later in the setup process using memblock_set_node(). The 76 * memblock_add_node() performs such an assignment directly. 77 * 78 * Once memblock is setup the memory can be allocated using one of the 79 * API variants: 80 * 81 * * memblock_phys_alloc*() - these functions return the **physical** 82 * address of the allocated memory 83 * * memblock_alloc*() - these functions return the **virtual** address 84 * of the allocated memory. 
85 * 86 * Note, that both API variants use implicit assumptions about allowed 87 * memory ranges and the fallback methods. Consult the documentation 88 * of memblock_alloc_internal() and memblock_alloc_range_nid() 89 * functions for more elaborate description. 90 * 91 * As the system boot progresses, the architecture specific mem_init() 92 * function frees all the memory to the buddy page allocator. 93 * 94 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the 95 * memblock data structures (except "physmem") will be discarded after the 96 * system initialization completes. 97 */ 98 99 #ifndef CONFIG_NUMA 100 struct pglist_data __refdata contig_page_data; 101 EXPORT_SYMBOL(contig_page_data); 102 #endif 103 104 unsigned long max_low_pfn; 105 unsigned long min_low_pfn; 106 unsigned long max_pfn; 107 unsigned long long max_possible_pfn; 108 109 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_MEMORY_REGIONS] __initdata_memblock; 110 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; 111 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 112 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS]; 113 #endif 114 115 struct memblock memblock __initdata_memblock = { 116 .memory.regions = memblock_memory_init_regions, 117 .memory.cnt = 1, /* empty dummy entry */ 118 .memory.max = INIT_MEMBLOCK_MEMORY_REGIONS, 119 .memory.name = "memory", 120 121 .reserved.regions = memblock_reserved_init_regions, 122 .reserved.cnt = 1, /* empty dummy entry */ 123 .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, 124 .reserved.name = "reserved", 125 126 .bottom_up = false, 127 .current_limit = MEMBLOCK_ALLOC_ANYWHERE, 128 }; 129 130 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 131 struct memblock_type physmem = { 132 .regions = memblock_physmem_init_regions, 133 .cnt = 1, /* empty dummy entry */ 134 .max = INIT_PHYSMEM_REGIONS, 135 .name = "physmem", 136 }; 137 #endif 138 139 /* 140 * keep a pointer to &memblock.memory in the text section to use it in 141 * __next_mem_range() and its helpers. 142 * For architectures that do not keep memblock data after init, this 143 * pointer will be reset to NULL at memblock_discard() 144 */ 145 static __refdata struct memblock_type *memblock_memory = &memblock.memory; 146 147 #define for_each_memblock_type(i, memblock_type, rgn) \ 148 for (i = 0, rgn = &memblock_type->regions[0]; \ 149 i < memblock_type->cnt; \ 150 i++, rgn = &memblock_type->regions[i]) 151 152 #define memblock_dbg(fmt, ...) \ 153 do { \ 154 if (memblock_debug) \ 155 pr_info(fmt, ##__VA_ARGS__); \ 156 } while (0) 157 158 static int memblock_debug __initdata_memblock; 159 static bool system_has_some_mirror __initdata_memblock; 160 static int memblock_can_resize __initdata_memblock; 161 static int memblock_memory_in_slab __initdata_memblock; 162 static int memblock_reserved_in_slab __initdata_memblock; 163 164 static enum memblock_flags __init_memblock choose_memblock_flags(void) 165 { 166 return system_has_some_mirror ? 
MEMBLOCK_MIRROR : MEMBLOCK_NONE; 167 } 168 169 /* adjust *@size so that (@base + *@size) doesn't overflow, return new size */ 170 static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size) 171 { 172 return *size = min(*size, PHYS_ADDR_MAX - base); 173 } 174 175 /* 176 * Address comparison utilities 177 */ 178 static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, 179 phys_addr_t base2, phys_addr_t size2) 180 { 181 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 182 } 183 184 bool __init_memblock memblock_overlaps_region(struct memblock_type *type, 185 phys_addr_t base, phys_addr_t size) 186 { 187 unsigned long i; 188 189 memblock_cap_size(base, &size); 190 191 for (i = 0; i < type->cnt; i++) 192 if (memblock_addrs_overlap(base, size, type->regions[i].base, 193 type->regions[i].size)) 194 break; 195 return i < type->cnt; 196 } 197 198 /** 199 * __memblock_find_range_bottom_up - find free area utility in bottom-up 200 * @start: start of candidate range 201 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 202 * %MEMBLOCK_ALLOC_ACCESSIBLE 203 * @size: size of free area to find 204 * @align: alignment of free area to find 205 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 206 * @flags: pick from blocks based on memory attributes 207 * 208 * Utility called from memblock_find_in_range_node(), find free area bottom-up. 209 * 210 * Return: 211 * Found address on success, 0 on failure. 212 */ 213 static phys_addr_t __init_memblock 214 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end, 215 phys_addr_t size, phys_addr_t align, int nid, 216 enum memblock_flags flags) 217 { 218 phys_addr_t this_start, this_end, cand; 219 u64 i; 220 221 for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) { 222 this_start = clamp(this_start, start, end); 223 this_end = clamp(this_end, start, end); 224 225 cand = round_up(this_start, align); 226 if (cand < this_end && this_end - cand >= size) 227 return cand; 228 } 229 230 return 0; 231 } 232 233 /** 234 * __memblock_find_range_top_down - find free area utility, in top-down 235 * @start: start of candidate range 236 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 237 * %MEMBLOCK_ALLOC_ACCESSIBLE 238 * @size: size of free area to find 239 * @align: alignment of free area to find 240 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 241 * @flags: pick from blocks based on memory attributes 242 * 243 * Utility called from memblock_find_in_range_node(), find free area top-down. 244 * 245 * Return: 246 * Found address on success, 0 on failure. 
247 */ 248 static phys_addr_t __init_memblock 249 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end, 250 phys_addr_t size, phys_addr_t align, int nid, 251 enum memblock_flags flags) 252 { 253 phys_addr_t this_start, this_end, cand; 254 u64 i; 255 256 for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end, 257 NULL) { 258 this_start = clamp(this_start, start, end); 259 this_end = clamp(this_end, start, end); 260 261 if (this_end < size) 262 continue; 263 264 cand = round_down(this_end - size, align); 265 if (cand >= this_start) 266 return cand; 267 } 268 269 return 0; 270 } 271 272 /** 273 * memblock_find_in_range_node - find free area in given range and node 274 * @size: size of free area to find 275 * @align: alignment of free area to find 276 * @start: start of candidate range 277 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 278 * %MEMBLOCK_ALLOC_ACCESSIBLE 279 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 280 * @flags: pick from blocks based on memory attributes 281 * 282 * Find @size free area aligned to @align in the specified range and node. 283 * 284 * Return: 285 * Found address on success, 0 on failure. 286 */ 287 static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, 288 phys_addr_t align, phys_addr_t start, 289 phys_addr_t end, int nid, 290 enum memblock_flags flags) 291 { 292 /* pump up @end */ 293 if (end == MEMBLOCK_ALLOC_ACCESSIBLE || 294 end == MEMBLOCK_ALLOC_NOLEAKTRACE) 295 end = memblock.current_limit; 296 297 /* avoid allocating the first page */ 298 start = max_t(phys_addr_t, start, PAGE_SIZE); 299 end = max(start, end); 300 301 if (memblock_bottom_up()) 302 return __memblock_find_range_bottom_up(start, end, size, align, 303 nid, flags); 304 else 305 return __memblock_find_range_top_down(start, end, size, align, 306 nid, flags); 307 } 308 309 /** 310 * memblock_find_in_range - find free area in given range 311 * @start: start of candidate range 312 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or 313 * %MEMBLOCK_ALLOC_ACCESSIBLE 314 * @size: size of free area to find 315 * @align: alignment of free area to find 316 * 317 * Find @size free area aligned to @align in the specified range. 318 * 319 * Return: 320 * Found address on success, 0 on failure. 
321 */ 322 static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, 323 phys_addr_t end, phys_addr_t size, 324 phys_addr_t align) 325 { 326 phys_addr_t ret; 327 enum memblock_flags flags = choose_memblock_flags(); 328 329 again: 330 ret = memblock_find_in_range_node(size, align, start, end, 331 NUMA_NO_NODE, flags); 332 333 if (!ret && (flags & MEMBLOCK_MIRROR)) { 334 pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n", 335 &size); 336 flags &= ~MEMBLOCK_MIRROR; 337 goto again; 338 } 339 340 return ret; 341 } 342 343 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) 344 { 345 type->total_size -= type->regions[r].size; 346 memmove(&type->regions[r], &type->regions[r + 1], 347 (type->cnt - (r + 1)) * sizeof(type->regions[r])); 348 type->cnt--; 349 350 /* Special case for empty arrays */ 351 if (type->cnt == 0) { 352 WARN_ON(type->total_size != 0); 353 type->cnt = 1; 354 type->regions[0].base = 0; 355 type->regions[0].size = 0; 356 type->regions[0].flags = 0; 357 memblock_set_region_node(&type->regions[0], MAX_NUMNODES); 358 } 359 } 360 361 #ifndef CONFIG_ARCH_KEEP_MEMBLOCK 362 /** 363 * memblock_discard - discard memory and reserved arrays if they were allocated 364 */ 365 void __init memblock_discard(void) 366 { 367 phys_addr_t addr, size; 368 369 if (memblock.reserved.regions != memblock_reserved_init_regions) { 370 addr = __pa(memblock.reserved.regions); 371 size = PAGE_ALIGN(sizeof(struct memblock_region) * 372 memblock.reserved.max); 373 if (memblock_reserved_in_slab) 374 kfree(memblock.reserved.regions); 375 else 376 memblock_free_late(addr, size); 377 /* Reset to prevent UAF from stray frees. */ 378 memblock.reserved.regions = memblock_reserved_init_regions; 379 memblock.reserved.cnt = 1; 380 memblock_remove_region(&memblock.reserved, 0); 381 } 382 383 if (memblock.memory.regions != memblock_memory_init_regions) { 384 addr = __pa(memblock.memory.regions); 385 size = PAGE_ALIGN(sizeof(struct memblock_region) * 386 memblock.memory.max); 387 if (memblock_memory_in_slab) 388 kfree(memblock.memory.regions); 389 else 390 memblock_free_late(addr, size); 391 } 392 393 memblock_memory = NULL; 394 } 395 #endif 396 397 /** 398 * memblock_double_array - double the size of the memblock regions array 399 * @type: memblock type of the regions array being doubled 400 * @new_area_start: starting address of memory range to avoid overlap with 401 * @new_area_size: size of memory range to avoid overlap with 402 * 403 * Double the size of the @type regions array. If memblock is being used to 404 * allocate memory for a new reserved regions array and there is a previously 405 * allocated memory range [@new_area_start, @new_area_start + @new_area_size] 406 * waiting to be reserved, ensure the memory used by the new array does 407 * not overlap. 408 * 409 * Return: 410 * 0 on success, -1 on failure. 
411 */ 412 static int __init_memblock memblock_double_array(struct memblock_type *type, 413 phys_addr_t new_area_start, 414 phys_addr_t new_area_size) 415 { 416 struct memblock_region *new_array, *old_array; 417 phys_addr_t old_alloc_size, new_alloc_size; 418 phys_addr_t old_size, new_size, addr, new_end; 419 int use_slab = slab_is_available(); 420 int *in_slab; 421 422 /* We don't allow resizing until we know about the reserved regions 423 * of memory that aren't suitable for allocation 424 */ 425 if (!memblock_can_resize) 426 return -1; 427 428 /* Calculate new doubled size */ 429 old_size = type->max * sizeof(struct memblock_region); 430 new_size = old_size << 1; 431 /* 432 * We need to allocated new one align to PAGE_SIZE, 433 * so we can free them completely later. 434 */ 435 old_alloc_size = PAGE_ALIGN(old_size); 436 new_alloc_size = PAGE_ALIGN(new_size); 437 438 /* Retrieve the slab flag */ 439 if (type == &memblock.memory) 440 in_slab = &memblock_memory_in_slab; 441 else 442 in_slab = &memblock_reserved_in_slab; 443 444 /* Try to find some space for it */ 445 if (use_slab) { 446 new_array = kmalloc(new_size, GFP_KERNEL); 447 addr = new_array ? __pa(new_array) : 0; 448 } else { 449 /* only exclude range when trying to double reserved.regions */ 450 if (type != &memblock.reserved) 451 new_area_start = new_area_size = 0; 452 453 addr = memblock_find_in_range(new_area_start + new_area_size, 454 memblock.current_limit, 455 new_alloc_size, PAGE_SIZE); 456 if (!addr && new_area_size) 457 addr = memblock_find_in_range(0, 458 min(new_area_start, memblock.current_limit), 459 new_alloc_size, PAGE_SIZE); 460 461 new_array = addr ? __va(addr) : NULL; 462 } 463 if (!addr) { 464 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", 465 type->name, type->max, type->max * 2); 466 return -1; 467 } 468 469 new_end = addr + new_size - 1; 470 memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]", 471 type->name, type->max * 2, &addr, &new_end); 472 473 /* 474 * Found space, we now need to move the array over before we add the 475 * reserved region since it may be our reserved array itself that is 476 * full. 477 */ 478 memcpy(new_array, type->regions, old_size); 479 memset(new_array + type->max, 0, old_size); 480 old_array = type->regions; 481 type->regions = new_array; 482 type->max <<= 1; 483 484 /* Free old array. We needn't free it if the array is the static one */ 485 if (*in_slab) 486 kfree(old_array); 487 else if (old_array != memblock_memory_init_regions && 488 old_array != memblock_reserved_init_regions) 489 memblock_free(old_array, old_alloc_size); 490 491 /* 492 * Reserve the new array if that comes from the memblock. 
Otherwise, we 493 * needn't do it 494 */ 495 if (!use_slab) 496 BUG_ON(memblock_reserve(addr, new_alloc_size)); 497 498 /* Update slab flag */ 499 *in_slab = use_slab; 500 501 return 0; 502 } 503 504 /** 505 * memblock_merge_regions - merge neighboring compatible regions 506 * @type: memblock type to scan 507 * @start_rgn: start scanning from (@start_rgn - 1) 508 * @end_rgn: end scanning at (@end_rgn - 1) 509 * Scan @type and merge neighboring compatible regions in [@start_rgn - 1, @end_rgn) 510 */ 511 static void __init_memblock memblock_merge_regions(struct memblock_type *type, 512 unsigned long start_rgn, 513 unsigned long end_rgn) 514 { 515 int i = 0; 516 if (start_rgn) 517 i = start_rgn - 1; 518 end_rgn = min(end_rgn, type->cnt - 1); 519 while (i < end_rgn) { 520 struct memblock_region *this = &type->regions[i]; 521 struct memblock_region *next = &type->regions[i + 1]; 522 523 if (this->base + this->size != next->base || 524 memblock_get_region_node(this) != 525 memblock_get_region_node(next) || 526 this->flags != next->flags) { 527 BUG_ON(this->base + this->size > next->base); 528 i++; 529 continue; 530 } 531 532 this->size += next->size; 533 /* move forward from next + 1, index of which is i + 2 */ 534 memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next)); 535 type->cnt--; 536 end_rgn--; 537 } 538 } 539 540 /** 541 * memblock_insert_region - insert new memblock region 542 * @type: memblock type to insert into 543 * @idx: index for the insertion point 544 * @base: base address of the new region 545 * @size: size of the new region 546 * @nid: node id of the new region 547 * @flags: flags of the new region 548 * 549 * Insert new memblock region [@base, @base + @size) into @type at @idx. 550 * @type must already have extra room to accommodate the new region. 551 */ 552 static void __init_memblock memblock_insert_region(struct memblock_type *type, 553 int idx, phys_addr_t base, 554 phys_addr_t size, 555 int nid, 556 enum memblock_flags flags) 557 { 558 struct memblock_region *rgn = &type->regions[idx]; 559 560 BUG_ON(type->cnt >= type->max); 561 memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn)); 562 rgn->base = base; 563 rgn->size = size; 564 rgn->flags = flags; 565 memblock_set_region_node(rgn, nid); 566 type->cnt++; 567 type->total_size += size; 568 } 569 570 /** 571 * memblock_add_range - add new memblock region 572 * @type: memblock type to add new region into 573 * @base: base address of the new region 574 * @size: size of the new region 575 * @nid: nid of the new region 576 * @flags: flags of the new region 577 * 578 * Add new memblock region [@base, @base + @size) into @type. The new region 579 * is allowed to overlap with existing ones - overlaps don't affect already 580 * existing regions. @type is guaranteed to be minimal (all neighbouring 581 * compatible regions are merged) after the addition. 582 * 583 * Return: 584 * 0 on success, -errno on failure. 
585 */ 586 static int __init_memblock memblock_add_range(struct memblock_type *type, 587 phys_addr_t base, phys_addr_t size, 588 int nid, enum memblock_flags flags) 589 { 590 bool insert = false; 591 phys_addr_t obase = base; 592 phys_addr_t end = base + memblock_cap_size(base, &size); 593 int idx, nr_new, start_rgn = -1, end_rgn; 594 struct memblock_region *rgn; 595 596 if (!size) 597 return 0; 598 599 /* special case for empty array */ 600 if (type->regions[0].size == 0) { 601 WARN_ON(type->cnt != 1 || type->total_size); 602 type->regions[0].base = base; 603 type->regions[0].size = size; 604 type->regions[0].flags = flags; 605 memblock_set_region_node(&type->regions[0], nid); 606 type->total_size = size; 607 return 0; 608 } 609 610 /* 611 * The worst case is when new range overlaps all existing regions, 612 * then we'll need type->cnt + 1 empty regions in @type. So if 613 * type->cnt * 2 + 1 is less than or equal to type->max, we know 614 * that there is enough empty regions in @type, and we can insert 615 * regions directly. 616 */ 617 if (type->cnt * 2 + 1 <= type->max) 618 insert = true; 619 620 repeat: 621 /* 622 * The following is executed twice. Once with %false @insert and 623 * then with %true. The first counts the number of regions needed 624 * to accommodate the new area. The second actually inserts them. 625 */ 626 base = obase; 627 nr_new = 0; 628 629 for_each_memblock_type(idx, type, rgn) { 630 phys_addr_t rbase = rgn->base; 631 phys_addr_t rend = rbase + rgn->size; 632 633 if (rbase >= end) 634 break; 635 if (rend <= base) 636 continue; 637 /* 638 * @rgn overlaps. If it separates the lower part of new 639 * area, insert that portion. 640 */ 641 if (rbase > base) { 642 #ifdef CONFIG_NUMA 643 WARN_ON(nid != memblock_get_region_node(rgn)); 644 #endif 645 WARN_ON(flags != rgn->flags); 646 nr_new++; 647 if (insert) { 648 if (start_rgn == -1) 649 start_rgn = idx; 650 end_rgn = idx + 1; 651 memblock_insert_region(type, idx++, base, 652 rbase - base, nid, 653 flags); 654 } 655 } 656 /* area below @rend is dealt with, forget about it */ 657 base = min(rend, end); 658 } 659 660 /* insert the remaining portion */ 661 if (base < end) { 662 nr_new++; 663 if (insert) { 664 if (start_rgn == -1) 665 start_rgn = idx; 666 end_rgn = idx + 1; 667 memblock_insert_region(type, idx, base, end - base, 668 nid, flags); 669 } 670 } 671 672 if (!nr_new) 673 return 0; 674 675 /* 676 * If this was the first round, resize array and repeat for actual 677 * insertions; otherwise, merge and return. 678 */ 679 if (!insert) { 680 while (type->cnt + nr_new > type->max) 681 if (memblock_double_array(type, obase, size) < 0) 682 return -ENOMEM; 683 insert = true; 684 goto repeat; 685 } else { 686 memblock_merge_regions(type, start_rgn, end_rgn); 687 return 0; 688 } 689 } 690 691 /** 692 * memblock_add_node - add new memblock region within a NUMA node 693 * @base: base address of the new region 694 * @size: size of the new region 695 * @nid: nid of the new region 696 * @flags: flags of the new region 697 * 698 * Add new memblock region [@base, @base + @size) to the "memory" 699 * type. See memblock_add_range() description for mode details 700 * 701 * Return: 702 * 0 on success, -errno on failure. 
703 */ 704 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size, 705 int nid, enum memblock_flags flags) 706 { 707 phys_addr_t end = base + size - 1; 708 709 memblock_dbg("%s: [%pa-%pa] nid=%d flags=%x %pS\n", __func__, 710 &base, &end, nid, flags, (void *)_RET_IP_); 711 712 return memblock_add_range(&memblock.memory, base, size, nid, flags); 713 } 714 715 /** 716 * memblock_add - add new memblock region 717 * @base: base address of the new region 718 * @size: size of the new region 719 * 720 * Add new memblock region [@base, @base + @size) to the "memory" 721 * type. See memblock_add_range() description for mode details 722 * 723 * Return: 724 * 0 on success, -errno on failure. 725 */ 726 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) 727 { 728 phys_addr_t end = base + size - 1; 729 730 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 731 &base, &end, (void *)_RET_IP_); 732 733 return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0); 734 } 735 736 /** 737 * memblock_isolate_range - isolate given range into disjoint memblocks 738 * @type: memblock type to isolate range for 739 * @base: base of range to isolate 740 * @size: size of range to isolate 741 * @start_rgn: out parameter for the start of isolated region 742 * @end_rgn: out parameter for the end of isolated region 743 * 744 * Walk @type and ensure that regions don't cross the boundaries defined by 745 * [@base, @base + @size). Crossing regions are split at the boundaries, 746 * which may create at most two more regions. The index of the first 747 * region inside the range is returned in *@start_rgn and end in *@end_rgn. 748 * 749 * Return: 750 * 0 on success, -errno on failure. 751 */ 752 static int __init_memblock memblock_isolate_range(struct memblock_type *type, 753 phys_addr_t base, phys_addr_t size, 754 int *start_rgn, int *end_rgn) 755 { 756 phys_addr_t end = base + memblock_cap_size(base, &size); 757 int idx; 758 struct memblock_region *rgn; 759 760 *start_rgn = *end_rgn = 0; 761 762 if (!size) 763 return 0; 764 765 /* we'll create at most two more regions */ 766 while (type->cnt + 2 > type->max) 767 if (memblock_double_array(type, base, size) < 0) 768 return -ENOMEM; 769 770 for_each_memblock_type(idx, type, rgn) { 771 phys_addr_t rbase = rgn->base; 772 phys_addr_t rend = rbase + rgn->size; 773 774 if (rbase >= end) 775 break; 776 if (rend <= base) 777 continue; 778 779 if (rbase < base) { 780 /* 781 * @rgn intersects from below. Split and continue 782 * to process the next region - the new top half. 783 */ 784 rgn->base = base; 785 rgn->size -= base - rbase; 786 type->total_size -= base - rbase; 787 memblock_insert_region(type, idx, rbase, base - rbase, 788 memblock_get_region_node(rgn), 789 rgn->flags); 790 } else if (rend > end) { 791 /* 792 * @rgn intersects from above. Split and redo the 793 * current region - the new bottom half. 
794 */ 795 rgn->base = end; 796 rgn->size -= end - rbase; 797 type->total_size -= end - rbase; 798 memblock_insert_region(type, idx--, rbase, end - rbase, 799 memblock_get_region_node(rgn), 800 rgn->flags); 801 } else { 802 /* @rgn is fully contained, record it */ 803 if (!*end_rgn) 804 *start_rgn = idx; 805 *end_rgn = idx + 1; 806 } 807 } 808 809 return 0; 810 } 811 812 static int __init_memblock memblock_remove_range(struct memblock_type *type, 813 phys_addr_t base, phys_addr_t size) 814 { 815 int start_rgn, end_rgn; 816 int i, ret; 817 818 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 819 if (ret) 820 return ret; 821 822 for (i = end_rgn - 1; i >= start_rgn; i--) 823 memblock_remove_region(type, i); 824 return 0; 825 } 826 827 int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) 828 { 829 phys_addr_t end = base + size - 1; 830 831 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 832 &base, &end, (void *)_RET_IP_); 833 834 return memblock_remove_range(&memblock.memory, base, size); 835 } 836 837 /** 838 * memblock_free - free boot memory allocation 839 * @ptr: starting address of the boot memory allocation 840 * @size: size of the boot memory block in bytes 841 * 842 * Free boot memory block previously allocated by memblock_alloc_xx() API. 843 * The freeing memory will not be released to the buddy allocator. 844 */ 845 void __init_memblock memblock_free(void *ptr, size_t size) 846 { 847 if (ptr) 848 memblock_phys_free(__pa(ptr), size); 849 } 850 851 /** 852 * memblock_phys_free - free boot memory block 853 * @base: phys starting address of the boot memory block 854 * @size: size of the boot memory block in bytes 855 * 856 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API. 857 * The freeing memory will not be released to the buddy allocator. 858 */ 859 int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size) 860 { 861 phys_addr_t end = base + size - 1; 862 863 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 864 &base, &end, (void *)_RET_IP_); 865 866 kmemleak_free_part_phys(base, size); 867 return memblock_remove_range(&memblock.reserved, base, size); 868 } 869 870 int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) 871 { 872 phys_addr_t end = base + size - 1; 873 874 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 875 &base, &end, (void *)_RET_IP_); 876 877 return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0); 878 } 879 880 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 881 int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size) 882 { 883 phys_addr_t end = base + size - 1; 884 885 memblock_dbg("%s: [%pa-%pa] %pS\n", __func__, 886 &base, &end, (void *)_RET_IP_); 887 888 return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0); 889 } 890 #endif 891 892 /** 893 * memblock_setclr_flag - set or clear flag for a memory region 894 * @base: base address of the region 895 * @size: size of the region 896 * @set: set or clear the flag 897 * @flag: the flag to update 898 * 899 * This function isolates region [@base, @base + @size), and sets/clears flag 900 * 901 * Return: 0 on success, -errno on failure. 
902 */ 903 static int __init_memblock memblock_setclr_flag(phys_addr_t base, 904 phys_addr_t size, int set, int flag) 905 { 906 struct memblock_type *type = &memblock.memory; 907 int i, ret, start_rgn, end_rgn; 908 909 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 910 if (ret) 911 return ret; 912 913 for (i = start_rgn; i < end_rgn; i++) { 914 struct memblock_region *r = &type->regions[i]; 915 916 if (set) 917 r->flags |= flag; 918 else 919 r->flags &= ~flag; 920 } 921 922 memblock_merge_regions(type, start_rgn, end_rgn); 923 return 0; 924 } 925 926 /** 927 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG. 928 * @base: the base phys addr of the region 929 * @size: the size of the region 930 * 931 * Return: 0 on success, -errno on failure. 932 */ 933 int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) 934 { 935 return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG); 936 } 937 938 /** 939 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region. 940 * @base: the base phys addr of the region 941 * @size: the size of the region 942 * 943 * Return: 0 on success, -errno on failure. 944 */ 945 int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) 946 { 947 return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG); 948 } 949 950 /** 951 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR. 952 * @base: the base phys addr of the region 953 * @size: the size of the region 954 * 955 * Return: 0 on success, -errno on failure. 956 */ 957 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) 958 { 959 if (!mirrored_kernelcore) 960 return 0; 961 962 system_has_some_mirror = true; 963 964 return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR); 965 } 966 967 /** 968 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP. 969 * @base: the base phys addr of the region 970 * @size: the size of the region 971 * 972 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the 973 * direct mapping of the physical memory. These regions will still be 974 * covered by the memory map. The struct page representing NOMAP memory 975 * frames in the memory map will be PageReserved() 976 * 977 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from 978 * memblock, the caller must inform kmemleak to ignore that memory 979 * 980 * Return: 0 on success, -errno on failure. 981 */ 982 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) 983 { 984 return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP); 985 } 986 987 /** 988 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region. 989 * @base: the base phys addr of the region 990 * @size: the size of the region 991 * 992 * Return: 0 on success, -errno on failure. 
993 */ 994 int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size) 995 { 996 return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP); 997 } 998 999 static bool should_skip_region(struct memblock_type *type, 1000 struct memblock_region *m, 1001 int nid, int flags) 1002 { 1003 int m_nid = memblock_get_region_node(m); 1004 1005 /* we never skip regions when iterating memblock.reserved or physmem */ 1006 if (type != memblock_memory) 1007 return false; 1008 1009 /* only memory regions are associated with nodes, check it */ 1010 if (nid != NUMA_NO_NODE && nid != m_nid) 1011 return true; 1012 1013 /* skip hotpluggable memory regions if needed */ 1014 if (movable_node_is_enabled() && memblock_is_hotpluggable(m) && 1015 !(flags & MEMBLOCK_HOTPLUG)) 1016 return true; 1017 1018 /* if we want mirror memory skip non-mirror memory regions */ 1019 if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m)) 1020 return true; 1021 1022 /* skip nomap memory unless we were asked for it explicitly */ 1023 if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m)) 1024 return true; 1025 1026 /* skip driver-managed memory unless we were asked for it explicitly */ 1027 if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m)) 1028 return true; 1029 1030 return false; 1031 } 1032 1033 /** 1034 * __next_mem_range - next function for for_each_free_mem_range() etc. 1035 * @idx: pointer to u64 loop variable 1036 * @nid: node selector, %NUMA_NO_NODE for all nodes 1037 * @flags: pick from blocks based on memory attributes 1038 * @type_a: pointer to memblock_type from where the range is taken 1039 * @type_b: pointer to memblock_type which excludes memory from being taken 1040 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 1041 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 1042 * @out_nid: ptr to int for nid of the range, can be %NULL 1043 * 1044 * Find the first area from *@idx which matches @nid, fill the out 1045 * parameters, and update *@idx for the next iteration. The lower 32bit of 1046 * *@idx contains index into type_a and the upper 32bit indexes the 1047 * areas before each region in type_b. For example, if type_b regions 1048 * look like the following, 1049 * 1050 * 0:[0-16), 1:[32-48), 2:[128-130) 1051 * 1052 * The upper 32bit indexes the following regions. 1053 * 1054 * 0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX) 1055 * 1056 * As both region arrays are sorted, the function advances the two indices 1057 * in lockstep and returns each intersection. 1058 */ 1059 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, 1060 struct memblock_type *type_a, 1061 struct memblock_type *type_b, phys_addr_t *out_start, 1062 phys_addr_t *out_end, int *out_nid) 1063 { 1064 int idx_a = *idx & 0xffffffff; 1065 int idx_b = *idx >> 32; 1066 1067 if (WARN_ONCE(nid == MAX_NUMNODES, 1068 "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 1069 nid = NUMA_NO_NODE; 1070 1071 for (; idx_a < type_a->cnt; idx_a++) { 1072 struct memblock_region *m = &type_a->regions[idx_a]; 1073 1074 phys_addr_t m_start = m->base; 1075 phys_addr_t m_end = m->base + m->size; 1076 int m_nid = memblock_get_region_node(m); 1077 1078 if (should_skip_region(type_a, m, nid, flags)) 1079 continue; 1080 1081 if (!type_b) { 1082 if (out_start) 1083 *out_start = m_start; 1084 if (out_end) 1085 *out_end = m_end; 1086 if (out_nid) 1087 *out_nid = m_nid; 1088 idx_a++; 1089 *idx = (u32)idx_a | (u64)idx_b << 32; 1090 return; 1091 } 1092 1093 /* scan areas before each reservation */ 1094 for (; idx_b < type_b->cnt + 1; idx_b++) { 1095 struct memblock_region *r; 1096 phys_addr_t r_start; 1097 phys_addr_t r_end; 1098 1099 r = &type_b->regions[idx_b]; 1100 r_start = idx_b ? r[-1].base + r[-1].size : 0; 1101 r_end = idx_b < type_b->cnt ? 1102 r->base : PHYS_ADDR_MAX; 1103 1104 /* 1105 * if idx_b advanced past idx_a, 1106 * break out to advance idx_a 1107 */ 1108 if (r_start >= m_end) 1109 break; 1110 /* if the two regions intersect, we're done */ 1111 if (m_start < r_end) { 1112 if (out_start) 1113 *out_start = 1114 max(m_start, r_start); 1115 if (out_end) 1116 *out_end = min(m_end, r_end); 1117 if (out_nid) 1118 *out_nid = m_nid; 1119 /* 1120 * The region which ends first is 1121 * advanced for the next iteration. 1122 */ 1123 if (m_end <= r_end) 1124 idx_a++; 1125 else 1126 idx_b++; 1127 *idx = (u32)idx_a | (u64)idx_b << 32; 1128 return; 1129 } 1130 } 1131 } 1132 1133 /* signal end of iteration */ 1134 *idx = ULLONG_MAX; 1135 } 1136 1137 /** 1138 * __next_mem_range_rev - generic next function for for_each_*_range_rev() 1139 * 1140 * @idx: pointer to u64 loop variable 1141 * @nid: node selector, %NUMA_NO_NODE for all nodes 1142 * @flags: pick from blocks based on memory attributes 1143 * @type_a: pointer to memblock_type from where the range is taken 1144 * @type_b: pointer to memblock_type which excludes memory from being taken 1145 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL 1146 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL 1147 * @out_nid: ptr to int for nid of the range, can be %NULL 1148 * 1149 * Finds the next range from type_a which is not marked as unsuitable 1150 * in type_b. 1151 * 1152 * Reverse of __next_mem_range(). 1153 */ 1154 void __init_memblock __next_mem_range_rev(u64 *idx, int nid, 1155 enum memblock_flags flags, 1156 struct memblock_type *type_a, 1157 struct memblock_type *type_b, 1158 phys_addr_t *out_start, 1159 phys_addr_t *out_end, int *out_nid) 1160 { 1161 int idx_a = *idx & 0xffffffff; 1162 int idx_b = *idx >> 32; 1163 1164 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. 
Use NUMA_NO_NODE instead\n")) 1165 nid = NUMA_NO_NODE; 1166 1167 if (*idx == (u64)ULLONG_MAX) { 1168 idx_a = type_a->cnt - 1; 1169 if (type_b != NULL) 1170 idx_b = type_b->cnt; 1171 else 1172 idx_b = 0; 1173 } 1174 1175 for (; idx_a >= 0; idx_a--) { 1176 struct memblock_region *m = &type_a->regions[idx_a]; 1177 1178 phys_addr_t m_start = m->base; 1179 phys_addr_t m_end = m->base + m->size; 1180 int m_nid = memblock_get_region_node(m); 1181 1182 if (should_skip_region(type_a, m, nid, flags)) 1183 continue; 1184 1185 if (!type_b) { 1186 if (out_start) 1187 *out_start = m_start; 1188 if (out_end) 1189 *out_end = m_end; 1190 if (out_nid) 1191 *out_nid = m_nid; 1192 idx_a--; 1193 *idx = (u32)idx_a | (u64)idx_b << 32; 1194 return; 1195 } 1196 1197 /* scan areas before each reservation */ 1198 for (; idx_b >= 0; idx_b--) { 1199 struct memblock_region *r; 1200 phys_addr_t r_start; 1201 phys_addr_t r_end; 1202 1203 r = &type_b->regions[idx_b]; 1204 r_start = idx_b ? r[-1].base + r[-1].size : 0; 1205 r_end = idx_b < type_b->cnt ? 1206 r->base : PHYS_ADDR_MAX; 1207 /* 1208 * if idx_b advanced past idx_a, 1209 * break out to advance idx_a 1210 */ 1211 1212 if (r_end <= m_start) 1213 break; 1214 /* if the two regions intersect, we're done */ 1215 if (m_end > r_start) { 1216 if (out_start) 1217 *out_start = max(m_start, r_start); 1218 if (out_end) 1219 *out_end = min(m_end, r_end); 1220 if (out_nid) 1221 *out_nid = m_nid; 1222 if (m_start >= r_start) 1223 idx_a--; 1224 else 1225 idx_b--; 1226 *idx = (u32)idx_a | (u64)idx_b << 32; 1227 return; 1228 } 1229 } 1230 } 1231 /* signal end of iteration */ 1232 *idx = ULLONG_MAX; 1233 } 1234 1235 /* 1236 * Common iterator interface used to define for_each_mem_pfn_range(). 1237 */ 1238 void __init_memblock __next_mem_pfn_range(int *idx, int nid, 1239 unsigned long *out_start_pfn, 1240 unsigned long *out_end_pfn, int *out_nid) 1241 { 1242 struct memblock_type *type = &memblock.memory; 1243 struct memblock_region *r; 1244 int r_nid; 1245 1246 while (++*idx < type->cnt) { 1247 r = &type->regions[*idx]; 1248 r_nid = memblock_get_region_node(r); 1249 1250 if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size)) 1251 continue; 1252 if (nid == MAX_NUMNODES || nid == r_nid) 1253 break; 1254 } 1255 if (*idx >= type->cnt) { 1256 *idx = -1; 1257 return; 1258 } 1259 1260 if (out_start_pfn) 1261 *out_start_pfn = PFN_UP(r->base); 1262 if (out_end_pfn) 1263 *out_end_pfn = PFN_DOWN(r->base + r->size); 1264 if (out_nid) 1265 *out_nid = r_nid; 1266 } 1267 1268 /** 1269 * memblock_set_node - set node ID on memblock regions 1270 * @base: base of area to set node ID for 1271 * @size: size of area to set node ID for 1272 * @type: memblock type to set node ID for 1273 * @nid: node ID to set 1274 * 1275 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid. 1276 * Regions which cross the area boundaries are split as necessary. 1277 * 1278 * Return: 1279 * 0 on success, -errno on failure. 
1280 */ 1281 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, 1282 struct memblock_type *type, int nid) 1283 { 1284 #ifdef CONFIG_NUMA 1285 int start_rgn, end_rgn; 1286 int i, ret; 1287 1288 ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn); 1289 if (ret) 1290 return ret; 1291 1292 for (i = start_rgn; i < end_rgn; i++) 1293 memblock_set_region_node(&type->regions[i], nid); 1294 1295 memblock_merge_regions(type, start_rgn, end_rgn); 1296 #endif 1297 return 0; 1298 } 1299 1300 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT 1301 /** 1302 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone() 1303 * 1304 * @idx: pointer to u64 loop variable 1305 * @zone: zone in which all of the memory blocks reside 1306 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL 1307 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL 1308 * 1309 * This function is meant to be a zone/pfn specific wrapper for the 1310 * for_each_mem_range type iterators. Specifically they are used in the 1311 * deferred memory init routines and as such we were duplicating much of 1312 * this logic throughout the code. So instead of having it in multiple 1313 * locations it seemed like it would make more sense to centralize this to 1314 * one new iterator that does everything they need. 1315 */ 1316 void __init_memblock 1317 __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, 1318 unsigned long *out_spfn, unsigned long *out_epfn) 1319 { 1320 int zone_nid = zone_to_nid(zone); 1321 phys_addr_t spa, epa; 1322 1323 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, 1324 &memblock.memory, &memblock.reserved, 1325 &spa, &epa, NULL); 1326 1327 while (*idx != U64_MAX) { 1328 unsigned long epfn = PFN_DOWN(epa); 1329 unsigned long spfn = PFN_UP(spa); 1330 1331 /* 1332 * Verify the end is at least past the start of the zone and 1333 * that we have at least one PFN to initialize. 1334 */ 1335 if (zone->zone_start_pfn < epfn && spfn < epfn) { 1336 /* if we went too far just stop searching */ 1337 if (zone_end_pfn(zone) <= spfn) { 1338 *idx = U64_MAX; 1339 break; 1340 } 1341 1342 if (out_spfn) 1343 *out_spfn = max(zone->zone_start_pfn, spfn); 1344 if (out_epfn) 1345 *out_epfn = min(zone_end_pfn(zone), epfn); 1346 1347 return; 1348 } 1349 1350 __next_mem_range(idx, zone_nid, MEMBLOCK_NONE, 1351 &memblock.memory, &memblock.reserved, 1352 &spa, &epa, NULL); 1353 } 1354 1355 /* signal end of iteration */ 1356 if (out_spfn) 1357 *out_spfn = ULONG_MAX; 1358 if (out_epfn) 1359 *out_epfn = 0; 1360 } 1361 1362 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1363 1364 /** 1365 * memblock_alloc_range_nid - allocate boot memory block 1366 * @size: size of memory block to be allocated in bytes 1367 * @align: alignment of the region and block's size 1368 * @start: the lower bound of the memory region to allocate (phys address) 1369 * @end: the upper bound of the memory region to allocate (phys address) 1370 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1371 * @exact_nid: control the allocation fall back to other nodes 1372 * 1373 * The allocation is performed from memory region limited by 1374 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE. 1375 * 1376 * If the specified node can not hold the requested memory and @exact_nid 1377 * is false, the allocation falls back to any node in the system. 
1378 * 1379 * For systems with memory mirroring, the allocation is attempted first 1380 * from the regions with mirroring enabled and then retried from any 1381 * memory region. 1382 * 1383 * In addition, function using kmemleak_alloc_phys for allocated boot 1384 * memory block, it is never reported as leaks. 1385 * 1386 * Return: 1387 * Physical address of allocated memory block on success, %0 on failure. 1388 */ 1389 phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size, 1390 phys_addr_t align, phys_addr_t start, 1391 phys_addr_t end, int nid, 1392 bool exact_nid) 1393 { 1394 enum memblock_flags flags = choose_memblock_flags(); 1395 phys_addr_t found; 1396 1397 if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n")) 1398 nid = NUMA_NO_NODE; 1399 1400 if (!align) { 1401 /* Can't use WARNs this early in boot on powerpc */ 1402 dump_stack(); 1403 align = SMP_CACHE_BYTES; 1404 } 1405 1406 again: 1407 found = memblock_find_in_range_node(size, align, start, end, nid, 1408 flags); 1409 if (found && !memblock_reserve(found, size)) 1410 goto done; 1411 1412 if (nid != NUMA_NO_NODE && !exact_nid) { 1413 found = memblock_find_in_range_node(size, align, start, 1414 end, NUMA_NO_NODE, 1415 flags); 1416 if (found && !memblock_reserve(found, size)) 1417 goto done; 1418 } 1419 1420 if (flags & MEMBLOCK_MIRROR) { 1421 flags &= ~MEMBLOCK_MIRROR; 1422 pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n", 1423 &size); 1424 goto again; 1425 } 1426 1427 return 0; 1428 1429 done: 1430 /* 1431 * Skip kmemleak for those places like kasan_init() and 1432 * early_pgtable_alloc() due to high volume. 1433 */ 1434 if (end != MEMBLOCK_ALLOC_NOLEAKTRACE) 1435 /* 1436 * Memblock allocated blocks are never reported as 1437 * leaks. This is because many of these blocks are 1438 * only referred via the physical address which is 1439 * not looked up by kmemleak. 1440 */ 1441 kmemleak_alloc_phys(found, size, 0); 1442 1443 /* 1444 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP, 1445 * require memory to be accepted before it can be used by the 1446 * guest. 1447 * 1448 * Accept the memory of the allocated buffer. 1449 */ 1450 accept_memory(found, found + size); 1451 1452 return found; 1453 } 1454 1455 /** 1456 * memblock_phys_alloc_range - allocate a memory block inside specified range 1457 * @size: size of memory block to be allocated in bytes 1458 * @align: alignment of the region and block's size 1459 * @start: the lower bound of the memory region to allocate (physical address) 1460 * @end: the upper bound of the memory region to allocate (physical address) 1461 * 1462 * Allocate @size bytes in the between @start and @end. 1463 * 1464 * Return: physical address of the allocated memory block on success, 1465 * %0 on failure. 
1466 */ 1467 phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size, 1468 phys_addr_t align, 1469 phys_addr_t start, 1470 phys_addr_t end) 1471 { 1472 memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n", 1473 __func__, (u64)size, (u64)align, &start, &end, 1474 (void *)_RET_IP_); 1475 return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE, 1476 false); 1477 } 1478 1479 /** 1480 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node 1481 * @size: size of memory block to be allocated in bytes 1482 * @align: alignment of the region and block's size 1483 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1484 * 1485 * Allocates memory block from the specified NUMA node. If the node 1486 * has no available memory, attempts to allocated from any node in the 1487 * system. 1488 * 1489 * Return: physical address of the allocated memory block on success, 1490 * %0 on failure. 1491 */ 1492 phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) 1493 { 1494 return memblock_alloc_range_nid(size, align, 0, 1495 MEMBLOCK_ALLOC_ACCESSIBLE, nid, false); 1496 } 1497 1498 /** 1499 * memblock_alloc_internal - allocate boot memory block 1500 * @size: size of memory block to be allocated in bytes 1501 * @align: alignment of the region and block's size 1502 * @min_addr: the lower bound of the memory region to allocate (phys address) 1503 * @max_addr: the upper bound of the memory region to allocate (phys address) 1504 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1505 * @exact_nid: control the allocation fall back to other nodes 1506 * 1507 * Allocates memory block using memblock_alloc_range_nid() and 1508 * converts the returned physical address to virtual. 1509 * 1510 * The @min_addr limit is dropped if it can not be satisfied and the allocation 1511 * will fall back to memory below @min_addr. Other constraints, such 1512 * as node and mirrored memory will be handled again in 1513 * memblock_alloc_range_nid(). 1514 * 1515 * Return: 1516 * Virtual address of allocated memory block on success, NULL on failure. 
1517 */ 1518 static void * __init memblock_alloc_internal( 1519 phys_addr_t size, phys_addr_t align, 1520 phys_addr_t min_addr, phys_addr_t max_addr, 1521 int nid, bool exact_nid) 1522 { 1523 phys_addr_t alloc; 1524 1525 /* 1526 * Detect any accidental use of these APIs after slab is ready, as at 1527 * this moment memblock may be deinitialized already and its 1528 * internal data may be destroyed (after execution of memblock_free_all) 1529 */ 1530 if (WARN_ON_ONCE(slab_is_available())) 1531 return kzalloc_node(size, GFP_NOWAIT, nid); 1532 1533 if (max_addr > memblock.current_limit) 1534 max_addr = memblock.current_limit; 1535 1536 alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid, 1537 exact_nid); 1538 1539 /* retry allocation without lower limit */ 1540 if (!alloc && min_addr) 1541 alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid, 1542 exact_nid); 1543 1544 if (!alloc) 1545 return NULL; 1546 1547 return phys_to_virt(alloc); 1548 } 1549 1550 /** 1551 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node 1552 * without zeroing memory 1553 * @size: size of memory block to be allocated in bytes 1554 * @align: alignment of the region and block's size 1555 * @min_addr: the lower bound of the memory region from where the allocation 1556 * is preferred (phys address) 1557 * @max_addr: the upper bound of the memory region from where the allocation 1558 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 1559 * allocate only from memory limited by memblock.current_limit value 1560 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1561 * 1562 * Public function, provides additional debug information (including caller 1563 * info), if enabled. Does not zero allocated memory. 1564 * 1565 * Return: 1566 * Virtual address of allocated memory block on success, NULL on failure. 1567 */ 1568 void * __init memblock_alloc_exact_nid_raw( 1569 phys_addr_t size, phys_addr_t align, 1570 phys_addr_t min_addr, phys_addr_t max_addr, 1571 int nid) 1572 { 1573 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1574 __func__, (u64)size, (u64)align, nid, &min_addr, 1575 &max_addr, (void *)_RET_IP_); 1576 1577 return memblock_alloc_internal(size, align, min_addr, max_addr, nid, 1578 true); 1579 } 1580 1581 /** 1582 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing 1583 * memory and without panicking 1584 * @size: size of memory block to be allocated in bytes 1585 * @align: alignment of the region and block's size 1586 * @min_addr: the lower bound of the memory region from where the allocation 1587 * is preferred (phys address) 1588 * @max_addr: the upper bound of the memory region from where the allocation 1589 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 1590 * allocate only from memory limited by memblock.current_limit value 1591 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1592 * 1593 * Public function, provides additional debug information (including caller 1594 * info), if enabled. Does not zero allocated memory, does not panic if request 1595 * cannot be satisfied. 1596 * 1597 * Return: 1598 * Virtual address of allocated memory block on success, NULL on failure. 
1599 */ 1600 void * __init memblock_alloc_try_nid_raw( 1601 phys_addr_t size, phys_addr_t align, 1602 phys_addr_t min_addr, phys_addr_t max_addr, 1603 int nid) 1604 { 1605 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1606 __func__, (u64)size, (u64)align, nid, &min_addr, 1607 &max_addr, (void *)_RET_IP_); 1608 1609 return memblock_alloc_internal(size, align, min_addr, max_addr, nid, 1610 false); 1611 } 1612 1613 /** 1614 * memblock_alloc_try_nid - allocate boot memory block 1615 * @size: size of memory block to be allocated in bytes 1616 * @align: alignment of the region and block's size 1617 * @min_addr: the lower bound of the memory region from where the allocation 1618 * is preferred (phys address) 1619 * @max_addr: the upper bound of the memory region from where the allocation 1620 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to 1621 * allocate only from memory limited by memblock.current_limit value 1622 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1623 * 1624 * Public function, provides additional debug information (including caller 1625 * info), if enabled. This function zeroes the allocated memory. 1626 * 1627 * Return: 1628 * Virtual address of allocated memory block on success, NULL on failure. 1629 */ 1630 void * __init memblock_alloc_try_nid( 1631 phys_addr_t size, phys_addr_t align, 1632 phys_addr_t min_addr, phys_addr_t max_addr, 1633 int nid) 1634 { 1635 void *ptr; 1636 1637 memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n", 1638 __func__, (u64)size, (u64)align, nid, &min_addr, 1639 &max_addr, (void *)_RET_IP_); 1640 ptr = memblock_alloc_internal(size, align, 1641 min_addr, max_addr, nid, false); 1642 if (ptr) 1643 memset(ptr, 0, size); 1644 1645 return ptr; 1646 } 1647 1648 /** 1649 * memblock_free_late - free pages directly to buddy allocator 1650 * @base: phys starting address of the boot memory block 1651 * @size: size of the boot memory block in bytes 1652 * 1653 * This is only useful when the memblock allocator has already been torn 1654 * down, but we are still initializing the system. Pages are released directly 1655 * to the buddy allocator. 
1656 */ 1657 void __init memblock_free_late(phys_addr_t base, phys_addr_t size) 1658 { 1659 phys_addr_t cursor, end; 1660 1661 end = base + size - 1; 1662 memblock_dbg("%s: [%pa-%pa] %pS\n", 1663 __func__, &base, &end, (void *)_RET_IP_); 1664 kmemleak_free_part_phys(base, size); 1665 cursor = PFN_UP(base); 1666 end = PFN_DOWN(base + size); 1667 1668 for (; cursor < end; cursor++) { 1669 memblock_free_pages(pfn_to_page(cursor), cursor, 0); 1670 totalram_pages_inc(); 1671 } 1672 } 1673 1674 /* 1675 * Remaining API functions 1676 */ 1677 1678 phys_addr_t __init_memblock memblock_phys_mem_size(void) 1679 { 1680 return memblock.memory.total_size; 1681 } 1682 1683 phys_addr_t __init_memblock memblock_reserved_size(void) 1684 { 1685 return memblock.reserved.total_size; 1686 } 1687 1688 /* lowest address */ 1689 phys_addr_t __init_memblock memblock_start_of_DRAM(void) 1690 { 1691 return memblock.memory.regions[0].base; 1692 } 1693 1694 phys_addr_t __init_memblock memblock_end_of_DRAM(void) 1695 { 1696 int idx = memblock.memory.cnt - 1; 1697 1698 return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); 1699 } 1700 1701 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit) 1702 { 1703 phys_addr_t max_addr = PHYS_ADDR_MAX; 1704 struct memblock_region *r; 1705 1706 /* 1707 * translate the memory @limit size into the max address within one of 1708 * the memory memblock regions, if the @limit exceeds the total size 1709 * of those regions, max_addr will keep original value PHYS_ADDR_MAX 1710 */ 1711 for_each_mem_region(r) { 1712 if (limit <= r->size) { 1713 max_addr = r->base + limit; 1714 break; 1715 } 1716 limit -= r->size; 1717 } 1718 1719 return max_addr; 1720 } 1721 1722 void __init memblock_enforce_memory_limit(phys_addr_t limit) 1723 { 1724 phys_addr_t max_addr; 1725 1726 if (!limit) 1727 return; 1728 1729 max_addr = __find_max_addr(limit); 1730 1731 /* @limit exceeds the total size of the memory, do nothing */ 1732 if (max_addr == PHYS_ADDR_MAX) 1733 return; 1734 1735 /* truncate both memory and reserved regions */ 1736 memblock_remove_range(&memblock.memory, max_addr, 1737 PHYS_ADDR_MAX); 1738 memblock_remove_range(&memblock.reserved, max_addr, 1739 PHYS_ADDR_MAX); 1740 } 1741 1742 void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size) 1743 { 1744 int start_rgn, end_rgn; 1745 int i, ret; 1746 1747 if (!size) 1748 return; 1749 1750 if (!memblock_memory->total_size) { 1751 pr_warn("%s: No memory registered yet\n", __func__); 1752 return; 1753 } 1754 1755 ret = memblock_isolate_range(&memblock.memory, base, size, 1756 &start_rgn, &end_rgn); 1757 if (ret) 1758 return; 1759 1760 /* remove all the MAP regions */ 1761 for (i = memblock.memory.cnt - 1; i >= end_rgn; i--) 1762 if (!memblock_is_nomap(&memblock.memory.regions[i])) 1763 memblock_remove_region(&memblock.memory, i); 1764 1765 for (i = start_rgn - 1; i >= 0; i--) 1766 if (!memblock_is_nomap(&memblock.memory.regions[i])) 1767 memblock_remove_region(&memblock.memory, i); 1768 1769 /* truncate the reserved regions */ 1770 memblock_remove_range(&memblock.reserved, 0, base); 1771 memblock_remove_range(&memblock.reserved, 1772 base + size, PHYS_ADDR_MAX); 1773 } 1774 1775 void __init memblock_mem_limit_remove_map(phys_addr_t limit) 1776 { 1777 phys_addr_t max_addr; 1778 1779 if (!limit) 1780 return; 1781 1782 max_addr = __find_max_addr(limit); 1783 1784 /* @limit exceeds the total size of the memory, do nothing */ 1785 if (max_addr == PHYS_ADDR_MAX) 1786 return; 1787 1788 
void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return memblock_get_region_node(&type->regions[mid]);
}

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

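/*
 * Illustrative sketch (hypothetical address): the point-query helpers
 * above are one way architecture code can distinguish RAM that will be
 * covered by the kernel's linear mapping from nomap or device ranges:
 *
 *	phys_addr_t addr = 0x80200000;
 *
 *	if (memblock_is_map_memory(addr))
 *		;	// normal RAM, expected to be in the linear map
 *	else if (memblock_is_memory(addr))
 *		;	// RAM registered with MEMBLOCK_NOMAP
 *	else
 *		;	// not RAM at all (e.g. MMIO)
 */
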
/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_mem_region(r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_NUMA
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

static void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&physmem);
#endif
}

void __init_memblock memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_phys_free(pg, pgend - pg);
}

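/*
 * Worked example (hypothetical numbers) for free_memmap() above: a hole
 * spanning PFNs [0x1000, 0x1800) leaves 0x800 unused struct page entries;
 * assuming a 64-byte struct page, that is 128 KiB of memmap. Since the
 * start is rounded up and the end rounded down to PAGE_SIZE, only whole
 * pages of the memmap array are returned to memblock, and partial pages
 * shared with neighbouring present ranges stay allocated.
 */
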
/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = pageblock_start_pfn(start);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		prev_end = pageblock_align(end);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = pageblock_align(end);
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}

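/*
 * Worked example (hypothetical PFNs, assuming 512-page pageblocks) for
 * free_unused_memmap() above: with banks covering PFNs [0x0, 0x101f0)
 * and [0x18000, 0x20000), the end of the first bank is rounded up to
 * 0x10200 and the start of the second rounded down to 0x18000, so only
 * the memmap for [0x10200, 0x18000) is freed and the pageblocks at both
 * edges keep a complete memory map.
 */
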
static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		/*
		 * Free the pages in the largest chunks alignment allows.
		 *
		 * __ffs() behaviour is undefined for 0. start == 0 is
		 * MAX_ORDER-aligned, set order to MAX_ORDER for the case.
		 */
		if (start)
			order = min_t(int, MAX_ORDER, __ffs(start));
		else
			order = MAX_ORDER;

		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static void __init memmap_init_reserved_pages(void)
{
	struct memblock_region *region;
	phys_addr_t start, end;
	int nid;

	/*
	 * set nid on all reserved pages and also treat struct
	 * pages for the NOMAP regions as PageReserved
	 */
	for_each_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		if (memblock_is_nomap(region))
			reserve_bootmem_region(start, end, nid);

		memblock_set_node(start, end, &memblock.reserved, nid);
	}

	/* initialize struct pages for the reserved regions */
	for_each_reserved_mem_region(region) {
		nid = memblock_get_region_node(region);
		start = region->base;
		end = start + region->size;

		reserve_bootmem_region(start, end, nid);
	}
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	memmap_init_reserved_pages();

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, e.g. when node 0 has no RAM installed,
	 * low memory will be on node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

static void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 */
void __init memblock_free_all(void)
{
	unsigned long pages;

	free_unused_memmap();
	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);
}

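/*
 * Worked example (hypothetical PFNs, assuming MAX_ORDER == 10) for the
 * order selection in __free_pages_memory() above: a free range starting
 * at pfn 0x2300 has __ffs() == 8, so the first chunk freed is 2^8 = 256
 * pages; that re-aligns start to 0x2400 where __ffs() == 10, so further
 * chunks are MAX_ORDER-sized until the distance to 'end' forces the
 * order back down.
 */
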

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;
		nid = memblock_get_region_node(reg);

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa ", &reg->base, &end);
		if (nid != MAX_NUMNODES)
			seq_printf(m, "%4d ", nid);
		else
			seq_printf(m, "%4c ", 'x');
		if (reg->flags) {
			for (j = 0; j < count; j++) {
				if (reg->flags & (1U << j)) {
					seq_printf(m, "%s\n", flagname[j]);
					break;
				}
			}
			if (j == count)
				seq_printf(m, "%s\n", "UNKNOWN");
		} else {
			seq_printf(m, "%s\n", "NONE");
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root, &physmem,
			    &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
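
/*
 * Usage note (assuming debugfs is mounted at the conventional
 * /sys/kernel/debug): with CONFIG_DEBUG_FS and CONFIG_ARCH_KEEP_MEMBLOCK
 * enabled, the files created above expose the region lists at runtime:
 *
 *	# cat /sys/kernel/debug/memblock/memory
 *	# cat /sys/kernel/debug/memblock/reserved
 */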