/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}
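
/*
 * Illustrative example (not from the original file): regions are treated
 * as half-open intervals [base, base + size), so regions that merely
 * touch do not count as overlapping:
 *
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000);
 *		// false: [0x1000,0x2000) only touches [0x2000,0x3000)
 *	memblock_addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000);
 *		// true: the two share [0x1800,0x2000)
 */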

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater than
 * the end of the kernel image.  Otherwise, it will be trimmed.  The
 * reason is that we want the bottom-up allocation to be close to the
 * kernel image, so that it is highly likely that the allocated memory
 * and the kernel will reside on the same node.
 *
 * If bottom-up allocation fails, the allocation falls back to top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
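
/*
 * Illustrative example (not from the original file): with the default
 * top-down policy the finder returns the highest suitable range, while
 * after memblock_set_bottom_up(true) the search starts just above the
 * kernel image.  Note that this only *finds* a range; the caller still
 * has to memblock_reserve() it (memblock_alloc_range_nid() below does
 * both):
 *
 *	phys_addr_t cand;
 *
 *	cand = memblock_find_in_range_node(SZ_1M, SZ_2M, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   NUMA_NO_NODE, MEMBLOCK_NONE);
 */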

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so that we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
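
/*
 * Illustrative example (not from the original file): the static arrays
 * above hold INIT_MEMBLOCK_REGIONS entries, and memblock_double_array()
 * refuses to grow them until the architecture declares the reserved
 * layout known by calling memblock_allow_resize().  A typical sequence:
 *
 *	memblock_allow_resize();	// arch init, critical ranges reserved
 *	memblock_reserve(base, size);	// may now double the array on demand
 */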

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     0UL, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
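
/*
 * Illustrative example (not from the original file): a typical early-boot
 * sequence first registers all RAM and then carves out ranges that must
 * never be handed out by the allocator (the RAM address is hypothetical):
 *
 *	memblock_add(0x80000000, SZ_512M);		// report RAM
 *	memblock_reserve(__pa_symbol(_stext),
 *			 _end - _stext);		// protect kernel image
 */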

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     0UL, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a range of memory regions
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to set or clear
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}
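
/*
 * Illustrative example (not from the original file): an architecture that
 * learns hotpluggable ranges from firmware tables (e.g. the SRAT on x86)
 * marks them before allocations start, so the default iterators skip them
 * while movable_node is enabled, and clears the flag once zone sizing is
 * done (the range variables are hypothetical):
 *
 *	memblock_mark_hotplug(hotplug_base, hotplug_size);
 *	...
 *	memblock_clear_hotplug(0, ULLONG_MAX);
 */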

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
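
/*
 * Illustrative example (not from the original file) of the cursor
 * encoding used above: idx_a == 1 and idx_b == 2 are packed as
 *
 *	*idx = (u32)1 | (u64)2 << 32;	// == 0x0000000200000001
 *
 * so one u64 carries the position in both region arrays across calls.
 */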

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
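
/*
 * Illustrative example (not from the original file): walking memory as
 * PFN ranges, in the style of the zone/sparse init code:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns [%lx..%lx)\n",
 *			nid, start_pfn, end_pfn);
 */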

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
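
/*
 * Illustrative example (not from the original file): the physical
 * allocators above nest as thin wrappers.  Asking for 1 MiB, 4 KiB
 * aligned, anywhere below the current limit, and panicking on failure:
 *
 *	phys_addr_t pa = memblock_alloc_base(SZ_1M, SZ_4K,
 *					     MEMBLOCK_ALLOC_ACCESSIBLE);
 *
 * memblock_alloc_try_nid() additionally tries the preferred node first
 * and only then falls back to any node.
 */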

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr.  Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of the allocated boot memory block is converted to virtual
 * and the allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc for
 * the allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					    max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}
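
/*
 * Illustrative example (not from the original file): boot-time tables are
 * usually obtained through the memblock_virt_alloc*() wrappers from
 * <linux/memblock.h> and, if they turn out to be unneeded, returned with
 * the matching free helper:
 *
 *	void *tbl = memblock_virt_alloc(SZ_64K, SZ_4K); // zeroed, panics on failure
 *	...
 *	memblock_free_early(__pa(tbl), SZ_64K);
 */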

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}
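
/*
 * Illustrative example (not from the original file): because both region
 * arrays are kept sorted and non-overlapping, classifying an address is
 * a cheap binary search:
 *
 *	if (memblock_is_memory(pa) && !memblock_is_reserved(pa))
 *		;	// pa is known RAM and still unallocated at boot
 */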

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for_each_memblock_type(type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, idx, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */
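
/*
 * Illustrative example (not from the original file): booting with
 * "memblock=debug" on the kernel command line enables the memblock_dbg()
 * traces above, and with CONFIG_DEBUG_FS the final layout can be read
 * back at runtime:
 *
 *	# mount -t debugfs none /sys/kernel/debug
 *	# cat /sys/kernel/debug/memblock/memory
 *	   0: 0x0000000080000000..0x00000000ffffffff
 */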