/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
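/*
 * Illustrative example (added commentary, not from the original source):
 * with a 64-bit phys_addr_t, capping base = 0xfffffffffffff000 with
 * *size = 0x2000 yields *size = 0xfff, since ULLONG_MAX - base = 0xfff.
 * The capped size guarantees that base + *size never exceeds ULLONG_MAX,
 * so later "base + size" arithmetic cannot wrap around zero.
 */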
/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}
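/*
 * Illustrative example (added commentary, not from the original source):
 * for a free range [0x1234, 0x9000) with size = 0x1000 and align = 0x1000,
 * the candidate is round_up(0x1234, 0x1000) = 0x2000; since
 * 0x9000 - 0x2000 >= 0x1000, 0x2000 is returned as the lowest suitable
 * address. A free range too small to hold an aligned candidate is skipped.
 */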
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, we will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
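/*
 * Illustrative usage sketch (added commentary, not from the original
 * source): an early-boot caller searching for 1 MiB aligned to 2 MiB,
 * anywhere below the current limit, on any node, might do:
 *
 *	phys_addr_t addr = memblock_find_in_range_node(SZ_1M, SZ_2M, 0,
 *				MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE,
 *				MEMBLOCK_NONE);
 *
 * (SZ_1M/SZ_2M are the usual <linux/sizes.h> constants, used here only
 * for illustration.) Note this only *finds* a range; it does not reserve
 * it. Allocators below pair it with memblock_reserve().
 */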
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab is available and we use it, or
	 * we allocate from MEMBLOCK. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should, however, not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			type->name, type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock. Otherwise,
	 * we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
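/*
 * Illustrative sizing example (added commentary, not from the original
 * source): assuming the default INIT_MEMBLOCK_REGIONS of 128 entries and
 * a 32-byte struct memblock_region, the first doubling grows the array
 * from 128 to 256 entries; old_size = 4096 and new_size = 8192 bytes, so
 * both PAGE_ALIGN()ed allocation sizes are exact multiples of a 4 KiB
 * page. The 32-byte entry size and 4 KiB page are assumptions for the
 * sake of the arithmetic; both vary with configuration and architecture.
 */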
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type. The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions. @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice. Once with %false @insert and
	 * then with %true. The first counts the number of regions needed
	 * to accommodate the new area. The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps. If it separates the lower part of the new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
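/*
 * Illustrative example (added commentary, not from the original source):
 * if memory already contains [0x1000, 0x3000) and memblock_add() is
 * called for [0x2000, 0x5000), the first pass counts one region to
 * insert (the non-overlapping tail [0x3000, 0x5000)), the second pass
 * inserts it, and memblock_merge_regions() then collapses the two
 * adjacent, compatible entries into a single [0x1000, 0x5000) region.
 */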
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size). Crossing regions are split at the boundaries,
 * which may create at most two more regions. The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below. Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above. Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
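/*
 * Illustrative example (added commentary, not from the original source):
 * isolating [0x2000, 0x6000) from a single region [0x0, 0x8000) first
 * splits off the bottom half, leaving [0x0, 0x2000) and [0x2000, 0x8000),
 * then splits the top, leaving [0x2000, 0x6000) and [0x6000, 0x8000).
 * *start_rgn/*end_rgn then bracket exactly the [0x2000, 0x6000) entry,
 * which callers such as memblock_remove_range() can drop or re-flag.
 */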
static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on memory regions in a range
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
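/*
 * Illustrative usage sketch (added commentary, not from the original
 * source): walking all reserved ranges via the wrapper macro from
 * <linux/memblock.h>:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 *
 * Note that @end here is the inclusive last byte of each region, as set
 * by __next_reserved_mem_region() above.
 */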
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration. The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b. For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
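/*
 * Illustrative example of the *@idx encoding (added commentary, not from
 * the original source): after returning the intersection of type_a
 * region 2 with the gap before type_b region 1, the cursor is
 * *idx = (u32)2 | (u64)1 << 32, i.e. 0x0000000100000002. Unpacking with
 * "*idx & 0xffffffff" and "*idx >> 32" recovers idx_a = 2 and idx_b = 1
 * on the next call, so the scan resumes exactly where it left off.
 */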
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
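/*
 * Illustrative example (added commentary, not from the original source):
 * with 4 KiB pages, a region [0x1800, 0x4000) yields
 * *out_start_pfn = PFN_UP(0x1800) = 2 and *out_end_pfn = PFN_DOWN(0x4000)
 * = 4, i.e. only pages fully covered by the region are reported. A
 * sub-page region that covers no complete page is skipped entirely by
 * the PFN_UP() >= PFN_DOWN() test above.
 */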
unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
						      unsigned long max_pfn)
{
	struct memblock_type *type = &memblock.memory;
	unsigned int right = type->cnt;
	unsigned int mid, left = 0;
	phys_addr_t addr = PFN_PHYS(pfn + 1);

	do {
		mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else {
			/* addr is within the region, so pfn + 1 is valid */
			return min(pfn + 1, max_pfn);
		}
	} while (left < right);

	if (right == type->cnt)
		return max_pfn;
	else
		return min(PHYS_PFN(type->regions[right].base), max_pfn);
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
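/*
 * Illustrative usage sketch (added commentary, not from the original
 * source): all of the physical allocators above funnel into
 * memblock_alloc_range_nid(), which pairs memblock_find_in_range_node()
 * with memblock_reserve(). A typical early-boot caller that cannot
 * tolerate failure would use:
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 * and rely on memblock_alloc_base() panicking instead of returning 0.
 */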
/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of the allocated boot memory block is converted to virtual
 * and the allocated memory is reset to 0.
 *
 * In addition, the function sets the min_count to 0 using kmemleak_alloc for
 * the allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES,
	    "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}
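/*
 * Summary of the fallback order implemented above (added commentary, not
 * from the original source): (1) the requested node with the current
 * flags; (2) any node; (3) both again with @min_addr dropped to 0;
 * (4) both again with MEMBLOCK_MIRROR cleared. Only when every
 * combination fails does the function return NULL.
 */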
/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_internal()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success; panics on
 * failure and therefore never actually returns NULL.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	memblock_remove_range(&memblock.reserved, base, size);
}
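/*
 * Illustrative pairing sketch (added commentary, not from the original
 * source): a boot-time table that turns out to be unnecessary can be
 * returned to memblock:
 *
 *	void *tbl = memblock_virt_alloc_try_nid_nopanic(SZ_64K, SZ_4K,
 *			0, BOOTMEM_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 *	...
 *	if (tbl)
 *		__memblock_free_early(__pa(tbl), SZ_64K);
 *
 * SZ_64K/SZ_4K are illustrative <linux/sizes.h> constants.
 */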
/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	/*
	 * Translate the memory @limit size into the max address within one of
	 * the memory memblock regions. If the @limit exceeds the total size
	 * of those regions, max_addr keeps its original value of ULLONG_MAX.
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			      base + size, (phys_addr_t)ULLONG_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}
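/*
 * Illustrative contrast (added commentary, not from the original source):
 * with a single memory region [0x1000, 0x9000) and a reserved region
 * [0x2000, 0x3000), memblock_is_region_memory(0x8000, 0x2000) is false
 * because [0x8000, 0xa000) is not fully contained in memory, while
 * memblock_is_region_reserved(0x2800, 0x2000), defined below, is true
 * because [0x2800, 0x4800) merely intersects the reserved block.
 */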
/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

unsigned long __init_memblock
memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
{
	struct memblock_region *rgn;
	unsigned long size = 0;
	int idx;

	for_each_memblock_type((&memblock.reserved), rgn) {
		phys_addr_t start, end;

		if (rgn->base + rgn->size < start_addr)
			continue;
		if (rgn->base > end_addr)
			continue;

		start = rgn->base;
		end = start + rgn->size;
		size += end - start;
	}

	return size;
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */